block: Convert bio_for_each_segment() to bvec_iter
More prep work for immutable biovecs - with immutable bvecs drivers won't be
able to use the biovec directly, they'll need to use helpers that take into
account bio->bi_iter.bi_bvec_done.

This updates callers for the new usage without changing the implementation
yet.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>
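For orientation, a minimal sketch of the calling-convention change that every
hunk below applies: the loop index becomes a struct bvec_iter and the bvec is
yielded by value, so the iteration helper can account for a partially
consumed bvec (bio->bi_iter.bi_bvec_done). The count_bytes_* helpers and the
byte-counting loop are invented for illustration, and the two variants target
the kernel before and after this series, so they will not build against the
same headers.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/bio.h>

/* Before: bio_for_each_segment() took a struct bio_vec * and an int index. */
static unsigned count_bytes_old(struct bio *bio)
{
        struct bio_vec *bv;
        int i;
        unsigned bytes = 0;

        bio_for_each_segment(bv, bio, i)
                bytes += bv->bv_len;

        return bytes;
}

/* After: a struct bvec_iter iterator and a struct bio_vec yielded by value. */
static unsigned count_bytes_new(struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned bytes = 0;

        bio_for_each_segment(bv, bio, iter)
                bytes += bv.bv_len;

        return bytes;
}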
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
         struct bio_vec *bv;
         int n;
 
-        __bio_for_each_segment(bv, b->bio, n, 0)
+        bio_for_each_segment_all(bv, b->bio, n)
                 __free_page(bv->bv_page);
 
         __btree_node_write_done(cl);
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
                 struct bio_vec *bv;
                 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-                bio_for_each_segment(bv, b->bio, j)
+                bio_for_each_segment_all(bv, b->bio, j)
                         memcpy(page_address(bv->bv_page),
                                base + j * PAGE_SIZE, PAGE_SIZE);
 
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
         char name[BDEVNAME_SIZE];
         struct bio *check;
-        struct bio_vec *bv;
+        struct bio_vec bv, *bv2;
+        struct bvec_iter iter;
         int i;
 
         check = bio_clone(bio, GFP_NOIO);
@@ -185,13 +186,13 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 
         submit_bio_wait(READ_SYNC, check);
 
-        bio_for_each_segment(bv, bio, i) {
-                void *p1 = kmap_atomic(bv->bv_page);
-                void *p2 = page_address(check->bi_io_vec[i].bv_page);
+        bio_for_each_segment(bv, bio, iter) {
+                void *p1 = kmap_atomic(bv.bv_page);
+                void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-                cache_set_err_on(memcmp(p1 + bv->bv_offset,
-                                        p2 + bv->bv_offset,
-                                        bv->bv_len),
+                cache_set_err_on(memcmp(p1 + bv.bv_offset,
+                                        p2 + bv.bv_offset,
+                                        bv.bv_len),
                                  dc->disk.c,
                                  "verify failed at dev %s sector %llu",
                                  bdevname(dc->bdev, name),
@@ -200,8 +201,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
                 kunmap_atomic(p1);
         }
 
-        bio_for_each_segment_all(bv, check, i)
-                __free_page(bv->bv_page);
+        bio_for_each_segment_all(bv2, check, i)
+                __free_page(bv2->bv_page);
 out_put:
         bio_put(check);
 }
@@ -22,12 +22,12 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error)
 static void bch_generic_make_request_hack(struct bio *bio)
 {
         if (bio->bi_iter.bi_idx) {
-                int i;
-                struct bio_vec *bv;
+                struct bio_vec bv;
+                struct bvec_iter iter;
                 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
 
-                bio_for_each_segment(bv, bio, i)
-                        clone->bi_io_vec[clone->bi_vcnt++] = *bv;
+                bio_for_each_segment(bv, bio, iter)
+                        clone->bi_io_vec[clone->bi_vcnt++] = bv;
 
                 clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
                 clone->bi_bdev = bio->bi_bdev;
@@ -73,8 +73,9 @@ static void bch_generic_make_request_hack(struct bio *bio)
 struct bio *bch_bio_split(struct bio *bio, int sectors,
                           gfp_t gfp, struct bio_set *bs)
 {
-        unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9;
-        struct bio_vec *bv;
+        unsigned vcnt = 0, nbytes = sectors << 9;
+        struct bio_vec bv;
+        struct bvec_iter iter;
         struct bio *ret = NULL;
 
         BUG_ON(sectors <= 0);
@@ -86,49 +87,35 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
                 ret = bio_alloc_bioset(gfp, 1, bs);
                 if (!ret)
                         return NULL;
-                idx = 0;
                 goto out;
         }
 
-        bio_for_each_segment(bv, bio, idx) {
-                vcnt = idx - bio->bi_iter.bi_idx;
-
-                if (!nbytes) {
-                        ret = bio_alloc_bioset(gfp, vcnt, bs);
-                        if (!ret)
-                                return NULL;
-
-                        memcpy(ret->bi_io_vec, __bio_iovec(bio),
-                               sizeof(struct bio_vec) * vcnt);
-
-                        break;
-                } else if (nbytes < bv->bv_len) {
-                        ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-                        if (!ret)
-                                return NULL;
-
-                        memcpy(ret->bi_io_vec, __bio_iovec(bio),
-                               sizeof(struct bio_vec) * vcnt);
-
-                        ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-                        bv->bv_offset += nbytes;
-                        bv->bv_len -= nbytes;
-                        break;
-                }
-
-                nbytes -= bv->bv_len;
-        }
+        bio_for_each_segment(bv, bio, iter) {
+                vcnt++;
+
+                if (nbytes <= bv.bv_len)
+                        break;
+
+                nbytes -= bv.bv_len;
+        }
+
+        ret = bio_alloc_bioset(gfp, vcnt, bs);
+        if (!ret)
+                return NULL;
+
+        bio_for_each_segment(bv, bio, iter) {
+                ret->bi_io_vec[ret->bi_vcnt++] = bv;
+
+                if (ret->bi_vcnt == vcnt)
+                        break;
+        }
+
+        ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
 out:
         ret->bi_bdev = bio->bi_bdev;
         ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
         ret->bi_iter.bi_size = sectors << 9;
         ret->bi_rw = bio->bi_rw;
-        ret->bi_vcnt = vcnt;
-        ret->bi_max_vecs = vcnt;
-
-        bio->bi_iter.bi_sector += sectors;
-        bio->bi_iter.bi_size -= sectors << 9;
-        bio->bi_iter.bi_idx = idx;
 
         if (bio_integrity(bio)) {
                 if (bio_integrity_clone(ret, bio, gfp)) {
@@ -137,9 +124,10 @@ out:
                 }
 
                 bio_integrity_trim(ret, 0, bio_sectors(ret));
-                bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
         }
 
+        bio_advance(bio, ret->bi_iter.bi_size);
+
         return ret;
 }
 
@@ -155,12 +143,13 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 
         if (bio_segments(bio) > max_segments ||
             q->merge_bvec_fn) {
-                struct bio_vec *bv;
-                int i, seg = 0;
+                struct bio_vec bv;
+                struct bvec_iter iter;
+                unsigned seg = 0;
 
                 ret = 0;
 
-                bio_for_each_segment(bv, bio, i) {
+                bio_for_each_segment(bv, bio, iter) {
                         struct bvec_merge_data bvm = {
                                 .bi_bdev = bio->bi_bdev,
                                 .bi_sector = bio->bi_iter.bi_sector,
@@ -172,11 +161,11 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
                                 break;
 
                         if (q->merge_bvec_fn &&
-                            q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
+                            q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
                                 break;
 
                         seg++;
-                        ret += bv->bv_len >> 9;
+                        ret += bv.bv_len >> 9;
                 }
         }
 
@@ -198,14 +198,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-        struct bio_vec *bv;
+        struct bio_vec bv;
+        struct bvec_iter iter;
         uint64_t csum = 0;
-        int i;
 
-        bio_for_each_segment(bv, bio, i) {
-                void *d = kmap(bv->bv_page) + bv->bv_offset;
-                csum = bch_crc64_update(csum, d, bv->bv_len);
-                kunmap(bv->bv_page);
+        bio_for_each_segment(bv, bio, iter) {
+                void *d = kmap(bv.bv_page) + bv.bv_offset;
+                csum = bch_crc64_update(csum, d, bv.bv_len);
+                kunmap(bv.bv_page);
         }
 
         k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -1182,17 +1182,17 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned sectors)
 {
-        struct bio_vec *bv;
-        int i;
+        struct bio_vec bv;
+        struct bvec_iter iter;
 
         /* Zero fill bio */
 
-        bio_for_each_segment(bv, bio, i) {
-                unsigned j = min(bv->bv_len >> 9, sectors);
+        bio_for_each_segment(bv, bio, iter) {
+                unsigned j = min(bv.bv_len >> 9, sectors);
 
-                void *p = kmap(bv->bv_page);
-                memset(p + bv->bv_offset, 0, j << 9);
-                kunmap(bv->bv_page);
+                void *p = kmap(bv.bv_page);
+                memset(p + bv.bv_offset, 0, j << 9);
+                kunmap(bv.bv_page);
 
                 sectors -= j;
         }