block: Introduce new bio_split()
The new bio_split() can split arbitrary bios - it's not restricted to
single page bios, like the old bio_split() (previously renamed to
bio_pair_split()). It also has different semantics - it doesn't
allocate a struct bio_pair, leaving it up to the caller to handle
completions.

Then convert the existing bio_pair_split() users to the new
bio_split() - and also nvme, which was open coding bio splitting.

(We have to take that BUG_ON() out of bio_integrity_trim() because
this bio_split() needs to use it, and there's no reason it has to be
used on bios marked as cloned; BIO_CLONED doesn't seem to have clearly
documented semantics anyways.)

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Neil Brown <neilb@suse.de>
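For illustration, a minimal sketch of the caller-side pattern the new interface expects, modelled on the raid0 conversion in the hunk below. The submit_in_chunks() helper and its chunk_sects parameter are hypothetical, and the sketch assumes a power-of-2 chunk size; it is not part of this commit.

/*
 * Hypothetical helper showing the new bio_split()/bio_chain() pattern
 * (modelled on the raid0 conversion below). Assumes a power-of-2
 * chunk size; needs <linux/bio.h> and <linux/blkdev.h>.
 */
static void submit_in_chunks(struct bio *bio, unsigned chunk_sects)
{
	struct bio *split;

	do {
		sector_t sector = bio->bi_iter.bi_sector;
		unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));

		if (sectors < bio_sectors(bio)) {
			/* Carve the first 'sectors' off into a new bio... */
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			/* ...and chain it so the remainder ('bio') does not
			 * complete until the split-off piece has completed. */
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		generic_make_request(split);
	} while (split != bio);
}

Because bio_chain() ties the remainder's completion to the split-off piece, no struct bio_pair (and no extra completion bookkeeping in the caller) is needed.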
drivers/md/raid0.c
@@ -513,65 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
-	unsigned int chunk_sects;
-	sector_t sector_offset;
 	struct strip_zone *zone;
 	struct md_rdev *tmp_dev;
+	struct bio *split;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
 
-	chunk_sects = mddev->chunk_sectors;
-	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
-		sector_t sector = bio->bi_iter.bi_sector;
-		struct bio_pair *bp;
-		/* Sanity check -- queue functions should prevent this happening */
-		if (bio_multiple_segments(bio))
-			goto bad_map;
-		/* This is a one page bio that upper layers
-		 * refuse to split for us, so we need to split it.
-		 */
-		if (likely(is_power_of_2(chunk_sects)))
-			bp = bio_pair_split(bio, chunk_sects - (sector &
-								(chunk_sects-1)));
-		else
-			bp = bio_pair_split(bio, chunk_sects -
-					    sector_div(sector, chunk_sects));
-		raid0_make_request(mddev, &bp->bio1);
-		raid0_make_request(mddev, &bp->bio2);
-		bio_pair_release(bp);
-		return;
-	}
+	do {
+		sector_t sector = bio->bi_iter.bi_sector;
+		unsigned chunk_sects = mddev->chunk_sectors;
 
-	sector_offset = bio->bi_iter.bi_sector;
-	zone = find_zone(mddev->private, &sector_offset);
-	tmp_dev = map_sector(mddev, zone, bio->bi_iter.bi_sector,
-			     &sector_offset);
-	bio->bi_bdev = tmp_dev->bdev;
-	bio->bi_iter.bi_sector = sector_offset + zone->dev_start +
-				 tmp_dev->data_offset;
+		unsigned sectors = chunk_sects -
+			(likely(is_power_of_2(chunk_sects))
+			 ? (sector & (chunk_sects-1))
+			 : sector_div(sector, chunk_sects));
 
-	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-		/* Just ignore it */
-		bio_endio(bio, 0);
-		return;
-	}
+		if (sectors < bio_sectors(bio)) {
+			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+			bio_chain(split, bio);
+		} else {
+			split = bio;
+		}
 
-	generic_make_request(bio);
-	return;
+		zone = find_zone(mddev->private, &sector);
+		tmp_dev = map_sector(mddev, zone, sector, &sector);
+		split->bi_bdev = tmp_dev->bdev;
+		split->bi_iter.bi_sector = sector + zone->dev_start +
+			tmp_dev->data_offset;
 
-bad_map:
-	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
-	       " or bigger than %dk %llu %d\n",
-	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_iter.bi_sector,
-	       bio_sectors(bio) / 2);
-
-	bio_io_error(bio);
-	return;
+		if (unlikely((split->bi_rw & REQ_DISCARD) &&
+			     !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+			/* Just ignore it */
+			bio_endio(split, 0);
+		} else
+			generic_make_request(split);
+	} while (split != bio);
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)