Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe: "The major piece in here is the immutable bio_vec series from Kent, the rest is fairly minor. It was supposed to go in last round, but various issues pushed it to this release instead. The pull request contains: - Various smaller blk-mq fixes from different folks. Nothing major here, just minor fixes and cleanups. - Fix for a memory leak in the error path in the block ioctl code from Christian Engelmayer. - Header export fix from CaiZhiyong. - Finally the immutable biovec changes from Kent Overstreet. This enables some nice future work on making arbitrarily sized bios possible, and splitting more efficient. Related fixes to immutable bio_vecs: - dm-cache immutable fixup from Mike Snitzer. - btrfs immutable fixup from Muthu Kumar. - bio-integrity fix from Nic Bellinger, which is also going to stable" * 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits) xtensa: fixup simdisk driver to work with immutable bio_vecs block/blk-mq-cpu.c: use hotcpu_notifier() blk-mq: for_each_* macro correctness block: Fix memory leak in rw_copy_check_uvector() handling bio-integrity: Fix bio_integrity_verify segment start bug block: remove unrelated header files and export symbol blk-mq: uses page->list incorrectly blk-mq: use __smp_call_function_single directly btrfs: fix missing increment of bi_remaining Revert "block: Warn and free bio if bi_end_io is not set" block: Warn and free bio if bi_end_io is not set blk-mq: fix initializing request's start time block: blk-mq: don't export blk_mq_free_queue() block: blk-mq: make blk_sync_queue support mq block: blk-mq: support draining mq queue dm cache: increment bi_remaining when bi_end_io is restored block: fixup for generic bio chaining block: Really silence spurious compiler warnings block: Silence spurious compiler warnings block: Kill bio_pair_split() ...
This commit is contained in:
@@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
|
||||
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
|
||||
{
|
||||
struct pool *pool = tc->pool;
|
||||
sector_t block_nr = bio->bi_sector;
|
||||
sector_t block_nr = bio->bi_iter.bi_sector;
|
||||
|
||||
if (block_size_is_power_of_two(pool))
|
||||
block_nr >>= pool->sectors_per_block_shift;
|
||||
@@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
|
||||
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
|
||||
{
|
||||
struct pool *pool = tc->pool;
|
||||
sector_t bi_sector = bio->bi_sector;
|
||||
sector_t bi_sector = bio->bi_iter.bi_sector;
|
||||
|
||||
bio->bi_bdev = tc->pool_dev->bdev;
|
||||
if (block_size_is_power_of_two(pool))
|
||||
bio->bi_sector = (block << pool->sectors_per_block_shift) |
|
||||
(bi_sector & (pool->sectors_per_block - 1));
|
||||
bio->bi_iter.bi_sector =
|
||||
(block << pool->sectors_per_block_shift) |
|
||||
(bi_sector & (pool->sectors_per_block - 1));
|
||||
else
|
||||
bio->bi_sector = (block * pool->sectors_per_block) +
|
||||
bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
|
||||
sector_div(bi_sector, pool->sectors_per_block);
|
||||
}
|
||||
|
||||
@@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
|
||||
|
||||
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
|
||||
{
|
||||
if (m->bio)
|
||||
if (m->bio) {
|
||||
m->bio->bi_end_io = m->saved_bi_end_io;
|
||||
atomic_inc(&m->bio->bi_remaining);
|
||||
}
|
||||
cell_error(m->tc->pool, m->cell);
|
||||
list_del(&m->list);
|
||||
mempool_free(m, m->tc->pool->mapping_pool);
|
||||
@@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
|
||||
int r;
|
||||
|
||||
bio = m->bio;
|
||||
if (bio)
|
||||
if (bio) {
|
||||
bio->bi_end_io = m->saved_bi_end_io;
|
||||
atomic_inc(&bio->bi_remaining);
|
||||
}
|
||||
|
||||
if (m->err) {
|
||||
cell_error(pool, m->cell);
|
||||
@@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
|
||||
*/
|
||||
static int io_overlaps_block(struct pool *pool, struct bio *bio)
|
||||
{
|
||||
return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
|
||||
return bio->bi_iter.bi_size ==
|
||||
(pool->sectors_per_block << SECTOR_SHIFT);
|
||||
}
|
||||
|
||||
static int io_overwrites_block(struct pool *pool, struct bio *bio)
|
||||
@@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
|
||||
if (bio_detain(pool, &key, bio, &cell))
|
||||
return;
|
||||
|
||||
if (bio_data_dir(bio) == WRITE && bio->bi_size)
|
||||
if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
|
||||
break_sharing(tc, bio, block, &key, lookup_result, cell);
|
||||
else {
|
||||
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
|
||||
@@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
|
||||
/*
|
||||
* Remap empty bios (flushes) immediately, without provisioning.
|
||||
*/
|
||||
if (!bio->bi_size) {
|
||||
if (!bio->bi_iter.bi_size) {
|
||||
inc_all_io_entry(pool, bio);
|
||||
cell_defer_no_holder(tc, cell);
|
||||
|
||||
@@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
|
||||
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
|
||||
switch (r) {
|
||||
case 0:
|
||||
if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
|
||||
if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
|
||||
handle_unserviceable_bio(tc->pool, bio);
|
||||
else {
|
||||
inc_all_io_entry(tc->pool, bio);
|
||||
@@ -2939,7 +2945,7 @@ out_unlock:
|
||||
|
||||
static int thin_map(struct dm_target *ti, struct bio *bio)
|
||||
{
|
||||
bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
|
||||
bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
|
||||
|
||||
return thin_bio_map(ti, bio);
|
||||
}
|
||||
|
Reference in New Issue
Block a user