Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:

 - the big change is the cleanup from Mike Christie, cleaning up our
   uses of command types and modified flags. This is what will throw
   some merge conflicts

 - a regression fix for the above for btrfs, from Vincent

 - a follow-up to the above, better packing of struct request from
   Christoph

 - a 2038 fix for blktrace from Arnd

 - a few trivial/spelling fixes from Bart Van Assche

 - a front merge check fix from Damien, which could cause issues on
   SMR drives

 - an Atari partition fix from Gabriel

 - convert cfq to highres timers, since jiffies isn't granular enough
   for some devices these days. From Jan and Jeff

 - a CFQ priority boost fix for idle classes, from me

 - a cleanup series from Ming, improving our bio/bvec iteration

 - a direct issue fix for blk-mq from Omar

 - a fix so that plug merging consults the IO scheduler, like we do
   for other types of merges. From Tahsin

 - expose the DAX type internally and through sysfs. From Toshi and
   Yigal

* 'for-4.8/core' of git://git.kernel.dk/linux-block: (76 commits)
  block: Fix front merge check
  block: do not merge requests without consulting with io scheduler
  block: Fix spelling in a source code comment
  block: expose QUEUE_FLAG_DAX in sysfs
  block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
  Btrfs: fix comparison in __btrfs_map_block()
  block: atari: Return early for unsupported sector size
  Doc: block: Fix a typo in queue-sysfs.txt
  cfq-iosched: Charge at least 1 jiffie instead of 1 ns
  cfq-iosched: Fix regression in bonnie++ rewrite performance
  cfq-iosched: Convert slice_resid from u64 to s64
  block: Convert fifo_time from ulong to u64
  blktrace: avoid using timespec
  block/blk-cgroup.c: Declare local symbols static
  block/bio-integrity.c: Add #include "blk.h"
  block/partition-generic.c: Remove a set-but-not-used variable
  block: bio: kill BIO_MAX_SIZE
  cfq-iosched: temporarily boost queue priority for idle classes
  block: drbd: avoid to use BIO_MAX_SIZE
  block: bio: remove BIO_MAX_SECTORS
  ...
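Nearly all of the btrfs churn in the diff below is one mechanical conversion: the request operation now lives in the bio itself, set with bio_set_op_attrs() and read back with bio_op(), instead of being passed alongside the bio as a separate "rw" argument to submit_bio()/submit_bio_wait(). A minimal before/after sketch of the pattern (illustrative only; the bdev/sector variables are placeholders, not code from this commit):

    /* Before: the op and flags travel in a separate 'rw' argument. */
    bio->bi_bdev = bdev;                  /* illustrative device */
    bio->bi_iter.bi_sector = sector;      /* illustrative sector */
    submit_bio(WRITE | REQ_SYNC, bio);

    /* After: encode op and flags in the bio, then submit it alone. */
    bio->bi_bdev = bdev;
    bio->bi_iter.bi_sector = sector;
    bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); /* sets bio->bi_rw */
    submit_bio(bio);           /* callers read the op back via bio_op(bio) */

The same split shows up in helpers like btrfsic_submit_bh(), which now takes (op, op_flags) instead of a combined rw value.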
@@ -1673,6 +1673,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 }
 bio->bi_bdev = block_ctx->dev->bdev;
 bio->bi_iter.bi_sector = dev_bytenr >> 9;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 for (j = i; j < num_pages; j++) {
 ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1685,7 +1686,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 "btrfsic: error, failed to add a single page!\n");
 return -1;
 }
-if (submit_bio_wait(READ, bio)) {
+if (submit_bio_wait(bio)) {
 printk(KERN_INFO
 "btrfsic: read error at logical %llu dev %s!\n",
 block_ctx->start, block_ctx->dev->name);
@@ -2206,7 +2207,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
 block->dev_bytenr, block->mirror_num);
 next_block = block->next_in_same_bio;
 block->iodone_w_error = iodone_w_error;
-if (block->submit_bio_bh_rw & REQ_FLUSH) {
+if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
 dev_state->last_flush_gen++;
 if ((dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2242,7 +2243,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
 block->dev_bytenr, block->mirror_num);

 block->iodone_w_error = iodone_w_error;
-if (block->submit_bio_bh_rw & REQ_FLUSH) {
+if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
 dev_state->last_flush_gen++;
 if ((dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2855,12 +2856,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
 return ds;
 }

-int btrfsic_submit_bh(int rw, struct buffer_head *bh)
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
 struct btrfsic_dev_state *dev_state;

 if (!btrfsic_is_initialized)
-return submit_bh(rw, bh);
+return submit_bh(op, op_flags, bh);

 mutex_lock(&btrfsic_mutex);
 /* since btrfsic_submit_bh() might also be called before
@@ -2869,26 +2870,26 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)

 /* Only called to write the superblock (incl. FLUSH/FUA) */
 if (NULL != dev_state &&
-(rw & WRITE) && bh->b_size > 0) {
+(op == REQ_OP_WRITE) && bh->b_size > 0) {
 u64 dev_bytenr;

 dev_bytenr = 4096 * bh->b_blocknr;
 if (dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 printk(KERN_INFO
-"submit_bh(rw=0x%x, blocknr=%llu (bytenr %llu),"
-" size=%zu, data=%p, bdev=%p)\n",
-rw, (unsigned long long)bh->b_blocknr,
+"submit_bh(op=0x%x,0x%x, blocknr=%llu "
+"(bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
+op, op_flags, (unsigned long long)bh->b_blocknr,
 dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
 btrfsic_process_written_block(dev_state, dev_bytenr,
 &bh->b_data, 1, NULL,
-NULL, bh, rw);
-} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+NULL, bh, op_flags);
+} else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) {
 if (dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 printk(KERN_INFO
-"submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
-rw, bh->b_bdev);
+"submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
+op, op_flags, bh->b_bdev);
 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 if ((dev_state->state->print_mask &
 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -2906,7 +2907,7 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
 block->never_written = 0;
 block->iodone_w_error = 0;
 block->flush_gen = dev_state->last_flush_gen + 1;
-block->submit_bio_bh_rw = rw;
+block->submit_bio_bh_rw = op_flags;
 block->orig_bio_bh_private = bh->b_private;
 block->orig_bio_bh_end_io.bh = bh->b_end_io;
 block->next_in_same_bio = NULL;
@@ -2915,10 +2916,10 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
 }
 }
 mutex_unlock(&btrfsic_mutex);
-return submit_bh(rw, bh);
+return submit_bh(op, op_flags, bh);
 }

-static void __btrfsic_submit_bio(int rw, struct bio *bio)
+static void __btrfsic_submit_bio(struct bio *bio)
 {
 struct btrfsic_dev_state *dev_state;

@@ -2930,7 +2931,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
 * btrfsic_mount(), this might return NULL */
 dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
 if (NULL != dev_state &&
-(rw & WRITE) && NULL != bio->bi_io_vec) {
+(bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
 unsigned int i;
 u64 dev_bytenr;
 u64 cur_bytenr;
@@ -2942,9 +2943,9 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
 if (dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 printk(KERN_INFO
-"submit_bio(rw=0x%x, bi_vcnt=%u,"
+"submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
 " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-rw, bio->bi_vcnt,
+bio_op(bio), bio->bi_rw, bio->bi_vcnt,
 (unsigned long long)bio->bi_iter.bi_sector,
 dev_bytenr, bio->bi_bdev);

@@ -2975,18 +2976,18 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
 btrfsic_process_written_block(dev_state, dev_bytenr,
 mapped_datav, bio->bi_vcnt,
 bio, &bio_is_patched,
-NULL, rw);
+NULL, bio->bi_rw);
 while (i > 0) {
 i--;
 kunmap(bio->bi_io_vec[i].bv_page);
 }
 kfree(mapped_datav);
-} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+} else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) {
 if (dev_state->state->print_mask &
 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 printk(KERN_INFO
-"submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
-rw, bio->bi_bdev);
+"submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+bio_op(bio), bio->bi_rw, bio->bi_bdev);
 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 if ((dev_state->state->print_mask &
 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -3004,7 +3005,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
 block->never_written = 0;
 block->iodone_w_error = 0;
 block->flush_gen = dev_state->last_flush_gen + 1;
-block->submit_bio_bh_rw = rw;
+block->submit_bio_bh_rw = bio->bi_rw;
 block->orig_bio_bh_private = bio->bi_private;
 block->orig_bio_bh_end_io.bio = bio->bi_end_io;
 block->next_in_same_bio = NULL;
@@ -3016,16 +3017,16 @@ leave:
 mutex_unlock(&btrfsic_mutex);
 }

-void btrfsic_submit_bio(int rw, struct bio *bio)
+void btrfsic_submit_bio(struct bio *bio)
 {
-__btrfsic_submit_bio(rw, bio);
-submit_bio(rw, bio);
+__btrfsic_submit_bio(bio);
+submit_bio(bio);
 }

-int btrfsic_submit_bio_wait(int rw, struct bio *bio)
+int btrfsic_submit_bio_wait(struct bio *bio)
 {
-__btrfsic_submit_bio(rw, bio);
-return submit_bio_wait(rw, bio);
+__btrfsic_submit_bio(bio);
+return submit_bio_wait(bio);
 }

 int btrfsic_mount(struct btrfs_root *root,
@@ -20,9 +20,9 @@
 #define __BTRFS_CHECK_INTEGRITY__

 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-int btrfsic_submit_bh(int rw, struct buffer_head *bh);
-void btrfsic_submit_bio(int rw, struct bio *bio);
-int btrfsic_submit_bio_wait(int rw, struct bio *bio);
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh);
+void btrfsic_submit_bio(struct bio *bio);
+int btrfsic_submit_bio_wait(struct bio *bio);
 #else
 #define btrfsic_submit_bh submit_bh
 #define btrfsic_submit_bio submit_bio
@@ -363,6 +363,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 kfree(cb);
 return -ENOMEM;
 }
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 bio->bi_private = cb;
 bio->bi_end_io = end_compressed_bio_write;
 atomic_inc(&cb->pending_bios);
@@ -373,7 +374,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 page = compressed_pages[pg_index];
 page->mapping = inode->i_mapping;
 if (bio->bi_iter.bi_size)
-ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
+ret = io_tree->ops->merge_bio_hook(page, 0,
 PAGE_SIZE,
 bio, 0);
 else
@@ -401,13 +402,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 BUG_ON(ret); /* -ENOMEM */
 }

-ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
+ret = btrfs_map_bio(root, bio, 0, 1);
 BUG_ON(ret); /* -ENOMEM */

 bio_put(bio);

 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
 BUG_ON(!bio);
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 bio->bi_private = cb;
 bio->bi_end_io = end_compressed_bio_write;
 bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -431,7 +433,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 BUG_ON(ret); /* -ENOMEM */
 }

-ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
+ret = btrfs_map_bio(root, bio, 0, 1);
 BUG_ON(ret); /* -ENOMEM */

 bio_put(bio);
@@ -646,6 +648,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
 if (!comp_bio)
 goto fail2;
+bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
 comp_bio->bi_private = cb;
 comp_bio->bi_end_io = end_compressed_bio_read;
 atomic_inc(&cb->pending_bios);
@@ -656,7 +659,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 page->index = em_start >> PAGE_SHIFT;

 if (comp_bio->bi_iter.bi_size)
-ret = tree->ops->merge_bio_hook(READ, page, 0,
+ret = tree->ops->merge_bio_hook(page, 0,
 PAGE_SIZE,
 comp_bio, 0);
 else
@@ -687,8 +690,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 root->sectorsize);

-ret = btrfs_map_bio(root, READ, comp_bio,
-mirror_num, 0);
+ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
 if (ret) {
 bio->bi_error = ret;
 bio_endio(comp_bio);
@@ -699,6 +701,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
 GFP_NOFS);
 BUG_ON(!comp_bio);
+bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
 comp_bio->bi_private = cb;
 comp_bio->bi_end_io = end_compressed_bio_read;

@@ -717,7 +720,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 BUG_ON(ret); /* -ENOMEM */
 }

-ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
+ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
 if (ret) {
 bio->bi_error = ret;
 bio_endio(comp_bio);
@@ -3091,7 +3091,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 struct btrfs_root *new_root,
 struct btrfs_root *parent_root,
 u64 new_dirid);
-int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
 size_t size, struct bio *bio,
 unsigned long bio_flags);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -124,7 +124,6 @@ struct async_submit_bio {
 struct list_head list;
 extent_submit_bio_hook_t *submit_bio_start;
 extent_submit_bio_hook_t *submit_bio_done;
-int rw;
 int mirror_num;
 unsigned long bio_flags;
 /*
@@ -727,7 +726,7 @@ static void end_workqueue_bio(struct bio *bio)
 fs_info = end_io_wq->info;
 end_io_wq->error = bio->bi_error;

-if (bio->bi_rw & REQ_WRITE) {
+if (bio_op(bio) == REQ_OP_WRITE) {
 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
 wq = fs_info->endio_meta_write_workers;
 func = btrfs_endio_meta_write_helper;
@@ -797,7 +796,7 @@ static void run_one_async_start(struct btrfs_work *work)
 int ret;

 async = container_of(work, struct async_submit_bio, work);
-ret = async->submit_bio_start(async->inode, async->rw, async->bio,
+ret = async->submit_bio_start(async->inode, async->bio,
 async->mirror_num, async->bio_flags,
 async->bio_offset);
 if (ret)
@@ -830,9 +829,8 @@ static void run_one_async_done(struct btrfs_work *work)
 return;
 }

-async->submit_bio_done(async->inode, async->rw, async->bio,
-async->mirror_num, async->bio_flags,
-async->bio_offset);
+async->submit_bio_done(async->inode, async->bio, async->mirror_num,
+async->bio_flags, async->bio_offset);
 }

 static void run_one_async_free(struct btrfs_work *work)
@@ -844,7 +842,7 @@ static void run_one_async_free(struct btrfs_work *work)
 }

 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-int rw, struct bio *bio, int mirror_num,
+struct bio *bio, int mirror_num,
 unsigned long bio_flags,
 u64 bio_offset,
 extent_submit_bio_hook_t *submit_bio_start,
@@ -857,7 +855,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 return -ENOMEM;

 async->inode = inode;
-async->rw = rw;
 async->bio = bio;
 async->mirror_num = mirror_num;
 async->submit_bio_start = submit_bio_start;
@@ -873,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,

 atomic_inc(&fs_info->nr_async_submits);

-if (rw & REQ_SYNC)
+if (bio->bi_rw & REQ_SYNC)
 btrfs_set_work_high_priority(&async->work);

 btrfs_queue_work(fs_info->workers, &async->work);
@@ -903,9 +900,8 @@ static int btree_csum_one_bio(struct bio *bio)
 return ret;
 }

-static int __btree_submit_bio_start(struct inode *inode, int rw,
-struct bio *bio, int mirror_num,
-unsigned long bio_flags,
+static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
+int mirror_num, unsigned long bio_flags,
 u64 bio_offset)
 {
 /*
@@ -915,7 +911,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
 return btree_csum_one_bio(bio);
 }

-static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
 int mirror_num, unsigned long bio_flags,
 u64 bio_offset)
 {
@@ -925,7 +921,7 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 * when we're called for a write, we're already in the async
 * submission context. Just jump into btrfs_map_bio
 */
-ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
+ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
 if (ret) {
 bio->bi_error = ret;
 bio_endio(bio);
@@ -944,14 +940,14 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
 return 1;
 }

-static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 int mirror_num, unsigned long bio_flags,
 u64 bio_offset)
 {
 int async = check_async_write(inode, bio_flags);
 int ret;

-if (!(rw & REQ_WRITE)) {
+if (bio_op(bio) != REQ_OP_WRITE) {
 /*
 * called for a read, do the setup so that checksum validation
 * can happen in the async kernel threads
@@ -960,21 +956,19 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 bio, BTRFS_WQ_ENDIO_METADATA);
 if (ret)
 goto out_w_error;
-ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
-mirror_num, 0);
+ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
 } else if (!async) {
 ret = btree_csum_one_bio(bio);
 if (ret)
 goto out_w_error;
-ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
-mirror_num, 0);
+ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
 } else {
 /*
 * kthread helpers are used to submit writes so that
 * checksumming can happen in parallel across all CPUs
 */
 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-inode, rw, bio, mirror_num, 0,
+inode, bio, mirror_num, 0,
 bio_offset,
 __btree_submit_bio_start,
 __btree_submit_bio_done);
@@ -3418,9 +3412,9 @@ static int write_dev_supers(struct btrfs_device *device,
 * to go down lazy.
 */
 if (i == 0)
-ret = btrfsic_submit_bh(WRITE_FUA, bh);
+ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
 else
-ret = btrfsic_submit_bh(WRITE_SYNC, bh);
+ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
 if (ret)
 errors++;
 }
@@ -3484,12 +3478,13 @@ static int write_dev_flush(struct btrfs_device *device, int wait)

 bio->bi_end_io = btrfs_end_empty_barrier;
 bio->bi_bdev = device->bdev;
+bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
 init_completion(&device->flush_wait);
 bio->bi_private = &device->flush_wait;
 device->flush_bio = bio;

 bio_get(bio);
-btrfsic_submit_bio(WRITE_FLUSH, bio);
+btrfsic_submit_bio(bio);

 return 0;
 }
@@ -122,7 +122,7 @@ void btrfs_csum_final(u32 crc, char *result);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 enum btrfs_wq_endio_type metadata);
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-int rw, struct bio *bio, int mirror_num,
+struct bio *bio, int mirror_num,
 unsigned long bio_flags, u64 bio_offset,
 extent_submit_bio_hook_t *submit_bio_start,
 extent_submit_bio_hook_t *submit_bio_done);
@@ -2048,7 +2048,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 */
 btrfs_bio_counter_inc_blocked(root->fs_info);
 /* Tell the block device(s) that the sectors can be discarded */
-ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
+ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
 bytenr, &num_bytes, &bbio, 0);
 /* Error condition is -ENOMEM */
 if (!ret) {
@@ -2049,9 +2049,10 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 return -EIO;
 }
 bio->bi_bdev = dev->bdev;
+bio->bi_rw = WRITE_SYNC;
 bio_add_page(bio, page, length, pg_offset);

-if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
+if (btrfsic_submit_bio_wait(bio)) {
 /* try to remap that extent elsewhere? */
 btrfs_bio_counter_dec(fs_info);
 bio_put(bio);
@@ -2386,7 +2387,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 int read_mode;
 int ret;

-BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 if (ret)
@@ -2412,12 +2413,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 free_io_failure(inode, failrec);
 return -EIO;
 }
+bio_set_op_attrs(bio, REQ_OP_READ, read_mode);

 pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
 read_mode, failrec->this_mirror, failrec->in_validation);

-ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
-failrec->this_mirror,
+ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
 failrec->bio_flags, 0);
 if (ret) {
 free_io_failure(inode, failrec);
@@ -2723,8 +2724,8 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 }


-static int __must_check submit_one_bio(int rw, struct bio *bio,
-int mirror_num, unsigned long bio_flags)
+static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
+unsigned long bio_flags)
 {
 int ret = 0;
 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2735,33 +2736,32 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
 start = page_offset(page) + bvec->bv_offset;

 bio->bi_private = NULL;

 bio_get(bio);

 if (tree->ops && tree->ops->submit_bio_hook)
-ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
 mirror_num, bio_flags, start);
 else
-btrfsic_submit_bio(rw, bio);
+btrfsic_submit_bio(bio);

 bio_put(bio);
 return ret;
 }

-static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
+static int merge_bio(struct extent_io_tree *tree, struct page *page,
 unsigned long offset, size_t size, struct bio *bio,
 unsigned long bio_flags)
 {
 int ret = 0;
 if (tree->ops && tree->ops->merge_bio_hook)
-ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
+ret = tree->ops->merge_bio_hook(page, offset, size, bio,
 bio_flags);
 BUG_ON(ret < 0);
 return ret;

 }

-static int submit_extent_page(int rw, struct extent_io_tree *tree,
+static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
 struct writeback_control *wbc,
 struct page *page, sector_t sector,
 size_t size, unsigned long offset,
@@ -2789,10 +2789,9 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,

 if (prev_bio_flags != bio_flags || !contig ||
 force_bio_submit ||
-merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
+merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
 bio_add_page(bio, page, page_size, offset) < page_size) {
-ret = submit_one_bio(rw, bio, mirror_num,
-prev_bio_flags);
+ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
 if (ret < 0) {
 *bio_ret = NULL;
 return ret;
@@ -2813,6 +2812,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 bio_add_page(bio, page, page_size, offset);
 bio->bi_end_io = end_io_func;
 bio->bi_private = tree;
+bio_set_op_attrs(bio, op, op_flags);
 if (wbc) {
 wbc_init_bio(wbc, bio);
 wbc_account_io(wbc, page, page_size);
@@ -2821,7 +2821,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 if (bio_ret)
 *bio_ret = bio;
 else
-ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
+ret = submit_one_bio(bio, mirror_num, bio_flags);

 return ret;
 }
@@ -2885,7 +2885,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 get_extent_t *get_extent,
 struct extent_map **em_cached,
 struct bio **bio, int mirror_num,
-unsigned long *bio_flags, int rw,
+unsigned long *bio_flags, int read_flags,
 u64 *prev_em_start)
 {
 struct inode *inode = page->mapping->host;
@@ -3068,8 +3068,8 @@ static int __do_readpage(struct extent_io_tree *tree,
 }

 pnr -= page->index;
-ret = submit_extent_page(rw, tree, NULL, page,
-sector, disk_io_size, pg_offset,
+ret = submit_extent_page(REQ_OP_READ, read_flags, tree, NULL,
+page, sector, disk_io_size, pg_offset,
 bdev, bio, pnr,
 end_bio_extent_readpage, mirror_num,
 *bio_flags,
@@ -3100,7 +3100,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 get_extent_t *get_extent,
 struct extent_map **em_cached,
 struct bio **bio, int mirror_num,
-unsigned long *bio_flags, int rw,
+unsigned long *bio_flags,
 u64 *prev_em_start)
 {
 struct inode *inode;
@@ -3121,7 +3121,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,

 for (index = 0; index < nr_pages; index++) {
 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
-mirror_num, bio_flags, rw, prev_em_start);
+mirror_num, bio_flags, 0, prev_em_start);
 put_page(pages[index]);
 }
 }
@@ -3131,7 +3131,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
 int nr_pages, get_extent_t *get_extent,
 struct extent_map **em_cached,
 struct bio **bio, int mirror_num,
-unsigned long *bio_flags, int rw,
+unsigned long *bio_flags,
 u64 *prev_em_start)
 {
 u64 start = 0;
@@ -3153,7 +3153,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
 index - first_index, start,
 end, get_extent, em_cached,
 bio, mirror_num, bio_flags,
-rw, prev_em_start);
+prev_em_start);
 start = page_start;
 end = start + PAGE_SIZE - 1;
 first_index = index;
@@ -3164,7 +3164,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
 __do_contiguous_readpages(tree, &pages[first_index],
 index - first_index, start,
 end, get_extent, em_cached, bio,
-mirror_num, bio_flags, rw,
+mirror_num, bio_flags,
 prev_em_start);
 }

@@ -3172,7 +3172,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 struct page *page,
 get_extent_t *get_extent,
 struct bio **bio, int mirror_num,
-unsigned long *bio_flags, int rw)
+unsigned long *bio_flags, int read_flags)
 {
 struct inode *inode = page->mapping->host;
 struct btrfs_ordered_extent *ordered;
@@ -3192,7 +3192,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 }

 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
-bio_flags, rw, NULL);
+bio_flags, read_flags, NULL);
 return ret;
 }

@@ -3204,9 +3204,9 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 int ret;

 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
-&bio_flags, READ);
+&bio_flags, 0);
 if (bio)
-ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ret = submit_one_bio(bio, mirror_num, bio_flags);
 return ret;
 }

@@ -3440,8 +3440,8 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 page->index, cur, end);
 }

-ret = submit_extent_page(write_flags, tree, wbc, page,
-sector, iosize, pg_offset,
+ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc,
+page, sector, iosize, pg_offset,
 bdev, &epd->bio, max_nr,
 end_bio_extent_writepage,
 0, 0, 0, false);
@@ -3480,13 +3480,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 size_t pg_offset = 0;
 loff_t i_size = i_size_read(inode);
 unsigned long end_index = i_size >> PAGE_SHIFT;
-int write_flags;
+int write_flags = 0;
 unsigned long nr_written = 0;

 if (wbc->sync_mode == WB_SYNC_ALL)
 write_flags = WRITE_SYNC;
-else
-write_flags = WRITE;

 trace___extent_writepage(page, inode, wbc);

@@ -3730,7 +3728,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 u64 offset = eb->start;
 unsigned long i, num_pages;
 unsigned long bio_flags = 0;
-int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
+int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
 int ret = 0;

 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -3744,9 +3742,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,

 clear_page_dirty_for_io(p);
 set_page_writeback(p);
-ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
-PAGE_SIZE, 0, bdev, &epd->bio,
--1, end_bio_extent_buffer_writepage,
+ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc,
+p, offset >> 9, PAGE_SIZE, 0, bdev,
+&epd->bio, -1,
+end_bio_extent_buffer_writepage,
 0, epd->bio_flags, bio_flags, false);
 epd->bio_flags = bio_flags;
 if (ret) {
@@ -4056,13 +4055,12 @@ retry:
 static void flush_epd_write_bio(struct extent_page_data *epd)
 {
 if (epd->bio) {
-int rw = WRITE;
 int ret;

-if (epd->sync_io)
-rw = WRITE_SYNC;
+bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
+epd->sync_io ? WRITE_SYNC : 0);

-ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
+ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
 BUG_ON(ret < 0); /* -ENOMEM */
 epd->bio = NULL;
 }
@@ -4189,19 +4187,19 @@ int extent_readpages(struct extent_io_tree *tree,
 if (nr < ARRAY_SIZE(pagepool))
 continue;
 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-&bio, 0, &bio_flags, READ, &prev_em_start);
+&bio, 0, &bio_flags, &prev_em_start);
 nr = 0;
 }
 if (nr)
 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-&bio, 0, &bio_flags, READ, &prev_em_start);
+&bio, 0, &bio_flags, &prev_em_start);

 if (em_cached)
 free_extent_map(em_cached);

 BUG_ON(!list_empty(pages));
 if (bio)
-return submit_one_bio(READ, bio, 0, bio_flags);
+return submit_one_bio(bio, 0, bio_flags);
 return 0;
 }

@@ -5236,7 +5234,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 err = __extent_read_full_page(tree, page,
 get_extent, &bio,
 mirror_num, &bio_flags,
-READ | REQ_META);
+REQ_META);
 if (err)
 ret = err;
 } else {
@@ -5245,8 +5243,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 }

 if (bio) {
-err = submit_one_bio(READ | REQ_META, bio, mirror_num,
-bio_flags);
+err = submit_one_bio(bio, mirror_num, bio_flags);
 if (err)
 return err;
 }
@@ -63,16 +63,16 @@ struct btrfs_root;
 struct btrfs_io_bio;
 struct io_failure_record;

-typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
-struct bio *bio, int mirror_num,
-unsigned long bio_flags, u64 bio_offset);
+typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
+int mirror_num, unsigned long bio_flags,
+u64 bio_offset);
 struct extent_io_ops {
 int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
 u64 start, u64 end, int *page_started,
 unsigned long *nr_written);
 int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 extent_submit_bio_hook_t *submit_bio_hook;
-int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
+int (*merge_bio_hook)(struct page *page, unsigned long offset,
 size_t size, struct bio *bio,
 unsigned long bio_flags);
 int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
@@ -1823,7 +1823,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
-int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
 size_t size, struct bio *bio,
 unsigned long bio_flags)
 {
@@ -1838,7 +1838,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,

 length = bio->bi_iter.bi_size;
 map_length = length;
-ret = btrfs_map_block(root->fs_info, rw, logical,
+ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
 &map_length, NULL, 0);
 /* Will always return 0 with map_multi == NULL */
 BUG_ON(ret < 0);
@@ -1855,9 +1855,8 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
 * At IO completion time the cums attached on the ordered extent record
 * are inserted into the btree
 */
-static int __btrfs_submit_bio_start(struct inode *inode, int rw,
-struct bio *bio, int mirror_num,
-unsigned long bio_flags,
+static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
+int mirror_num, unsigned long bio_flags,
 u64 bio_offset)
 {
 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -1876,14 +1875,14 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
 * At IO completion time the cums attached on the ordered extent record
 * are inserted into the btree
 */
-static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
 int mirror_num, unsigned long bio_flags,
 u64 bio_offset)
 {
 struct btrfs_root *root = BTRFS_I(inode)->root;
 int ret;

-ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
+ret = btrfs_map_bio(root, bio, mirror_num, 1);
 if (ret) {
 bio->bi_error = ret;
 bio_endio(bio);
@@ -1895,7 +1894,7 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
-static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
 int mirror_num, unsigned long bio_flags,
 u64 bio_offset)
 {
@@ -1910,7 +1909,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 if (btrfs_is_free_space_inode(inode))
 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

-if (!(rw & REQ_WRITE)) {
+if (bio_op(bio) != REQ_OP_WRITE) {
 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
 if (ret)
 goto out;
@@ -1932,7 +1931,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 goto mapit;
 /* we're doing a write, do the async checksumming */
 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-inode, rw, bio, mirror_num,
+inode, bio, mirror_num,
 bio_flags, bio_offset,
 __btrfs_submit_bio_start,
 __btrfs_submit_bio_done);
@@ -1944,7 +1943,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 }

 mapit:
-ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+ret = btrfs_map_bio(root, bio, mirror_num, 0);

 out:
 if (ret < 0) {
@@ -7790,12 +7789,12 @@ err:
 }

 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
-int rw, int mirror_num)
+int mirror_num)
 {
 struct btrfs_root *root = BTRFS_I(inode)->root;
 int ret;

-BUG_ON(rw & REQ_WRITE);
+BUG_ON(bio_op(bio) == REQ_OP_WRITE);

 bio_get(bio);

@@ -7804,7 +7803,7 @@ static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
 if (ret)
 goto err;

-ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+ret = btrfs_map_bio(root, bio, mirror_num, 0);
 err:
 bio_put(bio);
 return ret;
@@ -7855,7 +7854,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 int read_mode;
 int ret;

-BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 if (ret)
@@ -7883,13 +7882,13 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 free_io_failure(inode, failrec);
 return -EIO;
 }
+bio_set_op_attrs(bio, REQ_OP_READ, read_mode);

 btrfs_debug(BTRFS_I(inode)->root->fs_info,
 "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
 read_mode, failrec->this_mirror, failrec->in_validation);

-ret = submit_dio_repair_bio(inode, bio, read_mode,
-failrec->this_mirror);
+ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
 if (ret) {
 free_io_failure(inode, failrec);
 bio_put(bio);
@@ -8179,7 +8178,7 @@ static void btrfs_endio_direct_write(struct bio *bio)
 bio_put(bio);
 }

-static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
+static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
 struct bio *bio, int mirror_num,
 unsigned long bio_flags, u64 offset)
 {
@@ -8197,8 +8196,8 @@ static void btrfs_end_dio_bio(struct bio *bio)

 if (err)
 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
-"direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
-btrfs_ino(dip->inode), bio->bi_rw,
+"direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
+btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw,
 (unsigned long long)bio->bi_iter.bi_sector,
 bio->bi_iter.bi_size, err);

@@ -8272,11 +8271,11 @@ static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
 }

 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
-int rw, u64 file_offset, int skip_sum,
+u64 file_offset, int skip_sum,
 int async_submit)
 {
 struct btrfs_dio_private *dip = bio->bi_private;
-int write = rw & REQ_WRITE;
+bool write = bio_op(bio) == REQ_OP_WRITE;
 struct btrfs_root *root = BTRFS_I(inode)->root;
 int ret;

@@ -8297,8 +8296,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,

 if (write && async_submit) {
 ret = btrfs_wq_submit_bio(root->fs_info,
-inode, rw, bio, 0, 0,
-file_offset,
+inode, bio, 0, 0, file_offset,
 __btrfs_submit_bio_start_direct_io,
 __btrfs_submit_bio_done);
 goto err;
@@ -8317,13 +8315,13 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 goto err;
 }
 map:
-ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
+ret = btrfs_map_bio(root, bio, 0, async_submit);
 err:
 bio_put(bio);
 return ret;
 }

-static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
+static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 int skip_sum)
 {
 struct inode *inode = dip->inode;
@@ -8342,8 +8340,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 int i;

 map_length = orig_bio->bi_iter.bi_size;
-ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
-&map_length, NULL, 0);
+ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
+start_sector << 9, &map_length, NULL, 0);
 if (ret)
 return -EIO;

@@ -8363,6 +8361,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 if (!bio)
 return -ENOMEM;

+bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
 bio->bi_private = dip;
 bio->bi_end_io = btrfs_end_dio_bio;
 btrfs_io_bio(bio)->logical = file_offset;
@@ -8382,7 +8381,7 @@ next_block:
 * before we're done setting it up
 */
 atomic_inc(&dip->pending_bios);
-ret = __btrfs_submit_dio_bio(bio, inode, rw,
+ret = __btrfs_submit_dio_bio(bio, inode,
 file_offset, skip_sum,
 async_submit);
 if (ret) {
@@ -8400,12 +8399,13 @@ next_block:
 start_sector, GFP_NOFS);
 if (!bio)
 goto out_err;
+bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
 bio->bi_private = dip;
 bio->bi_end_io = btrfs_end_dio_bio;
 btrfs_io_bio(bio)->logical = file_offset;

 map_length = orig_bio->bi_iter.bi_size;
-ret = btrfs_map_block(root->fs_info, rw,
+ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
 start_sector << 9,
 &map_length, NULL, 0);
 if (ret) {
@@ -8425,7 +8425,7 @@ next_block:
 }

 submit:
-ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
+ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
 async_submit);
 if (!ret)
 return 0;
@@ -8445,14 +8445,14 @@ out_err:
 return 0;
 }

-static void btrfs_submit_direct(int rw, struct bio *dio_bio,
-struct inode *inode, loff_t file_offset)
+static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
+loff_t file_offset)
 {
 struct btrfs_dio_private *dip = NULL;
 struct bio *io_bio = NULL;
 struct btrfs_io_bio *btrfs_bio;
 int skip_sum;
-int write = rw & REQ_WRITE;
+bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
 int ret = 0;

 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -8503,7 +8503,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
 dio_data->unsubmitted_oe_range_end;
 }

-ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
+ret = btrfs_submit_direct_hook(dip, skip_sum);
 if (!ret)
 return;

@@ -1320,7 +1320,9 @@ write_data:

 bio->bi_private = rbio;
 bio->bi_end_io = raid_write_end_io;
-submit_bio(WRITE, bio);
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+submit_bio(bio);
 }
 return;

@@ -1573,11 +1575,12 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)

 bio->bi_private = rbio;
 bio->bi_end_io = raid_rmw_end_io;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 btrfs_bio_wq_end_io(rbio->fs_info, bio,
 BTRFS_WQ_ENDIO_RAID56);

-submit_bio(READ, bio);
+submit_bio(bio);
 }
 /* the actual write will happen once the reads are done */
 return 0;
@@ -2097,11 +2100,12 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)

 bio->bi_private = rbio;
 bio->bi_end_io = raid_recover_end_io;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 btrfs_bio_wq_end_io(rbio->fs_info, bio,
 BTRFS_WQ_ENDIO_RAID56);

-submit_bio(READ, bio);
+submit_bio(bio);
 }
 out:
 return 0;
@@ -2433,7 +2437,9 @@ submit_write:

 bio->bi_private = rbio;
 bio->bi_end_io = raid_write_end_io;
-submit_bio(WRITE, bio);
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+submit_bio(bio);
 }
 return;

@@ -2610,11 +2616,12 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)

 bio->bi_private = rbio;
 bio->bi_end_io = raid56_parity_scrub_end_io;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

 btrfs_bio_wq_end_io(rbio->fs_info, bio,
 BTRFS_WQ_ENDIO_RAID56);

-submit_bio(READ, bio);
+submit_bio(bio);
 }
 /* the actual write will happen once the reads are done */
 return;
@@ -1504,8 +1504,9 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 sblock->no_io_error_seen = 0;
 } else {
 bio->bi_iter.bi_sector = page->physical >> 9;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);

-if (btrfsic_submit_bio_wait(READ, bio))
+if (btrfsic_submit_bio_wait(bio))
 sblock->no_io_error_seen = 0;
 }

@@ -1583,6 +1584,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 return -EIO;
 bio->bi_bdev = page_bad->dev->bdev;
 bio->bi_iter.bi_sector = page_bad->physical >> 9;
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
 if (PAGE_SIZE != ret) {
@@ -1590,7 +1592,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 return -EIO;
 }

-if (btrfsic_submit_bio_wait(WRITE, bio)) {
+if (btrfsic_submit_bio_wait(bio)) {
 btrfs_dev_stat_inc_and_print(page_bad->dev,
 BTRFS_DEV_STAT_WRITE_ERRS);
 btrfs_dev_replace_stats_inc(
@@ -1684,6 +1686,7 @@ again:
 bio->bi_end_io = scrub_wr_bio_end_io;
 bio->bi_bdev = sbio->dev->bdev;
 bio->bi_iter.bi_sector = sbio->physical >> 9;
+bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 sbio->err = 0;
 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 spage->physical_for_dev_replace ||
@@ -1731,7 +1734,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
 * orders the requests before sending them to the driver which
 * doubled the write performance on spinning disks when measured
 * with Linux 3.5 */
-btrfsic_submit_bio(WRITE, sbio->bio);
+btrfsic_submit_bio(sbio->bio);
 }

 static void scrub_wr_bio_end_io(struct bio *bio)
@@ -2041,7 +2044,7 @@ static void scrub_submit(struct scrub_ctx *sctx)
 sbio = sctx->bios[sctx->curr];
 sctx->curr = -1;
 scrub_pending_bio_inc(sctx);
-btrfsic_submit_bio(READ, sbio->bio);
+btrfsic_submit_bio(sbio->bio);
 }

 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
@@ -2088,6 +2091,7 @@ again:
 bio->bi_end_io = scrub_bio_end_io;
 bio->bi_bdev = sbio->dev->bdev;
 bio->bi_iter.bi_sector = sbio->physical >> 9;
+bio_set_op_attrs(bio, REQ_OP_READ, 0);
 sbio->err = 0;
 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 spage->physical ||
@@ -4436,6 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 bio->bi_iter.bi_size = 0;
 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 bio->bi_bdev = dev->bdev;
+bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
 if (ret != PAGE_SIZE) {
 leave_with_eio:
@@ -4444,7 +4449,7 @@ leave_with_eio:
 return -EIO;
 }

-if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
+if (btrfsic_submit_bio_wait(bio))
 goto leave_with_eio;

 bio_put(bio);
@@ -462,7 +462,7 @@ loop_lock:
|
||||
sync_pending = 0;
|
||||
}
|
||||
|
||||
btrfsic_submit_bio(cur->bi_rw, cur);
|
||||
btrfsic_submit_bio(cur);
|
||||
num_run++;
|
||||
batch_run++;
|
||||
|
||||
@@ -5260,7 +5260,7 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
|
||||
kfree(bbio);
|
||||
}
|
||||
|
||||
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
|
||||
u64 logical, u64 *length,
|
||||
struct btrfs_bio **bbio_ret,
|
||||
int mirror_num, int need_raid_map)
|
||||
@@ -5346,7 +5346,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
raid56_full_stripe_start *= full_stripe_len;
|
||||
}
|
||||
|
||||
if (rw & REQ_DISCARD) {
|
||||
if (op == REQ_OP_DISCARD) {
|
||||
/* we don't discard raid56 yet */
|
||||
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
|
||||
ret = -EOPNOTSUPP;
|
||||
@@ -5359,7 +5359,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
For other RAID types and for RAID[56] reads, just allow a single
|
||||
stripe (on a single disk). */
|
||||
if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
|
||||
(rw & REQ_WRITE)) {
|
||||
(op == REQ_OP_WRITE)) {
|
||||
max_len = stripe_len * nr_data_stripes(map) -
|
||||
(offset - raid56_full_stripe_start);
|
||||
} else {
|
||||
@@ -5384,8 +5384,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
btrfs_dev_replace_set_lock_blocking(dev_replace);
|
||||
|
||||
if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
|
||||
!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
|
||||
dev_replace->tgtdev != NULL) {
|
||||
op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
|
||||
op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
|
||||
/*
|
||||
* in dev-replace case, for repair case (that's the only
|
||||
* case where the mirror is selected explicitly when
|
||||
@@ -5472,15 +5472,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
(offset + *length);
|
||||
|
||||
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
|
||||
if (rw & REQ_DISCARD)
|
||||
if (op == REQ_OP_DISCARD)
|
||||
num_stripes = min_t(u64, map->num_stripes,
|
||||
stripe_nr_end - stripe_nr_orig);
|
||||
stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
|
||||
&stripe_index);
|
||||
if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
|
||||
if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
|
||||
op != REQ_GET_READ_MIRRORS)
|
||||
mirror_num = 1;
|
||||
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
|
||||
if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
|
||||
if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
|
||||
op == REQ_GET_READ_MIRRORS)
|
||||
num_stripes = map->num_stripes;
|
||||
else if (mirror_num)
|
||||
stripe_index = mirror_num - 1;
|
||||
@@ -5493,7 +5495,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
}
|
||||
|
||||
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
|
||||
if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
|
||||
if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
|
||||
op == REQ_GET_READ_MIRRORS) {
|
||||
num_stripes = map->num_stripes;
|
||||
} else if (mirror_num) {
|
||||
stripe_index = mirror_num - 1;
|
||||
@@ -5507,9 +5510,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
|
||||
stripe_index *= map->sub_stripes;
|
||||
|
||||
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
|
||||
if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
|
||||
num_stripes = map->sub_stripes;
|
||||
else if (rw & REQ_DISCARD)
|
||||
else if (op == REQ_OP_DISCARD)
|
||||
num_stripes = min_t(u64, map->sub_stripes *
|
||||
(stripe_nr_end - stripe_nr_orig),
|
||||
map->num_stripes);
|
||||
@@ -5527,7 +5530,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
|
||||
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
|
||||
if (need_raid_map &&
|
||||
((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
|
||||
(op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS ||
|
||||
mirror_num > 1)) {
|
||||
/* push stripe_nr back to the start of the full stripe */
|
||||
stripe_nr = div_u64(raid56_full_stripe_start,
|
||||
@@ -5555,8 +5558,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
/* We distribute the parity blocks across stripes */
|
||||
div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
|
||||
&stripe_index);
|
||||
if (!(rw & (REQ_WRITE | REQ_DISCARD |
|
||||
REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
|
||||
if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
|
||||
op != REQ_GET_READ_MIRRORS) && mirror_num <= 1)
|
||||
mirror_num = 1;
|
||||
}
|
||||
} else {
|
||||
@@ -5579,9 +5582,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
|
||||
num_alloc_stripes = num_stripes;
|
||||
if (dev_replace_is_ongoing) {
|
||||
if (rw & (REQ_WRITE | REQ_DISCARD))
|
||||
if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
|
||||
num_alloc_stripes <<= 1;
|
||||
if (rw & REQ_GET_READ_MIRRORS)
|
||||
if (op == REQ_GET_READ_MIRRORS)
|
||||
num_alloc_stripes++;
|
||||
tgtdev_indexes = num_stripes;
|
||||
}
|
||||
@@ -5596,7 +5599,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
|
||||
/* build raid_map */
|
||||
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
|
||||
need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
|
||||
need_raid_map &&
|
||||
((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) ||
|
||||
mirror_num > 1)) {
|
||||
u64 tmp;
|
||||
unsigned rot;
|
||||
@@ -5621,7 +5625,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
RAID6_Q_STRIPE;
|
||||
}
|
||||
|
||||
if (rw & REQ_DISCARD) {
|
||||
if (op == REQ_OP_DISCARD) {
|
||||
u32 factor = 0;
|
||||
u32 sub_stripes = 0;
|
||||
u64 stripes_per_dev = 0;
|
||||
@@ -5701,14 +5705,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
|
||||
}
|
||||
}
|
||||
|
||||
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
|
||||
if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
|
||||
max_errors = btrfs_chunk_max_errors(map);
|
||||
|
||||
if (bbio->raid_map)
|
||||
sort_parity_stripes(bbio, num_stripes);
|
||||
|
||||
tgtdev_indexes = 0;
|
||||
if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
|
||||
if (dev_replace_is_ongoing &&
|
||||
(op == REQ_OP_WRITE || op == REQ_OP_DISCARD) &&
|
||||
dev_replace->tgtdev != NULL) {
|
||||
int index_where_to_add;
|
||||
u64 srcdev_devid = dev_replace->srcdev->devid;
|
||||
@@ -5743,7 +5748,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			}
 		}
 		num_stripes = index_where_to_add;
-	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
+	} else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) &&
 		   dev_replace->tgtdev != NULL) {
 		u64 srcdev_devid = dev_replace->srcdev->devid;
 		int index_srcdev = 0;
@@ -5815,21 +5820,21 @@ out:
 	return ret;
 }

-int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		      u64 logical, u64 *length,
 		      struct btrfs_bio **bbio_ret, int mirror_num)
 {
-	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
 				 mirror_num, 0);
 }

 /* For Scrub/replace */
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
 		     u64 logical, u64 *length,
 		     struct btrfs_bio **bbio_ret, int mirror_num,
 		     int need_raid_map)
 {
-	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
 				 mirror_num, need_raid_map);
 }

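With btrfs_map_block() and btrfs_map_sblock() now taking an op, callers pass a single REQ_OP_* value (or bio_op(bio) when a bio is in hand) instead of a flags word. A hypothetical, compilable stub of the new call shape; apart from the prototype shape, none of these names are real btrfs code:

#include <stdio.h>

/* Stub model; types, values, and the helper below are illustrative,
 * not the btrfs definitions. */
enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };
struct btrfs_fs_info { int dummy; };
struct btrfs_bio { int num_stripes; };

static int btrfs_map_block_stub(struct btrfs_fs_info *fs_info, int op,
				unsigned long long logical,
				unsigned long long *length,
				struct btrfs_bio **bbio_ret, int mirror_num)
{
	(void)fs_info; (void)logical; (void)bbio_ret; (void)mirror_num;
	printf("map op=%d len=%llu\n", op, *length);
	return 0;
}

int main(void)
{
	struct btrfs_fs_info fs = { 0 };
	unsigned long long len = 4096;
	struct btrfs_bio *bbio = NULL;

	/* One discrete op per call; a caller holding a bio would pass
	 * bio_op(bio) here instead of a spelled-out constant. */
	return btrfs_map_block_stub(&fs, REQ_OP_READ, 0, &len, &bbio, 0);
}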
@@ -5943,7 +5948,7 @@ static void btrfs_end_bio(struct bio *bio)
 			BUG_ON(stripe_index >= bbio->num_stripes);
 			dev = bbio->stripes[stripe_index].dev;
 			if (dev->bdev) {
-				if (bio->bi_rw & WRITE)
+				if (bio_op(bio) == REQ_OP_WRITE)
 					btrfs_dev_stat_inc(dev,
 						BTRFS_DEV_STAT_WRITE_ERRS);
 				else
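btrfs_end_bio() no longer has an rw argument to consult; it recovers the operation from the bio itself via bio_op(). In this transitional series the op is packed into the high bits of bio->bi_rw alongside the flags. The model below is illustrative only; the in-tree shift and field width may differ:

#include <assert.h>

/* Illustrative encoding: op in the high bits of bi_rw, flags in the
 * low bits. The exact BIO_OP_SHIFT value is an assumption here. */
#define BIO_OP_SHIFT 29
#define REQ_SYNC     (1u << 0)

enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 2 };

struct bio { unsigned int bi_rw; };

static void bio_set_op_attrs(struct bio *bio, enum req_op op, unsigned flags)
{
	bio->bi_rw = ((unsigned int)op << BIO_OP_SHIFT) | flags;
}

static enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_rw >> BIO_OP_SHIFT;
}

int main(void)
{
	struct bio b;

	bio_set_op_attrs(&b, REQ_OP_WRITE, REQ_SYNC);
	assert(bio_op(&b) == REQ_OP_WRITE);   /* op round-trips */
	assert(b.bi_rw & REQ_SYNC);           /* flags still visible */
	return 0;
}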
@@ -5997,7 +6002,7 @@ static void btrfs_end_bio(struct bio *bio)
  */
 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 					struct btrfs_device *device,
-					int rw, struct bio *bio)
+					struct bio *bio)
 {
 	int should_queue = 1;
 	struct btrfs_pending_bios *pending_bios;
@@ -6008,9 +6013,9 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 	}

 	/* don't bother with additional async steps for reads, right now */
-	if (!(rw & REQ_WRITE)) {
+	if (bio_op(bio) == REQ_OP_READ) {
 		bio_get(bio);
-		btrfsic_submit_bio(rw, bio);
+		btrfsic_submit_bio(bio);
 		bio_put(bio);
 		return;
 	}
@@ -6024,7 +6029,6 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 	atomic_inc(&root->fs_info->nr_async_bios);
 	WARN_ON(bio->bi_next);
 	bio->bi_next = NULL;
-	bio->bi_rw |= rw;

 	spin_lock(&device->io_lock);
 	if (bio->bi_rw & REQ_SYNC)
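The deleted bio->bi_rw |= rw; was the last place the caller's flags word was merged into the bio. It can go because the bio is already tagged with its op and flags by the time it reaches btrfs_schedule_bio(), which now only reads them. A self-contained sketch of that read-only classification, reusing the stand-in encoding from the previous model:

#include <assert.h>

/* Stand-in encoding, as in the earlier sketch; not the kernel's. */
#define BIO_OP_SHIFT 29
#define REQ_SYNC     (1u << 0)
enum req_op { REQ_OP_READ, REQ_OP_WRITE };
struct bio { unsigned int bi_rw; };

static enum req_op bio_op(const struct bio *b)
{
	return b->bi_rw >> BIO_OP_SHIFT;
}

/* Old code had to merge the caller's rw word in here:
 *     bio->bi_rw |= rw;
 * New code drops that line because bi_rw is complete on entry. */
static int queue_as_sync(const struct bio *b)
{
	return bio_op(b) == REQ_OP_WRITE && (b->bi_rw & REQ_SYNC);
}

int main(void)
{
	struct bio b = {
		.bi_rw = ((unsigned int)REQ_OP_WRITE << BIO_OP_SHIFT) | REQ_SYNC
	};
	assert(queue_as_sync(&b));
	return 0;
}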
@@ -6050,7 +6054,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,

 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 			      struct bio *bio, u64 physical, int dev_nr,
-			      int rw, int async)
+			      int async)
 {
 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

@@ -6064,8 +6068,8 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,

 		rcu_read_lock();
 		name = rcu_dereference(dev->name);
-		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
-			 "(%s id %llu), size=%u\n", rw,
+		pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
+			 "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw,
 			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
 			 name->str, dev->devid, bio->bi_iter.bi_size);
 		rcu_read_unlock();
@@ -6076,9 +6080,9 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 	btrfs_bio_counter_inc_noblocked(root->fs_info);

 	if (async)
-		btrfs_schedule_bio(root, dev, rw, bio);
+		btrfs_schedule_bio(root, dev, bio);
 	else
-		btrfsic_submit_bio(rw, bio);
+		btrfsic_submit_bio(bio);
 }

 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
@@ -6095,7 +6099,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 	}
 }

-int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
 		  int mirror_num, int async_submit)
 {
 	struct btrfs_device *dev;
@@ -6112,8 +6116,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	map_length = length;

 	btrfs_bio_counter_inc_blocked(root->fs_info);
-	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
-			      mirror_num, 1);
+	ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
+				&map_length, &bbio, mirror_num, 1);
 	if (ret) {
 		btrfs_bio_counter_dec(root->fs_info);
 		return ret;
@@ -6127,10 +6131,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-	    ((rw & WRITE) || (mirror_num > 1))) {
+	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
 		/* In this case, map_length has been set to the length of
 		   a single stripe; not the whole write */
-		if (rw & WRITE) {
+		if (bio_op(bio) == REQ_OP_WRITE) {
 			ret = raid56_parity_write(root, bio, bbio, map_length);
 		} else {
 			ret = raid56_parity_recover(root, bio, bbio, map_length,
@@ -6149,7 +6153,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,

 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
 		dev = bbio->stripes[dev_nr].dev;
-		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
+		if (!dev || !dev->bdev ||
+		    (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
 			bbio_error(bbio, first_bio, logical);
 			continue;
 		}
@@ -6161,7 +6166,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 		bio = first_bio;

 		submit_stripe_bio(root, bbio, bio,
-				  bbio->stripes[dev_nr].physical, dev_nr, rw,
+				  bbio->stripes[dev_nr].physical, dev_nr,
 				  async_submit);
 	}
 	btrfs_bio_counter_dec(root->fs_info);

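Taken together, the volumes.c hunks shrink both submission paths to a single bio argument: the op travels inside the bio, so btrfs_schedule_bio() and btrfsic_submit_bio() lose their rw parameters. A stub model of the slimmed-down dispatch; all names here are stand-ins, not the kernel functions:

#include <stdio.h>

struct bio { unsigned int bi_rw; };

static void btrfsic_submit_bio_stub(struct bio *bio)
{
	printf("submit now, bi_rw=%#x\n", bio->bi_rw);
}

static void schedule_bio_stub(struct bio *bio)
{
	printf("queue for async submit, bi_rw=%#x\n", bio->bi_rw);
}

static void submit_stripe(struct bio *bio, int async)
{
	if (async)
		schedule_bio_stub(bio);       /* was: (root, dev, rw, bio) */
	else
		btrfsic_submit_bio_stub(bio); /* was: (rw, bio) */
}

int main(void)
{
	struct bio b = { .bi_rw = 0 };

	submit_stripe(&b, 0);
	submit_stripe(&b, 1);
	return 0;
}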
@@ -375,10 +375,10 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 		   u64 end, u64 *length);
 void btrfs_get_bbio(struct btrfs_bio *bbio);
 void btrfs_put_bbio(struct btrfs_bio *bbio);
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		    u64 logical, u64 *length,
 		    struct btrfs_bio **bbio_ret, int mirror_num);
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
 		     u64 logical, u64 *length,
 		     struct btrfs_bio **bbio_ret, int mirror_num,
 		     int need_raid_map);
@@ -391,7 +391,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		      struct btrfs_root *extent_root, u64 type);
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
 		  int mirror_num, int async_submit);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 		       fmode_t flags, void *holder);
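For code outside volumes.c, the visible change is these prototypes: btrfs_map_bio() drops its rw parameter, and the map_block/map_sblock helpers take an op. A hypothetical, compilable before/after for a caller; the stub types and bodies stand in for the real kernel structures:

#include <stdio.h>

/* Stubs for illustration only; real callers live in fs/btrfs and pass
 * genuine kernel structures. */
struct btrfs_root { int dummy; };
struct bio { unsigned int bi_rw; };

static int btrfs_map_bio_stub(struct btrfs_root *root, struct bio *bio,
			      int mirror_num, int async_submit)
{
	(void)root; (void)mirror_num; (void)async_submit;
	printf("mapping bio, bi_rw=%#x\n", bio->bi_rw);
	return 0;
}

int main(void)
{
	struct btrfs_root root = { 0 };
	struct bio bio = { .bi_rw = 0 };

	/* Before this series: btrfs_map_bio(&root, WRITE, &bio, 0, 1);
	 * after: the rw argument is gone, the bio carries the op. */
	return btrfs_map_bio_stub(&root, &bio, 0, 1);
}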