Merge branch 'for-4.10/block' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
 "This is the main block pull request this series. Contrary to previous
  releases, I've kept the core and driver changes in the same branch. We
  always ended up having dependencies between the two for obvious
  reasons, so it makes more sense to keep them together. That said, I'll
  probably try and keep more topical branches going forward, especially
  for cycles that end up being as busy as this one.

  The major parts of this pull request are:

   - Improved support for O_DIRECT on block devices, with a small
     private implementation instead of using the pig that is
     fs/direct-io.c. From Christoph.

   - Request completion tracking in a scalable fashion. This is
     utilized by two components in this pull, the new hybrid polling
     and the writeback queue throttling code.

   - Improved support for polling with O_DIRECT, adding a hybrid mode
     that combines pure polling with an initial sleep. From me.

   - Support for automatic throttling of writeback queues on the block
     side. This uses feedback from the device completion latencies to
     scale the queue on the block side up or down. From me.

   - Support for SMR drives in the block layer and for SD. From Hannes
     and Shaun.

   - Multi-connection support for nbd. From Josef.

   - Cleanup of request and bio flags, so we have a clear split between
     which are bio (or rq) private, and which ones are shared. From
     Christoph.

   - A set of patches from Bart, that improve how we handle queue
     stopping and starting in blk-mq.

   - Support for WRITE_ZEROES from Chaitanya.

   - Lightnvm updates from Javier/Matias.

   - Support for FC for the nvme-over-fabrics code. From James Smart.

   - A bunch of fixes from a whole slew of people, too many to name
     here"

* 'for-4.10/block' of git://git.kernel.dk/linux-block: (182 commits)
  blk-stat: fix a few cases of missing batch flushing
  blk-flush: run the queue when inserting blk-mq flush
  elevator: make the rqhash helpers exported
  blk-mq: abstract out blk_mq_dispatch_rq_list() helper
  blk-mq: add blk_mq_start_stopped_hw_queue()
  block: improve handling of the magic discard payload
  blk-wbt: don't throttle discard or write zeroes
  nbd: use dev_err_ratelimited in io path
  nbd: reset the setup task for NBD_CLEAR_SOCK
  nvme-fabrics: Add FC LLDD loopback driver to test FC-NVME
  nvme-fabrics: Add target support for FC transport
  nvme-fabrics: Add host support for FC transport
  nvme-fabrics: Add FC transport LLDD api definitions
  nvme-fabrics: Add FC transport FC-NVME definitions
  nvme-fabrics: Add FC transport error codes to nvme.h
  Add type 0x28 NVME type code to scsi fc headers
  nvme-fabrics: patch target code in prep for FC transport support
  nvme-fabrics: set sqe.command_id in core not transports
  parser: add u64 number parser
  nvme-rdma: align to generic ib_event logging helper
  ...
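The drivers/md hunks that follow apply two mechanical conversions from the flag cleanup called out above: bio_set_op_attrs() and the composite READ_SYNC/WRITE_* macros are replaced by writing the op and flags into bio->bi_opf directly, and bio_init() now takes the bvec table and its size, so callers no longer assign bi_io_vec/bi_max_vecs by hand. A minimal before/after sketch of the pattern (init_meta_read() and its arguments are invented for illustration):

#include <linux/bio.h>

/* Illustration only -- the same conversion the hunks below apply. */
static void init_meta_read(struct bio *bio, struct bio_vec *vecs,
			   unsigned short nr_vecs)
{
	/* old style:
	 *	bio_init(bio);
	 *	bio->bi_io_vec   = vecs;
	 *	bio->bi_max_vecs = nr_vecs;
	 *	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
	 */
	bio_init(bio, vecs, nr_vecs);		/* bvec table passed to bio_init() */
	bio->bi_opf = REQ_OP_READ | REQ_META;	/* op and flags set directly */
}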
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io = btree_node_read_endio;
 	bio->bi_private = &cl;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 
 	bch_bio_map(bio, b->keys.set[0].data);
 
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io = btree_node_write_endio;
 	b->bio->bi_private = cl;
 	b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-	bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+	b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
 	bch_bio_map(b->bio, i);
 
 	/*
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
 	bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 	bch_bio_map(bio, sorted);
 
 	submit_bio_wait(bio);
@@ -107,22 +107,26 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
 	char name[BDEVNAME_SIZE];
 	struct bio *check;
-	struct bio_vec bv;
-	struct bvec_iter iter;
+	struct bio_vec bv, cbv;
+	struct bvec_iter iter, citer = { 0 };
 
 	check = bio_clone(bio, GFP_NOIO);
 	if (!check)
 		return;
-	bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+	check->bi_opf = REQ_OP_READ;
 
 	if (bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;
 
 	submit_bio_wait(check);
 
+	citer.bi_size = UINT_MAX;
 	bio_for_each_segment(bv, bio, iter) {
 		void *p1 = kmap_atomic(bv.bv_page);
-		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
+		void *p2;
+
+		cbv = bio_iter_iovec(check, citer);
+		p2 = page_address(cbv.bv_page);
 
 		cache_set_err_on(memcmp(p1 + bv.bv_offset,
 					p2 + bv.bv_offset,
@@ -133,6 +137,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 			(uint64_t) bio->bi_iter.bi_sector);
 
 		kunmap_atomic(p1);
+		bio_advance_iter(check, &citer, bv.bv_len);
 	}
 
 	bio_free_pages(check);
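The bch_data_verify() hunk above stops indexing check->bi_io_vec and instead walks the cloned bio with its own bvec_iter, advanced in lock-step with the original. A sketch of that iteration pattern, assuming two bios that cover the same data (compare_bios() is an invented name):

/* Illustration only: visit matching segments of two bios without
 * touching ->bi_io_vec directly. */
static void compare_bios(struct bio *a, struct bio *b)
{
	struct bio_vec av, bv;
	struct bvec_iter aiter, biter = { 0 };

	biter.bi_size = UINT_MAX;		/* iterate b by explicit advances */
	bio_for_each_segment(av, a, aiter) {
		bv = bio_iter_iovec(b, biter);	/* current segment of b */
		/* ... compare av against bv here ... */
		bio_advance_iter(b, &biter, av.bv_len);
	}
}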
@@ -24,9 +24,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
 	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
 	struct bio *bio = &b->bio;
 
-	bio_init(bio);
-	bio->bi_max_vecs = bucket_pages(c);
-	bio->bi_io_vec = bio->bi_inline_vecs;
+	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
 
 	return bio;
 }
@@ -448,13 +448,11 @@ static void do_journal_discard(struct cache *ca)
 
 		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
-		bio_init(bio);
+		bio_init(bio, bio->bi_inline_vecs, 1);
 		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
 		bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
 						ca->sb.d[ja->discard_idx]);
 		bio->bi_bdev = ca->bdev;
-		bio->bi_max_vecs = 1;
-		bio->bi_io_vec = bio->bi_inline_vecs;
 		bio->bi_iter.bi_size = bucket_bytes(ca);
 		bio->bi_end_io = journal_discard_endio;
 
@@ -77,15 +77,13 @@ static void moving_init(struct moving_io *io)
 {
 	struct bio *bio = &io->bio.bio;
 
-	bio_init(bio);
+	bio_init(bio, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
 	bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
-	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
-					PAGE_SECTORS);
 	bio->bi_private = &io->cl;
-	bio->bi_io_vec = bio->bi_inline_vecs;
 	bch_bio_map(bio, NULL);
 }
 
@@ -404,8 +404,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (!congested &&
 	    mode == CACHE_MODE_WRITEBACK &&
-	    op_is_write(bio_op(bio)) &&
-	    (bio->bi_opf & REQ_SYNC))
+	    op_is_write(bio->bi_opf) &&
+	    op_is_sync(bio->bi_opf))
 		goto rescale;
 
 	spin_lock(&dc->io_lock);
@@ -623,7 +623,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
 
-	bio_init(bio);
+	bio_init(bio, NULL, 0);
 	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io = request_endio;
 	bio->bi_private = &s->cl;
@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			flush->bi_bdev = bio->bi_bdev;
 			flush->bi_end_io = request_endio;
 			flush->bi_private = cl;
-			bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 			closure_bio_submit(flush, cl);
 		}
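This hunk is the first of several that retire the composite write macros. Judging from the conversions in this series, WRITE_FLUSH maps to REQ_PREFLUSH, WRITE_FUA to REQ_FUA, and WRITE_FLUSH_FUA to REQ_PREFLUSH | REQ_FUA, OR'ed together with the REQ_OP_* value in bi_opf (or left in .bi_op_flags for dm_io requests). A hedged sketch of an empty flush bio under the new spelling (issue_flush() is an invented helper):

#include <linux/bio.h>

/* Sketch only: submit an empty flush on bdev with the new flag names.
 * old: bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); */
static void issue_flush(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;	/* add REQ_FUA for the old WRITE_FLUSH_FUA */
	submit_bio(bio);
}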
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 		return "bad uuid pointer";
 
 	bkey_copy(&c->uuid_bucket, k);
-	uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+	uuid_io(c, REQ_OP_READ, 0, k, cl);
 
 	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
 		struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 		ca->prio_last_buckets[bucket_nr] = bucket;
 		bucket_nr++;
 
-		prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+		prio_io(ca, bucket, REQ_OP_READ, 0);
 
 		if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 			pr_warn("bad csum reading priorities");
@@ -1152,9 +1152,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	dc->bdev = bdev;
 	dc->bdev->bd_holder = dc;
 
-	bio_init(&dc->sb_bio);
-	dc->sb_bio.bi_max_vecs = 1;
-	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
 	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
 	get_page(sb_page);
 
@@ -1814,9 +1812,7 @@ static int cache_alloc(struct cache *ca)
 	__module_get(THIS_MODULE);
 	kobject_init(&ca->kobj, &bch_cache_ktype);
 
-	bio_init(&ca->journal.bio);
-	ca->journal.bio.bi_max_vecs = 8;
-	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
 
 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
@@ -1852,9 +1848,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
 
-	bio_init(&ca->sb_bio);
-	ca->sb_bio.bi_max_vecs = 1;
-	ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
 	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
 	get_page(sb_page);
 
@@ -106,14 +106,13 @@ static void dirty_init(struct keybuf_key *w)
 	struct dirty_io *io = w->private;
 	struct bio *bio = &io->bio;
 
-	bio_init(bio);
+	bio_init(bio, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
 	if (!io->dc->writeback_percent)
 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
 	bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
-	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
 	bio->bi_private = w;
-	bio->bi_io_vec = bio->bi_inline_vecs;
 	bch_bio_map(bio, NULL);
 }
 
@@ -57,8 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	if (would_skip)
 		return false;
 
-	return bio->bi_opf & REQ_SYNC ||
-		in_use <= CUTOFF_WRITEBACK;
+	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
 }
 
 static inline void bch_writeback_queue(struct cached_dev *dc)
@@ -611,9 +611,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	char *ptr;
 	int len;
 
-	bio_init(&b->bio);
-	b->bio.bi_io_vec = b->bio_vec;
-	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
 	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = inline_endio;
@@ -1316,7 +1314,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,
@@ -1135,7 +1135,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 	clone->bi_bdev = cc->dev->bdev;
-	bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+	clone->bi_opf = io->base_bio->bi_opf;
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
 	};
 
 	lc->io_req.bi_op = REQ_OP_WRITE;
-	lc->io_req.bi_op_flags = WRITE_FLUSH;
+	lc->io_req.bi_op_flags = REQ_PREFLUSH;
 
 	return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
@@ -75,12 +75,6 @@ static void dm_old_start_queue(struct request_queue *q)
 
 static void dm_mq_start_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	blk_mq_start_stopped_hw_queues(q, true);
 	blk_mq_kick_requeue_list(q);
 }
@@ -105,20 +99,10 @@ static void dm_old_stop_queue(struct request_queue *q)
 
 static void dm_mq_stop_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (blk_queue_stopped(q)) {
-		spin_unlock_irqrestore(q->queue_lock, flags);
+	if (blk_mq_queue_stopped(q))
 		return;
-	}
-
-	queue_flag_set(QUEUE_FLAG_STOPPED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	/* Avoid that requeuing could restart the queue. */
-	blk_mq_cancel_requeue_work(q);
-	blk_mq_stop_hw_queues(q);
+	blk_mq_quiesce_queue(q);
 }
 
 void dm_stop_queue(struct request_queue *q)
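The two dm-rq hunks above reflect the blk-mq stop/start rework mentioned in the pull message: the driver drops its private QUEUE_FLAG_STOPPED bookkeeping and leans on the core helpers instead. A sketch of the resulting pattern (my_stop_queue()/my_start_queue() are invented wrappers mirroring the added lines above):

#include <linux/blk-mq.h>

static void my_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;
	/* stops the hw queues and waits for in-flight ->queue_rq calls */
	blk_mq_quiesce_queue(q);
}

static void my_start_queue(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);
	blk_mq_kick_requeue_list(q);
}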
@@ -313,7 +297,7 @@ static void dm_unprep_request(struct request *rq)
 
 	if (!rq->q->mq_ops) {
 		rq->special = NULL;
-		rq->cmd_flags &= ~REQ_DONTPREP;
+		rq->rq_flags &= ~RQF_DONTPREP;
 	}
 
 	if (clone)
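From here on the dm-rq hunks show the other half of the flag split described in the pull message: request-private state moves out of rq->cmd_flags (REQ_*) into the new rq->rq_flags field (RQF_*), leaving cmd_flags for the bits shared with bios. A one-line sketch of the change (mark_request_failed() is an invented helper):

#include <linux/blkdev.h>

static void mark_request_failed(struct request *rq)
{
	/* old: rq->cmd_flags |= REQ_FAILED; */
	rq->rq_flags |= RQF_FAILED;	/* request-private bits now live in rq_flags */
}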
@@ -338,12 +322,7 @@ static void dm_old_requeue_request(struct request *rq)
 
 static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (!blk_queue_stopped(q))
-		blk_mq_delay_kick_requeue_list(q, msecs);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_mq_delay_kick_requeue_list(q, msecs);
 }
 
 void dm_mq_kick_requeue_list(struct mapped_device *md)
@@ -354,7 +333,7 @@ EXPORT_SYMBOL(dm_mq_kick_requeue_list);
 
 static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 {
-	blk_mq_requeue_request(rq);
+	blk_mq_requeue_request(rq, false);
 	__dm_mq_kick_requeue_list(rq->q, msecs);
 }
 
@@ -431,7 +410,7 @@ static void dm_softirq_done(struct request *rq)
 		return;
 	}
 
-	if (rq->cmd_flags & REQ_FAILED)
+	if (rq->rq_flags & RQF_FAILED)
 		mapped = false;
 
 	dm_done(clone, tio->error, mapped);
@@ -460,7 +439,7 @@ static void dm_complete_request(struct request *rq, int error)
  */
 static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	rq->cmd_flags |= REQ_FAILED;
+	rq->rq_flags |= RQF_FAILED;
 	dm_complete_request(rq, error);
 }
 
@@ -476,7 +455,7 @@ static void end_clone_request(struct request *clone, int error)
 	 * For just cleaning up the information of the queue in which
 	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced
-	 * from dm own mempool (REQ_ALLOCED isn't set).
+	 * from dm own mempool (RQF_ALLOCED isn't set).
 	 */
 	__blk_put_request(clone->q, clone);
 }
@@ -497,7 +476,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 	int r;
 
 	if (blk_queue_io_stat(clone->q))
-		clone->cmd_flags |= REQ_IO_STAT;
+		clone->rq_flags |= RQF_IO_STAT;
 
 	clone->start_time = jiffies;
 	r = blk_insert_cloned_request(clone->q, clone);
@@ -633,7 +612,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 		return BLKPREP_DEFER;
 
 	rq->special = tio;
-	rq->cmd_flags |= REQ_DONTPREP;
+	rq->rq_flags |= RQF_DONTPREP;
 
 	return BLKPREP_OK;
 }
@@ -904,17 +883,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		dm_put_live_table(md, srcu_idx);
 	}
 
-	/*
-	 * On suspend dm_stop_queue() handles stopping the blk-mq
-	 * request_queue BUT: even though the hw_queues are marked
-	 * BLK_MQ_S_STOPPED at that point there is still a race that
-	 * is allowing block/blk-mq.c to call ->queue_rq against a
-	 * hctx that it really shouldn't. The following check guards
-	 * against this rarity (albeit _not_ race-free).
-	 */
-	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
-		return BLK_MQ_RQ_QUEUE_BUSY;
-
 	if (ti->type->busy && ti->type->busy(ti))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
 	/*
 	 * Commit exceptions to disk.
 	 */
-	if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+	if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
 		ps->valid = 0;
 
 	/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
 	for (i = 0; i < nr_merged; i++)
 		clear_exception(ps, ps->current_committed - 1 - i);
 
-	r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
 	if (r < 0)
 		return r;
 
@@ -1525,9 +1525,9 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->bdev)
 		goto bad;
 
-	bio_init(&md->flush_bio);
+	bio_init(&md->flush_bio, NULL, 0);
 	md->flush_bio.bi_bdev = md->bdev;
-	bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	dm_stats_init(&md->stats);
 
@@ -394,7 +394,7 @@ static void submit_flushes(struct work_struct *ws)
 			bi->bi_end_io = md_end_flush;
 			bi->bi_private = rdev;
 			bi->bi_bdev = rdev->bdev;
-			bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 			atomic_inc(&mddev->flush_pending);
 			submit_bio(bi);
 			rcu_read_lock();
@@ -743,7 +743,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 
 	atomic_inc(&mddev->pending_writes);
 	submit_bio(bio);
@@ -130,7 +130,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
-	bio_init(&mp_bh->bio);
+	bio_init(&mp_bh->bio, NULL, 0);
 	__bio_clone_fast(&mp_bh->bio, bio);
 
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
@@ -685,7 +685,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 	bio_reset(&log->flush_bio);
 	log->flush_bio.bi_bdev = log->rdev->bdev;
 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
-	bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	submit_bio(&log->flush_bio);
 }
 
@@ -1053,7 +1053,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 	mb->checksum = cpu_to_le32(crc);
 
 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-			  WRITE_FUA, false)) {
+			  REQ_FUA, false)) {
 		__free_page(page);
 		return -EIO;
 	}
@@ -1205,7 +1205,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	INIT_LIST_HEAD(&log->io_end_ios);
 	INIT_LIST_HEAD(&log->flushing_ios);
 	INIT_LIST_HEAD(&log->finished_ios);
-	bio_init(&log->flush_bio);
+	bio_init(&log->flush_bio, NULL, 0);
 
 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
 	if (!log->io_kc)
@@ -913,7 +913,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 			op = REQ_OP_WRITE;
 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			if (test_bit(R5_Discard, &sh->dev[i].flags))
 				op = REQ_OP_DISCARD;
 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
@@ -2004,13 +2004,8 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
 		for (i = 0; i < disks; i++) {
 			struct r5dev *dev = &sh->dev[i];
 
-			bio_init(&dev->req);
-			dev->req.bi_io_vec = &dev->vec;
-			dev->req.bi_max_vecs = 1;
-
-			bio_init(&dev->rreq);
-			dev->rreq.bi_io_vec = &dev->rvec;
-			dev->rreq.bi_max_vecs = 1;
+			bio_init(&dev->req, &dev->vec, 1);
+			bio_init(&dev->rreq, &dev->rvec, 1);
 		}
 	}
 	return sh;