block: add a op_is_flush helper
This centralizes the checks for bios that need to go into the flush state machine.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
committed by Jens Axboe
parent c13660a08c
commit f73f44eb00
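The call-site changes below all boil down to replacing the open-coded test bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) with one helper call. As a quick illustration, here is a minimal, stand-alone C sketch of that bitmask logic; the REQ_* values are illustrative stand-ins rather than the kernel's real flag definitions, and only the helper body mirrors the one added in this patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in values; the real bits live in the kernel headers. */
#define REQ_PREFLUSH	(1u << 0)
#define REQ_FUA		(1u << 1)

/* Same shape as the helper introduced by this commit. */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

int main(void)
{
	unsigned int bi_opf = REQ_FUA;

	/* Open-coded check that call sites used before the conversion: */
	if (bi_opf & (REQ_PREFLUSH | REQ_FUA))
		printf("open-coded check: flush path\n");

	/* Equivalent check after the conversion: */
	if (op_is_flush(bi_opf))
		printf("op_is_flush(): flush path\n");

	return 0;
}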
block/blk-core.c

@@ -1035,7 +1035,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(bio->bi_opf))
 		return false;
 
 	return true;
@@ -1641,7 +1641,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
+	if (op_is_flush(bio->bi_opf)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -2145,7 +2145,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(rq->cmd_flags))
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
@@ -3256,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+		if (op_is_flush(rq->cmd_flags))
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

block/blk-mq-sched.c

@@ -111,7 +111,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
-	const bool is_flush = op & (REQ_PREFLUSH | REQ_FUA);
 
 	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
@@ -126,7 +125,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 		 * Flush requests are special and go directly to the
 		 * dispatch list.
 		 */
-		if (!is_flush && e->type->ops.mq.get_request) {
+		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
 			rq = e->type->ops.mq.get_request(q, op, data);
 			if (rq)
 				rq->rq_flags |= RQF_QUEUED;
@@ -139,7 +138,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	}
 
 	if (rq) {
-		if (!is_flush) {
+		if (!op_is_flush(op)) {
 			rq->elv.icq = NULL;
 			if (e && e->type->icq_cache)
 				blk_mq_sched_assign_ioc(q, rq, bio);

block/blk-mq.c

@@ -1406,7 +1406,7 @@ insert:
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_mq_alloc_data data = { .flags = 0 };
 	struct request *rq;
 	unsigned int request_count = 0, srcu_idx;
@@ -1527,7 +1527,7 @@ done:
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
 	struct blk_mq_alloc_data data = { .flags = 0 };

drivers/md/bcache/request.c

@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.write_prio = 0;
 	s->iop.error = 0;
 	s->iop.flags = 0;
-	s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal = op_is_flush(bio->bi_opf);
 	s->iop.wq = bcache_wq;
 
 	return s;

drivers/md/dm-cache-target.c

@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	spin_lock_irqsave(&cache->lock, flags);
-	if (cache->need_tick_bio &&
-	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb->tick = true;
 		cache->need_tick_bio = false;
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 	return to_oblock(block_nr);
 }
 
-static int bio_triggers_commit(struct cache *cache, struct bio *bio)
-{
-	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-}
-
 /*
  * You must increment the deferred set whilst the prison cell is held. To
  * encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
 
-	if (!bio_triggers_commit(cache, bio)) {
+	if (!op_is_flush(bio->bi_opf)) {
 		accounted_request(cache, bio);
 		return;
 	}
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)
 
 static bool discard_or_flush(struct bio *bio)
 {
-	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)

drivers/md/dm-thin.c

@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-	return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+	return op_is_flush(bio->bi_opf) &&
 		dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-		    bio_op(bio) == REQ_OP_DISCARD)
+		if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
 			inc_all_io_entry(info->tc->pool, bio);
@@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context,
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(&cell->bios))) {
-		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-		     bio_op(bio) == REQ_OP_DISCARD))
+		if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
+		    bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
 			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
@@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-	    bio_op(bio) == REQ_OP_DISCARD) {
+	if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
 		thin_defer_bio_with_throttle(tc, bio);
 		return DM_MAPIO_SUBMITTED;
 	}

include/linux/blk_types.h

@@ -220,6 +220,15 @@ static inline bool op_is_write(unsigned int op)
 	return (op & 1);
 }
 
+/*
+ * Check if the bio or request is one that needs special treatment in the
+ * flush state machine.
+ */
+static inline bool op_is_flush(unsigned int op)
+{
+	return op & (REQ_FUA | REQ_PREFLUSH);
+}
+
 /*
  * Reads are always treated as synchronous, as are requests with the FUA or
  * PREFLUSH flag. Other operations may be marked as synchronous using the
|