blk-mq: ensure that we set REQ_IO_STAT so diskstats work
If disk stats are enabled on the queue, a request needs to be marked with
REQ_IO_STAT for accounting to be active on that request. This fixes an
issue with virtio-blk not showing up in /proc/diskstats after the
conversion to blk-mq.

Add QUEUE_FLAG_MQ_DEFAULT, setting stats and same-CPU-group completion
on by default.

Reported-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
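For context on the mechanics: the block layer gates diskstats accounting per
request, keyed off REQ_IO_STAT in the request's cmd_flags. A minimal sketch of
that gate, modeled on the v3.13-era blk_do_io_stat() helper in block/blk.h
(the body below is an illustrative paraphrase, not part of this patch):

/*
 * Sketch of the per-request accounting gate (approximation of the
 * v3.13-era blk_do_io_stat() from block/blk.h, not part of this patch).
 * The blk_account_io_*() paths check this and skip /proc/diskstats
 * accounting entirely when it returns false -- so a request that never
 * had REQ_IO_STAT set at init time is invisible to diskstats.
 */
static inline int blk_do_io_stat(struct request *rq)
{
        return rq->rq_disk &&                           /* attached to a gendisk */
               (rq->cmd_flags & REQ_IO_STAT) &&         /* stats enabled for this rq */
               (rq->cmd_type == REQ_TYPE_FS);           /* filesystem request */
}

Before this patch, blk_mq_rq_ctx_init() copied rw_flags into rq->cmd_flags
without consulting the queue's QUEUE_FLAG_IO_STAT, so the gate above never
passed for blk-mq requests; the first hunk below adds the blk_queue_io_stat(q)
check at request-init time.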
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
-                               unsigned int rw_flags)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+                               struct request *rq, unsigned int rw_flags)
 {
+        if (blk_queue_io_stat(q))
+                rw_flags |= REQ_IO_STAT;
+
         rq->mq_ctx = ctx;
         rq->cmd_flags = rw_flags;
         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,7 +200,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 
                 rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
                 if (rq) {
-                        blk_mq_rq_ctx_init(ctx, rq, rw);
+                        blk_mq_rq_ctx_init(q, ctx, rq, rw);
                         break;
                 } else if (!(gfp & __GFP_WAIT))
                         break;
@@ -921,7 +924,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
         trace_block_getrq(q, bio, rw);
         rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
         if (likely(rq))
-                blk_mq_rq_ctx_init(ctx, rq, rw);
+                blk_mq_rq_ctx_init(q, ctx, rq, rw);
         else {
                 blk_mq_put_ctx(ctx);
                 trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1380,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
         q->queue_hw_ctx = hctxs;
 
         q->mq_ops = reg->ops;
+        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
         blk_queue_make_request(q, blk_mq_make_request);
         blk_queue_rq_timed_out(q, reg->ops->timeout);

--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -505,6 +505,9 @@ struct request_queue {
                                  (1 << QUEUE_FLAG_SAME_COMP)  |       \
                                  (1 << QUEUE_FLAG_ADD_RANDOM))
 
+#define QUEUE_FLAG_MQ_DEFAULT   ((1 << QUEUE_FLAG_IO_STAT) |          \
+                                 (1 << QUEUE_FLAG_SAME_COMP))
+
 static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
         if (q->queue_lock)