Revert "block: remove the request_queue to argument request based tracepoints"

This reverts commit 1cb3032406 which is
commit a54895fa057c67700270777f7661d8d3c7fda88a upstream.

It breaks the Android GKI kernel ABI, and is not needed for Android
devices, so revert it for now.  If it is needed for this branch, it can
come back later in an ABI-stable way.

Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4148feec0c39bb0a5644203da6f1374bf1769dbd
This commit is contained in:
Greg Kroah-Hartman
2022-08-30 12:44:38 +02:00
parent 33d6fea819
commit 1e247e4040
8 changed files with 57 additions and 39 deletions

View File

@@ -808,7 +808,7 @@ static struct request *attempt_merge(struct request_queue *q,
*/
blk_account_io_merge_request(next);
trace_block_rq_merge(next);
trace_block_rq_merge(q, next);
/*
* ownership of bio passed from next to req, return 'next' for

View File

@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
void blk_mq_sched_request_inserted(struct request *rq)
{
trace_block_rq_insert(rq);
trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

View File

@@ -737,7 +737,7 @@ void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
trace_block_rq_issue(rq);
trace_block_rq_issue(q, rq);
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
rq->io_start_time_ns = ktime_get_ns();
@@ -764,7 +764,7 @@ static void __blk_mq_requeue_request(struct request *rq)
blk_mq_put_driver_tag(rq);
trace_block_rq_requeue(rq);
trace_block_rq_requeue(q, rq);
rq_qos_requeue(q, rq);
if (blk_mq_request_started(rq)) {
@@ -1864,7 +1864,7 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
lockdep_assert_held(&ctx->lock);
trace_block_rq_insert(rq);
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
list_add(&rq->queuelist, &ctx->rq_lists[type]);
@@ -1921,7 +1921,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
*/
list_for_each_entry(rq, list, queuelist) {
BUG_ON(rq->mq_ctx != ctx);
trace_block_rq_insert(rq);
trace_block_rq_insert(hctx->queue, rq);
}
spin_lock(&ctx->lock);

View File

@@ -397,7 +397,7 @@ static int map_request(struct dm_rq_target_io *tio)
}
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
ret = dm_dispatch_clone_request(clone, rq);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {

View File

@@ -2359,7 +2359,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
}
}
blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
sizeof(blktrc));
}
/**

View File

@@ -75,7 +75,8 @@ static inline bool blk_trace_note_message_enabled(struct request_queue *q)
return ret;
}
extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct block_device *bdev,
char __user *arg);
@@ -89,7 +90,7 @@ extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
# define blk_trace_shutdown(q) do { } while (0)
# define blk_add_driver_data(rq, data, len) do {} while (0)
# define blk_add_driver_data(q, rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
# define blk_trace_remove(q) (-ENOTTY)

View File

@@ -64,6 +64,7 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
/**
* block_rq_requeue - place block IO request back on a queue
* @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
@@ -72,9 +73,9 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
*/
TRACE_EVENT(block_rq_requeue,
TP_PROTO(struct request *rq),
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -146,9 +147,9 @@ TRACE_EVENT(block_rq_complete,
DECLARE_EVENT_CLASS(block_rq,
TP_PROTO(struct request *rq),
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -180,6 +181,7 @@ DECLARE_EVENT_CLASS(block_rq,
/**
* block_rq_insert - insert block operation request into queue
* @q: target queue
* @rq: block IO operation request
*
* Called immediately before block operation request @rq is inserted
@@ -189,13 +191,14 @@ DECLARE_EVENT_CLASS(block_rq,
*/
DEFINE_EVENT(block_rq, block_rq_insert,
TP_PROTO(struct request *rq),
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(rq)
TP_ARGS(q, rq)
);
/**
* block_rq_issue - issue pending block IO request operation to device driver
* @q: queue holding operation
* @rq: block IO operation operation request
*
* Called when block operation request @rq from queue @q is sent to a
@@ -203,13 +206,14 @@ DEFINE_EVENT(block_rq, block_rq_insert,
*/
DEFINE_EVENT(block_rq, block_rq_issue,
TP_PROTO(struct request *rq),
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(rq)
TP_ARGS(q, rq)
);
/**
* block_rq_merge - merge request with another one in the elevator
* @q: queue holding operation
* @rq: block IO operation operation request
*
* Called when block operation request @rq from queue @q is merged to another
@@ -217,9 +221,9 @@ DEFINE_EVENT(block_rq, block_rq_issue,
*/
DEFINE_EVENT(block_rq, block_rq_merge,
TP_PROTO(struct request *rq),
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(rq)
TP_ARGS(q, rq)
);
/**
@@ -601,6 +605,7 @@ TRACE_EVENT(block_bio_remap,
/**
* block_rq_remap - map request for a block operation request
* @q: queue holding the operation
* @rq: block IO operation request
* @dev: device for the operation
* @from: original sector for the operation
@@ -611,9 +616,10 @@ TRACE_EVENT(block_bio_remap,
*/
TRACE_EVENT(block_rq_remap,
TP_PROTO(struct request *rq, dev_t dev, sector_t from),
TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
sector_t from),
TP_ARGS(rq, dev, from),
TP_ARGS(q, rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )

View File

@@ -800,12 +800,12 @@ static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
#endif
static u64
blk_trace_request_get_cgid(struct request *rq)
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
if (!rq->bio)
return 0;
/* Use the first bio */
return blk_trace_bio_get_cgid(rq->q, rq->bio);
return blk_trace_bio_get_cgid(q, rq->bio);
}
/*
@@ -846,35 +846,40 @@ static void blk_add_trace_rq(struct request *rq, int error,
rcu_read_unlock();
}
static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
static void blk_add_trace_rq_insert(void *ignore,
struct request_queue *q, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
blk_trace_request_get_cgid(rq));
blk_trace_request_get_cgid(q, rq));
}
static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
static void blk_add_trace_rq_issue(void *ignore,
struct request_queue *q, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
blk_trace_request_get_cgid(rq));
blk_trace_request_get_cgid(q, rq));
}
static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
static void blk_add_trace_rq_merge(void *ignore,
struct request_queue *q, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
blk_trace_request_get_cgid(rq));
blk_trace_request_get_cgid(q, rq));
}
static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
static void blk_add_trace_rq_requeue(void *ignore,
struct request_queue *q,
struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
blk_trace_request_get_cgid(rq));
blk_trace_request_get_cgid(q, rq));
}
static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
int error, unsigned int nr_bytes)
{
blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
blk_trace_request_get_cgid(rq));
blk_trace_request_get_cgid(rq->q, rq));
}
/**
@@ -1082,14 +1087,16 @@ static void blk_add_trace_bio_remap(void *ignore,
* Add a trace for that action.
*
**/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
static void blk_add_trace_rq_remap(void *ignore,
struct request_queue *q,
struct request *rq, dev_t dev,
sector_t from)
{
struct blk_trace *bt;
struct blk_io_trace_remap r;
rcu_read_lock();
bt = rcu_dereference(rq->q->blk_trace);
bt = rcu_dereference(q->blk_trace);
if (likely(!bt)) {
rcu_read_unlock();
return;
@@ -1101,12 +1108,13 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
sizeof(r), &r, blk_trace_request_get_cgid(rq));
sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
rcu_read_unlock();
}
/**
* blk_add_driver_data - Add binary message with driver-specific data
* @q: queue the io is for
* @rq: io request
* @data: driver-specific data
* @len: length of driver-specific data
@@ -1115,12 +1123,14 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
* Some drivers might want to write driver-specific data per request.
*
**/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
void blk_add_driver_data(struct request_queue *q,
struct request *rq,
void *data, size_t len)
{
struct blk_trace *bt;
rcu_read_lock();
bt = rcu_dereference(rq->q->blk_trace);
bt = rcu_dereference(q->blk_trace);
if (likely(!bt)) {
rcu_read_unlock();
return;
@@ -1128,7 +1138,7 @@ void blk_add_driver_data(struct request *rq, void *data, size_t len)
__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
BLK_TA_DRV_DATA, 0, len, data,
blk_trace_request_get_cgid(rq));
blk_trace_request_get_cgid(q, rq));
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);