FROMGIT: block/mq-deadline: Micro-optimize the batching algorithm
When dispatching the first request of a batch, the deadline_move_request()
call clears .next_rq[] for the opposite data direction. .next_rq[] is not
restored when changing data direction. Fix this by not clearing .next_rq[]
and by keeping track of the data direction of a batch in a variable instead.

This patch is a micro-optimization because:
- The number of deadline_next_request() calls for the read direction is halved.
- The number of times that deadline_next_request() returns NULL is reduced.

Cc: Damien Le Moal <damien.lemoal@wdc.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>

BUG: 187357408
Change-Id: I582e99603a5443d75cf2b18a5daa2c93b5c66de3
(cherry picked from commit ea0fd2a525436ab5b9ada0f1953b0c0a29357311
 git://git.kernel.dk/linux-block/ for-5.14/block)
Signed-off-by: Bart Van Assche <bvanassche@google.com>
Committed by: Todd Kjos
Parent: 179aecb2b2
Commit: 63544e140b
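
The effect of the change can be illustrated with a minimal userspace model of
the dispatch loop. This is a hypothetical sketch, not kernel code: only
enum dd_data_dir, the last_dir field, and the deadline_next_request() call
pattern are taken from the patch below; the next_request() stub and the
counting harness are invented to illustrate the lookup-count claims in the
commit message for a pure-read workload.

#include <stdio.h>

enum dd_data_dir { DD_READ, DD_WRITE };

static int lookups;

/* Stand-in for deadline_next_request(): pretend only reads are queued,
 * so a write-direction lookup always comes back empty (NULL). */
static int next_request(enum dd_data_dir dir)
{
	lookups++;
	return dir == DD_READ;
}

int main(void)
{
	enum dd_data_dir last_dir = DD_READ;	/* direction of the running batch */
	int i;

	/* Old scheme: probe writes first, fall back to reads on NULL. */
	lookups = 0;
	for (i = 0; i < 16; i++)
		if (!next_request(DD_WRITE))
			next_request(DD_READ);
	printf("old scheme: %d lookups\n", lookups);

	/* New scheme: one lookup in the remembered batch direction. */
	lookups = 0;
	for (i = 0; i < 16; i++)
		next_request(last_dir);
	printf("new scheme: %d lookups\n", lookups);

	return 0;
}

For 16 read dispatches the old scheme performs 32 lookups (every one of the
16 write-side probes returns NULL) while the new scheme performs 16, matching
the commit message's claims that lookups while batching reads are halved and
that NULL returns from deadline_next_request() are reduced.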
@@ -51,6 +51,8 @@ struct deadline_data {
 	struct rb_root sort_list[DD_DIR_COUNT];
 	struct list_head fifo_list[DD_DIR_COUNT];
 
+	/* Data direction of latest dispatched request. */
+	enum dd_data_dir last_dir;
 	/*
 	 * next in sort order. read, write or both are NULL
 	 */
@@ -177,8 +179,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
 {
 	const enum dd_data_dir data_dir = rq_data_dir(rq);
 
-	dd->next_rq[DD_READ] = NULL;
-	dd->next_rq[DD_WRITE] = NULL;
 	dd->next_rq[data_dir] = deadline_latter_request(rq);
 
 	/*
@@ -290,10 +290,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
 	/*
 	 * batches are currently reads XOR writes
 	 */
-	rq = deadline_next_request(dd, DD_WRITE);
-	if (!rq)
-		rq = deadline_next_request(dd, DD_READ);
-
+	rq = deadline_next_request(dd, dd->last_dir);
 	if (rq && dd->batching < dd->fifo_batch)
 		/* we have a next request are still entitled to batch */
 		goto dispatch_request;
@@ -359,6 +356,7 @@ dispatch_find_request:
 	if (!rq)
 		return NULL;
 
+	dd->last_dir = data_dir;
 	dd->batching = 0;
 
 dispatch_request:
@@ -471,6 +469,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 	dd->fifo_expire[DD_WRITE] = write_expire;
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
+	dd->last_dir = DD_WRITE;
 	dd->fifo_batch = fifo_batch;
 	spin_lock_init(&dd->lock);
 	spin_lock_init(&dd->zone_lock);