author		Bart Van Assche <bvanassche@acm.org>	2021-06-17 17:44:52 -0700
committer	Jens Axboe <axboe@kernel.dk>	2021-06-21 15:03:40 -0600
commit		d672d325b1492f5b0e54b7226f01e2d57b58bfb4 (patch)
tree		a9a5dea29e7aac94a5dfd5d3b2f3b36457c04eab /block/mq-deadline.c
parent		07757588e5076748308dd95ee2e3cd0b82ebb8c4 (diff)
block/mq-deadline: Micro-optimize the batching algorithm
When dispatching the first request of a batch, the deadline_move_request()
call clears .next_rq[] for the opposite data direction. .next_rq[] is not
restored when changing data direction. Fix this by not clearing .next_rq[]
and by keeping track of the data direction of a batch in a variable instead.

This patch is a micro-optimization because:
- The number of deadline_next_request() calls for the read direction is
  halved.
- The number of times that deadline_next_request() returns NULL is reduced.

Cc: Damien Le Moal <damien.lemoal@wdc.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20210618004456.7280-13-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
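The effect of the change is easiest to see in isolation. Below is a minimal,
self-contained C sketch, not the kernel code: the toy_* names and the int
stand-ins for struct request are hypothetical. It models the new batching
decision, in which the scheduler remembers the direction of the current batch
in last_dir and performs a single deadline_next_request()-style lookup instead
of probing the write direction and falling back to reads on every dispatch.

/* Toy model of the batching change; all toy_* names are hypothetical. */
#include <stddef.h>
#include <stdio.h>

enum dd_data_dir { DD_READ, DD_WRITE };

struct toy_dd {
	enum dd_data_dir last_dir; /* data direction of the latest batch */
	unsigned int batching;     /* requests dispatched in current batch */
	unsigned int fifo_batch;   /* maximum batch size */
	int next_rq[2];            /* stand-in for struct request *next_rq[] */
};

/* Stand-in for deadline_next_request(): nonzero means a request exists. */
static int toy_next_request(const struct toy_dd *dd, enum dd_data_dir dir)
{
	return dd->next_rq[dir];
}

/*
 * New scheme: one lookup in the remembered direction. The old code did a
 * lookup for DD_WRITE and, if that returned NULL, a second one for DD_READ.
 */
static const char *toy_dispatch(struct toy_dd *dd)
{
	if (toy_next_request(dd, dd->last_dir) &&
	    dd->batching < dd->fifo_batch) {
		dd->batching++;
		return dd->last_dir == DD_READ ? "read" : "write";
	}
	/*
	 * Here the real scheduler would start a new batch, possibly switching
	 * direction; last_dir is updated at that point.
	 */
	return NULL;
}

int main(void)
{
	struct toy_dd dd = {
		.last_dir = DD_WRITE, /* mirrors dd_init_sched() in the patch */
		.fifo_batch = 16,
		.next_rq = { 1, 1 }, /* pretend both directions have work */
	};
	const char *dir = toy_dispatch(&dd);

	printf("dispatched: %s\n", dir ? dir : "none");
	return 0;
}

Initializing last_dir to DD_WRITE in the sketch mirrors the dd_init_sched()
hunk of the patch, so the very first dispatch behaves like the old code's
write-first probe.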
Diffstat (limited to 'block/mq-deadline.c')
-rw-r--r--	block/mq-deadline.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 44da481c3fea..b09ae1f332a2 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -53,6 +53,8 @@ struct deadline_data {
 	struct rb_root sort_list[DD_DIR_COUNT];
 	struct list_head fifo_list[DD_DIR_COUNT];
 
+	/* Data direction of latest dispatched request. */
+	enum dd_data_dir last_dir;
 	/*
 	 * next in sort order. read, write or both are NULL
 	 */
@@ -179,8 +181,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
 {
 	const enum dd_data_dir data_dir = rq_data_dir(rq);
 
-	dd->next_rq[DD_READ] = NULL;
-	dd->next_rq[DD_WRITE] = NULL;
 	dd->next_rq[data_dir] = deadline_latter_request(rq);
 
 	/*
@@ -292,10 +292,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
 	/*
 	 * batches are currently reads XOR writes
 	 */
-	rq = deadline_next_request(dd, DD_WRITE);
-	if (!rq)
-		rq = deadline_next_request(dd, DD_READ);
-
+	rq = deadline_next_request(dd, dd->last_dir);
 	if (rq && dd->batching < dd->fifo_batch)
 		/* we have a next request are still entitled to batch */
 		goto dispatch_request;
@@ -361,6 +358,7 @@ dispatch_find_request:
 	if (!rq)
 		return NULL;
 
+	dd->last_dir = data_dir;
 	dd->batching = 0;
 
 dispatch_request:
@@ -473,6 +471,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 	dd->fifo_expire[DD_WRITE] = write_expire;
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
+	dd->last_dir = DD_WRITE;
 	dd->fifo_batch = fifo_batch;
 	spin_lock_init(&dd->lock);
 	spin_lock_init(&dd->zone_lock);