author    Damien Le Moal <damien.lemoal@wdc.com>  2021-06-18 10:59:22 +0900
committer Jens Axboe <axboe@kernel.dk>            2021-06-18 08:51:48 -0600
commit    e42cfb1da0bf33c313318da201730324c423351d
tree      53fc872475028ef3bce42e418c791c2ede206f4d /block/blk-mq-sched.c
parent    f0c1c4d2864ed614f90d2da1bab1a1c42907b940
block: Remove unnecessary elevator operation checks
The insert_requests and dispatch_request elevator operations are mandatory for the correct execution of an elevator, and all in-tree elevators (bfq, kyber and mq-deadline) implement them. As a result, there is no need to check for these operations before calling them when a queue has an elevator set. This simplifies the code in __blk_mq_sched_dispatch_requests() and blk_mq_sched_insert_request().

To prevent a badly implemented out-of-tree elevator from crashing the kernel, add a check in elv_register() to verify that these operations are implemented.

A small, probably not significant, IOPS improvement of 0.1% is observed with this patch applied (4.117 MIOPS to 4.123 MIOPS, average of 20 fio runs doing 4K random direct reads with psync and 32 jobs).

Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20210618015922.713999-1-damien.lemoal@wdc.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
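The elv_register() check described above lands in block/elevator.c, which is outside this page's diffstat, so it does not appear in the hunks below. As a rough sketch based only on the commit message (the WARN_ON_ONCE form and the exact placement inside elv_register() are assumptions, not code shown here), the verification could look like:

int elv_register(struct elevator_type *e)
{
	/*
	 * insert_requests and dispatch_request are now called without
	 * NULL checks whenever a queue has an elevator set, so refuse
	 * to register an elevator type that does not provide both.
	 */
	if (WARN_ON_ONCE(!e->ops.insert_requests ||
			 !e->ops.dispatch_request))
		return -EINVAL;

	/* ... existing registration steps elided ... */

	return 0;
}

Registering an elevator type that leaves either hook NULL would then fail up front with -EINVAL instead of crashing later on a NULL function pointer in the dispatch path.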
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 80273245d11a..2403a5c2b053 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -294,8 +294,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
-	struct elevator_queue *e = q->elevator;
-	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
+	const bool has_sched = q->elevator;
 	int ret = 0;
 	LIST_HEAD(rq_list);

@@ -326,12 +325,12 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
 		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
-			if (has_sched_dispatch)
+			if (has_sched)
 				ret = blk_mq_do_dispatch_sched(hctx);
 			else
 				ret = blk_mq_do_dispatch_ctx(hctx);
 		}
-	} else if (has_sched_dispatch) {
+	} else if (has_sched) {
 		ret = blk_mq_do_dispatch_sched(hctx);
 	} else if (hctx->dispatch_busy) {
 		/* dequeue request one by one from sw queue if queue is busy */
@@ -463,7 +462,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		goto run;
 	}

-	if (e && e->type->ops.insert_requests) {
+	if (e) {
 		LIST_HEAD(list);

 		list_add(&rq->queuelist, &list);
@@ -494,9 +493,9 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	percpu_ref_get(&q->q_usage_counter);

 	e = hctx->queue->elevator;
-	if (e && e->type->ops.insert_requests)
+	if (e) {
 		e->type->ops.insert_requests(hctx, list, false);
-	else {
+	} else {
 		/*
 		 * try to issue requests directly if the hw queue isn't
 		 * busy in case of 'none' scheduler, and this way may save