author	Ming Lei <ming.lei@redhat.com>	2021-12-03 21:15:34 +0800
committer	Jens Axboe <axboe@kernel.dk>	2021-12-03 14:51:29 -0700
commit	4cafe86c9267f9dd5819df946ba8c038ba958370 (patch)
tree	d5c2871f88972a6f2a893a4add72818d9ea9110c /block/blk-mq.c
parent	bcc330f42f442a98d61f153d16c0b6487461ee81 (diff)
blk-mq: run dispatch lock once in case of issuing from list
It isn't necessary to call blk_mq_run_dispatch_ops() once for each request that is issued directly; it is enough to do it one time when issuing from the whole list.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
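For context, blk_mq_run_dispatch_ops() is the helper introduced earlier in this series. The sketch below is only an approximation of its shape, written from memory of that series rather than taken from this page; names such as blk_queue_has_srcu() and q->srcu are assumptions about that tree. The point it illustrates is that the helper enters one RCU (or SRCU, for BLK_MQ_F_BLOCKING queues) read-side section around the dispatch code, so one invocation around a whole list issue is sufficient:

/*
 * Approximate sketch, not the verbatim kernel definition: run
 * @dispatch_ops with the queue's dispatch protection held, i.e.
 * rcu_read_lock() for normal queues or srcu_read_lock() for
 * BLK_MQ_F_BLOCKING queues.
 */
#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
do {								\
	if (!blk_queue_has_srcu(q)) {				\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	} else {						\
		int srcu_idx = srcu_read_lock((q)->srcu);	\
								\
		(dispatch_ops);					\
		srcu_read_unlock((q)->srcu, srcu_idx);		\
	}							\
} while (0)

With that helper in place, this patch hoists the wrapper out of blk_mq_request_issue_directly() and into the call sites: blk_mq_flush_plug_list() wraps the batched blk_mq_plug_issue_direct() call, and blk_insert_cloned_request() wraps its single-request issue, so the read-side section is entered once per dispatch rather than once per request.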
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 24c65bb8719b..22ec21aa0c22 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2464,12 +2464,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
- blk_status_t ret;
- struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
- blk_mq_run_dispatch_ops(rq->q,
- ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
- return ret;
+ return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
}
static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
@@ -2526,7 +2521,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
plug->rq_count = 0;
if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
- blk_mq_plug_issue_direct(plug, false);
+ blk_mq_run_dispatch_ops(plug->mq_list->q,
+ blk_mq_plug_issue_direct(plug, false));
if (rq_list_empty(plug->mq_list))
return;
}
@@ -2867,7 +2863,9 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- return blk_mq_request_issue_directly(rq, true);
+ blk_mq_run_dispatch_ops(rq->q,
+ ret = blk_mq_request_issue_directly(rq, true));
+ return ret;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);