author     Christoph Hellwig <hch@lst.de>    2023-04-13 08:40:41 +0200
committer  Jens Axboe <axboe@kernel.dk>      2023-04-13 06:52:29 -0600
commit     94aa228c2a2f6edc8e9b7c4745942ea4c5978977
tree       f668b0a3abcaa3af5745ec83dbd968e5b31a51b9 /block
parent     90110e04f265b95f59fbae09c228c5920b8a302f
blk-mq: move more logic into blk_mq_insert_requests
Move all logic related to the direct insert (including the call to
blk_mq_run_hw_queue) into blk_mq_insert_requests to streamline the code
flow a bit, and to allow marking blk_mq_try_issue_list_directly
static.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
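
Condensed, the scheduler insert path after this patch reads as follows (a
sketch assembled from the blk-mq-sched.c hunk in the diff below; the
surrounding declarations and setup are elided):

	e = hctx->queue->elevator;
	if (e) {
		/* elevator present: insert via the scheduler, then kick the queue */
		e->type->ops.insert_requests(hctx, list, false);
		blk_mq_run_hw_queue(hctx, run_queue_async);
	} else {
		/*
		 * 'none' scheduler: direct issue and the sw-queue fallback
		 * both live inside blk_mq_insert_requests() now
		 */
		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
	}
	percpu_ref_put(&q->q_usage_counter);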
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-sched.c  17
-rw-r--r--  block/blk-mq.c        20
-rw-r--r--  block/blk-mq.h         4
3 files changed, 21 insertions, 20 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c4b2d44b2d4e..811a9765b745 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -472,23 +472,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	e = hctx->queue->elevator;
 	if (e) {
 		e->type->ops.insert_requests(hctx, list, false);
+		blk_mq_run_hw_queue(hctx, run_queue_async);
 	} else {
-		/*
-		 * try to issue requests directly if the hw queue isn't
-		 * busy in case of 'none' scheduler, and this way may save
-		 * us one extra enqueue & dequeue to sw queue.
-		 */
-		if (!hctx->dispatch_busy && !run_queue_async) {
-			blk_mq_run_dispatch_ops(hctx->queue,
-				blk_mq_try_issue_list_directly(hctx, list));
-			if (list_empty(list))
-				goto out;
-		}
-		blk_mq_insert_requests(hctx, ctx, list);
+		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
 	}
-
-	blk_mq_run_hw_queue(hctx, run_queue_async);
-out:
 	percpu_ref_put(&q->q_usage_counter);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29014a0f9f39..536f001282bb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,6 +44,9 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+		struct list_head *list);
+
 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 		blk_qc_t qc)
 {
@@ -2495,13 +2498,24 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 }
 
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		struct list_head *list)
+		struct list_head *list, bool run_queue_async)
 {
 	struct request *rq;
 	enum hctx_type type = hctx->type;
 
 	/*
+	 * Try to issue requests directly if the hw queue isn't busy to save an
+	 * extra enqueue & dequeue to the sw queue.
+	 */
+	if (!hctx->dispatch_busy && !run_queue_async) {
+		blk_mq_run_dispatch_ops(hctx->queue,
+				blk_mq_try_issue_list_directly(hctx, list));
+		if (list_empty(list))
+			goto out;
+	}
+
+	/*
 	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
 	 * offline now
 	 */
@@ -2514,6 +2528,8 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	list_splice_tail_init(list, &ctx->rq_lists[type]);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
+out:
+	blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -2755,7 +2771,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	} while (!rq_list_empty(plug->mq_list));
 }
 
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
 	int queued = 0;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index fa13b694ff27..5d551f9ef2d6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -70,9 +70,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		struct list_head *list);
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
-		struct list_head *list);
+		struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
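
Pieced together from the hunks above, the resulting blk_mq_insert_requests()
looks roughly like this (a sketch; the per-request bookkeeping between the
direct-issue attempt and the splice onto the sw queue is elided):

	void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			struct list_head *list, bool run_queue_async)
	{
		struct request *rq;
		enum hctx_type type = hctx->type;

		/*
		 * Try to issue requests directly if the hw queue isn't busy to
		 * save an extra enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !run_queue_async) {
			blk_mq_run_dispatch_ops(hctx->queue,
					blk_mq_try_issue_list_directly(hctx, list));
			if (list_empty(list))
				goto out;
		}

		/* ... per-request checks elided ... */

		spin_lock(&ctx->lock);
		list_splice_tail_init(list, &ctx->rq_lists[type]);
		blk_mq_hctx_mark_pending(hctx, ctx);
		spin_unlock(&ctx->lock);
	out:
		blk_mq_run_hw_queue(hctx, run_queue_async);
	}

Note the shape this gives the function: direct issue is attempted only when
the hardware queue is not busy and the caller did not ask for an async run;
whatever remains (or everything, otherwise) is spliced onto the software
queue, and both paths converge on the single blk_mq_run_hw_queue() call at
the out: label.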