path: root/block/blk-mq.c
author:    Christoph Hellwig <hch@lst.de>  2024-01-24 10:26:58 +0100
committer: Jens Axboe <axboe@kernel.dk>  2024-02-05 10:06:53 -0700
commit:    72e84e909eb5354e1e405c968dfdc4dcc23d41cc (patch)
tree:      9081ada23cb175ac725495cb63b731f77e087a9c /block/blk-mq.c
parent:    337e89feb7c29043dacd851b6ac28542a9a8aacf (diff)
download:  lwn-72e84e909eb5354e1e405c968dfdc4dcc23d41cc.tar.gz
           lwn-72e84e909eb5354e1e405c968dfdc4dcc23d41cc.zip
blk-mq: special case cached requests less
Share the main merge / split / integrity preparation code between the cached
request vs newly allocated request cases, and add comments explaining the
cached request handling.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Tested-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20240124092658.2258309-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
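As context for the diff below, here is a condensed sketch of the resulting blk_mq_submit_bio() flow. It is derived from the hunks that follow and is not the literal kernel source: the bounce, tracing, plugging and issue steps are elided and only the structure relevant to this patch is kept. The point is that the split / integrity / merge preparation now runs once for both cases, and only the path without a cached request enters the queue:

	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
	if (!rq) {
		/* No cached request: take a q_usage_counter reference now. */
		if (unlikely(bio_queue_enter(bio)))
			return;
	}

	/* Shared preparation for both the cached and the new-request case. */
	if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
		if (!bio)
			goto queue_exit;
	}
	if (!bio_integrity_prep(bio))
		goto queue_exit;
	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
		goto queue_exit;

	if (!rq) {
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			goto queue_exit;
	} else {
		/* The cached request already holds a queue reference. */
		blk_mq_use_cached_rq(rq, plug, bio);
	}
	/* ... set up and issue rq ... */
	return;

queue_exit:
	/* Drop the reference only if bio_queue_enter() took one above. */
	if (!rq)
		blk_queue_exit(q);

Because a cached request already pins q_usage_counter, the shared error paths must not call blk_queue_exit() when rq was found in the plug, which is exactly what the final hunk implements.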
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	41
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ad57a259e975..c53a196f579e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2967,29 +2967,24 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct blk_plug *plug = blk_mq_plug(bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq = NULL;
 	unsigned int nr_segs = 1;
+	struct request *rq;
 	blk_status_t ret;
 
 	bio = blk_queue_bounce(bio, q);
 
+	/*
+	 * If the plug has a cached request for this queue, try use it.
+	 *
+	 * The cached request already holds a q_usage_counter reference and we
+	 * don't have to acquire a new one if we use it.
+	 */
 	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
-	if (rq) {
-		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
-			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
-			if (!bio)
-				return;
-		}
-		if (!bio_integrity_prep(bio))
-			return;
-		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+	if (!rq) {
+		if (unlikely(bio_queue_enter(bio)))
 			return;
-		blk_mq_use_cached_rq(rq, plug, bio);
-		goto done;
 	}
-	if (unlikely(bio_queue_enter(bio)))
-		return;
 
 	if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
 		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 		if (!bio)
@@ -3001,11 +2996,14 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 		goto queue_exit;
 
-	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-	if (unlikely(!rq))
-		goto queue_exit;
+	if (!rq) {
+		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		if (unlikely(!rq))
+			goto queue_exit;
+	} else {
+		blk_mq_use_cached_rq(rq, plug, bio);
+	}
 
-done:
 	trace_block_getrq(bio);
 	rq_qos_track(q, rq, bio);
 
@@ -3039,7 +3037,12 @@ done:
 	return;
 
 queue_exit:
-	blk_queue_exit(q);
+	/*
+	 * Don't drop the queue reference if we were trying to use a cached
+	 * request and thus didn't acquire one.
+	 */
+	if (!rq)
+		blk_queue_exit(q);
 }
 
 #ifdef CONFIG_BLK_MQ_STACKING