author    Christoph Hellwig <hch@lst.de>  2017-03-22 15:01:51 -0400
committer Jens Axboe <axboe@fb.com>       2017-03-22 20:16:59 -0600
commit    5eb6126e1c5c5b69d90003444acc99743881b7b7 (patch)
tree      9cab7739f84564d9bbe060e1b2334ddc8ec3a7b9 /block/blk-mq.c
parent    254d259da0c3cb77f03a2adb8959c293f638a3d2 (diff)
blk-mq: improve blk_mq_try_issue_directly
Rename blk_mq_try_issue_directly to __blk_mq_try_issue_directly and add a new wrapper that takes care of RCU / SRCU locking to avoid having boilerplate code in the caller which would get duplicated with new callers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
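In caller terms, the change collapses the open-coded locking choice into a single call. The condensed before/after view below is assembled from the diff that follows; it introduces no code beyond what the patch itself touches:

	/* Before: each caller picks RCU vs. SRCU based on BLK_MQ_F_BLOCKING. */
	if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_try_issue_directly(old_rq, &cookie, false);
		rcu_read_unlock();
	} else {
		srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
		blk_mq_try_issue_directly(old_rq, &cookie, true);
		srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
	}

	/* After: the wrapper makes the RCU/SRCU decision internally. */
	blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);

Any future direct-issue path can call the wrapper the same way instead of duplicating the locking boilerplate, which is the motivation stated in the commit message.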
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5a1ff4894285..e82e1a9c7d5e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1430,7 +1430,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 		bool may_sleep)
 {
 	struct request_queue *q = rq->q;
@@ -1475,13 +1475,27 @@ insert:
 	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
 
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, blk_qc_t *cookie)
+{
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+		rcu_read_lock();
+		__blk_mq_try_issue_directly(rq, cookie, false);
+		rcu_read_unlock();
+	} else {
+		unsigned int srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+		__blk_mq_try_issue_directly(rq, cookie, true);
+		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+	}
+}
+
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_mq_alloc_data data = { .flags = 0 };
 	struct request *rq;
-	unsigned int request_count = 0, srcu_idx;
+	unsigned int request_count = 0;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
 	blk_qc_t cookie;
@@ -1579,18 +1593,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		} else /* is_sync */
 			old_rq = rq;
 		blk_mq_put_ctx(data.ctx);
-		if (!old_rq)
-			goto done;
-
-		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
-			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie, false);
-			rcu_read_unlock();
-		} else {
-			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie, true);
-			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
-		}
+		if (old_rq)
+			blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
 		goto done;
 	}