author     Christoph Hellwig <hch@lst.de>   2021-09-29 09:12:38 +0200
committer  Jens Axboe <axboe@kernel.dk>     2021-10-15 21:02:44 -0600
commit     1f14a0989073459c2be6b5c7b3fb7f9f3d7fa2af (patch)
tree       daf8fd67682f5150f3f3f9fe02d2ae6e1ead31cb /block/blk-core.c
parent     cc9c884dd7f4f036965e23f5445f838db316eb46 (diff)
block: factor out a blk_try_enter_queue helper
Factor out the code to try to get q_usage_counter without blocking into
a separate helper, both to improve code readability and to prepare for
splitting bio_queue_enter from blk_queue_enter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20210929071241.934472-3-hch@lst.de
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
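The point of the helper is to make the non-blocking entry attempt reusable
outside blk_queue_enter(). As a rough illustration only (not part of this
patch; the actual bio_queue_enter split lands in a follow-up commit), a
caller that must not sleep could use it along these lines, where
example_nowait_enter is a hypothetical name:

/*
 * Hypothetical sketch, not from this patch: shows the intended calling
 * pattern for blk_try_enter_queue() -- take the lockless fast path when
 * possible and fall back to the sleeping slow path otherwise.
 */
static int example_nowait_enter(struct request_queue *q, bool nowait)
{
	if (blk_try_enter_queue(q, false))
		return 0;		/* got a q_usage_counter reference */
	if (nowait)
		return -EBUSY;		/* caller must not block */
	return blk_queue_enter(q, 0);	/* sleep until unfrozen or dying */
}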
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 60
1 file changed, 32 insertions(+), 28 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c071f1a90b10..7e9eadacf2de 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -416,6 +416,30 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+ rcu_read_lock();
+ if (!percpu_ref_tryget_live(&q->q_usage_counter))
+ goto fail;
+
+ /*
+ * The code that increments the pm_only counter must ensure that the
+ * counter is globally visible before the queue is unfrozen.
+ */
+ if (blk_queue_pm_only(q) &&
+ (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+ goto fail_put;
+
+ rcu_read_unlock();
+ return true;
+
+fail_put:
+ percpu_ref_put(&q->q_usage_counter);
+fail:
+ rcu_read_unlock();
+ return false;
+}
+
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
@@ -425,40 +449,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
const bool pm = flags & BLK_MQ_REQ_PM;
- while (true) {
- bool success = false;
-
- rcu_read_lock();
- if (percpu_ref_tryget_live(&q->q_usage_counter)) {
- /*
- * The code that increments the pm_only counter is
- * responsible for ensuring that that counter is
- * globally visible before the queue is unfrozen.
- */
- if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
- !blk_queue_pm_only(q)) {
- success = true;
- } else {
- percpu_ref_put(&q->q_usage_counter);
- }
- }
- rcu_read_unlock();
-
- if (success)
- return 0;
-
+ while (!blk_try_enter_queue(q, pm)) {
if (flags & BLK_MQ_REQ_NOWAIT)
return -EBUSY;
/*
- * read pair of barrier in blk_freeze_queue_start(),
- * we need to order reading __PERCPU_REF_DEAD flag of
- * .q_usage_counter and reading .mq_freeze_depth or
- * queue dying flag, otherwise the following wait may
- * never return if the two reads are reordered.
+ * read pair of barrier in blk_freeze_queue_start(), we need to
+ * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+ * reading .mq_freeze_depth or queue dying flag, otherwise the
+ * following wait may never return if the two reads are
+ * reordered.
*/
smp_rmb();
-
wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth &&
blk_pm_resume_queue(pm, q)) ||
@@ -466,6 +468,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
if (blk_queue_dying(q))
return -ENODEV;
}
+
+ return 0;
}
static inline int bio_queue_enter(struct bio *bio)
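Applied together, the hunks above leave blk_queue_enter() in roughly the
following shape. This is reassembled from the fragments for readability,
with the unchanged wait_event() tail between the hunks filled in, so treat
it as a sketch of the result rather than verbatim source:

int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}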