path: root/block/blk-iolatency.c
author		Josef Bacik <josef@toxicpanda.com>	2018-12-04 12:59:04 -0500
committer	Jens Axboe <axboe@kernel.dk>	2018-12-07 22:26:38 -0700
commit		d3fcdff19054575a368dfdac7407cabffea36c43 (patch)
tree		0d65fe0426e7a00cc381cf5f0ed79c898347880d /block/blk-iolatency.c
parent		b6c7b58f5fcc2386bddf9852011c42c1d2b83979 (diff)
block: convert io-latency to use rq_qos_wait
Now that we have this common helper, convert io-latency over to use it as well.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
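For reference, the callback interface this patch plugs into was added earlier in the series in block/blk-rq-qos.h. The sketch below paraphrases that interface rather than quoting the header, so treat it as a reminder of the shape, not the authoritative declarations:

/* Paraphrased sketch of the rq_qos_wait() interface from block/blk-rq-qos.h. */
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);

As the two callbacks in this patch show, the acquire callback tries to take an inflight slot below the configured depth, while the cleanup callback undoes an acquisition (dropping the inflight count and waking the next waiter) when the generic helper ends up holding a token it does not need.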
Diffstat (limited to 'block/blk-iolatency.c')
-rw-r--r--	block/blk-iolatency.c	31
1 file changed, 8 insertions(+), 23 deletions(-)
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 0b14c3d57769..bee092727cad 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -262,15 +262,15 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
 		   stat->rqs.mean);
 }
 
-static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
-				       wait_queue_entry_t *wait,
-				       bool first_block)
+static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
 {
-	struct rq_wait *rqw = &iolat->rq_wait;
+	atomic_dec(&rqw->inflight);
+	wake_up(&rqw->wait);
+}
 
-	if (first_block && waitqueue_active(&rqw->wait) &&
-	    rqw->wait.head.next != &wait->entry)
-		return false;
+static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
+{
+	struct iolatency_grp *iolat = private_data;
 
 	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
 }
@@ -281,8 +281,6 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 {
 	struct rq_wait *rqw = &iolat->rq_wait;
 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
-	DEFINE_WAIT(wait);
-	bool first_block = true;
 
 	if (use_delay)
 		blkcg_schedule_throttle(rqos->q, use_memdelay);
@@ -299,20 +297,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 		return;
 	}
 
-	if (iolatency_may_queue(iolat, &wait, first_block))
-		return;
-
-	do {
-		prepare_to_wait_exclusive(&rqw->wait, &wait,
-					  TASK_UNINTERRUPTIBLE);
-
-		if (iolatency_may_queue(iolat, &wait, first_block))
-			break;
-		first_block = false;
-		io_schedule();
-	} while (1);
-
-	finish_wait(&rqw->wait, &wait);
+	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
 }
 
 #define SCALE_DOWN_FACTOR 2
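To illustrate the pattern that the removed open-coded wait loop and the new rq_qos_wait() call both implement, here is a stand-alone userspace model (not kernel code): submitters try to take an inflight slot below a fixed depth and otherwise sleep until a completing request releases a slot and wakes a waiter. All names here (MAX_DEPTH, acquire_inflight, release_inflight, throttle, issuer) are hypothetical, and the condition-variable machinery only approximates what rq_qos_wait() does with its exclusive waitqueue.

/* Hypothetical userspace model of the throttle pattern; build: cc -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_DEPTH 2			/* stand-in for rq_depth.max_depth */

static atomic_int inflight;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;

/* Model of the acquire callback: take a slot only while below the limit. */
static bool acquire_inflight(void)
{
	int cur = atomic_load(&inflight);

	while (cur < MAX_DEPTH) {
		if (atomic_compare_exchange_weak(&inflight, &cur, cur + 1))
			return true;
	}
	return false;
}

/* Model of releasing a slot on completion: drop the count, wake one waiter. */
static void release_inflight(void)
{
	atomic_fetch_sub(&inflight, 1);
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&wait_cv);
	pthread_mutex_unlock(&lock);
}

/* Model of the wait helper: fast path first, then sleep until a slot frees. */
static void throttle(void)
{
	if (acquire_inflight())
		return;

	pthread_mutex_lock(&lock);
	while (!acquire_inflight())
		pthread_cond_wait(&wait_cv, &lock);
	pthread_mutex_unlock(&lock);
}

static void *issuer(void *arg)
{
	long id = (long)arg;

	throttle();
	printf("request %ld admitted (inflight=%d)\n", id,
	       atomic_load(&inflight));
	usleep(100 * 1000);		/* pretend the I/O takes some time */
	release_inflight();
	return NULL;
}

int main(void)
{
	pthread_t threads[8];

	for (long i = 0; i < 8; i++)
		pthread_create(&threads[i], NULL, issuer, (void *)i);
	for (int i = 0; i < 8; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

In this model, acquire_inflight() plays the role of iolat_acquire_inflight(), release_inflight() does on the completion side what iolat_cleanup_cb() does (drop the count and wake a waiter), and throttle() stands in for the generic rq_qos_wait() loop that this patch switches blk-iolatency over to.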