path: root/block/blk-cgroup.c
author    Jens Axboe <axboe@fb.com>  2017-01-17 06:03:22 -0700
committer Jens Axboe <axboe@fb.com>  2017-01-17 10:04:20 -0700
commit    bd166ef183c263c5ced656d49ef19c7da4adc774 (patch)
tree      449bbd3b4e671b370b96e3846b2281116e7089e9 /block/blk-cgroup.c
parent    2af8cbe30531eca73c8f3ba277f155fc0020b01a (diff)
blk-mq-sched: add framework for MQ capable IO schedulers
This adds a set of hooks that intercepts the blk-mq path of
allocating/inserting/issuing/completing requests, allowing us to
develop a scheduler within that framework.

We reuse the existing elevator scheduler API on the registration side,
but augment that with the scheduler flagging support for the blk-mq
interface, and with a separate set of ops hooks for MQ devices.

We split driver and scheduler tags, so we can run the scheduling
independently of device queue depth.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
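For context on the registration side the message describes, the sketch
below shows roughly how an MQ-capable scheduler plugs into the augmented
elevator API of this era (mq-deadline is the in-tree reference user).
Treat the exact hook set and the .ops.mq/.uses_mq field names as
assumptions drawn from the framework around Linux 4.11, not from this
diff (which only touches blk-cgroup.c); the my_sched_* callbacks and the
"my-sched" name are hypothetical placeholders.

#include <linux/module.h>
#include <linux/elevator.h>

/*
 * Sketch: registering an MQ-capable scheduler through the existing
 * elevator API. .ops.mq supplies the blk-mq hooks and .uses_mq flags
 * the elevator as MQ-capable. Callback bodies are elided; names and
 * the exact hook set here are illustrative assumptions.
 */
static struct elevator_type my_sched_elv = {
        .ops.mq = {
                .init_sched       = my_sched_init,     /* allocate per-queue scheduler data */
                .exit_sched       = my_sched_exit,     /* tear it down */
                .insert_requests  = my_sched_insert,   /* take requests into the scheduler */
                .dispatch_request = my_sched_dispatch, /* pick the next request for the driver */
                .has_work         = my_sched_has_work, /* anything left to dispatch? */
        },
        .uses_mq        = true,        /* MQ ops in use, not the legacy sq ops */
        .elevator_name  = "my-sched",
        .elevator_owner = THIS_MODULE,
};

static int __init my_sched_module_init(void)
{
        /* registered like any other elevator */
        return elv_register(&my_sched_elv);
}
module_init(my_sched_module_init);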
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8ba0af780e88..2630f64bed19 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1223,7 +1223,11 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	blk_queue_bypass_start(q);
+	if (q->mq_ops) {
+		blk_mq_freeze_queue(q);
+		blk_mq_quiesce_queue(q);
+	} else
+		blk_queue_bypass_start(q);
 pd_prealloc:
 	if (!pd_prealloc) {
 		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
@@ -1261,7 +1265,10 @@ pd_prealloc:
 
 	spin_unlock_irq(q->queue_lock);
 out_bypass_end:
-	blk_queue_bypass_end(q);
+	if (q->mq_ops)
+		blk_mq_unfreeze_queue(q);
+	else
+		blk_queue_bypass_end(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);
 	return ret;
@@ -1284,7 +1291,12 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;
 
-	blk_queue_bypass_start(q);
+	if (q->mq_ops) {
+		blk_mq_freeze_queue(q);
+		blk_mq_quiesce_queue(q);
+	} else
+		blk_queue_bypass_start(q);
+
 	spin_lock_irq(q->queue_lock);
 
 	__clear_bit(pol->plid, q->blkcg_pols);
@@ -1304,7 +1316,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	}
 
 	spin_unlock_irq(q->queue_lock);
-	blk_queue_bypass_end(q);
+
+	if (q->mq_ops)
+		blk_mq_unfreeze_queue(q);
+	else
+		blk_queue_bypass_end(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
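
All three hunks apply the same pattern: on a blk-mq queue (q->mq_ops
set), the queue is drained by freezing and quiescing it, while legacy
request_fn queues keep using the bypass mechanism. A hypothetical pair
of helpers, built only from the calls appearing in the diff, would
factor that pattern out; the helper names are invented for illustration
and do not exist in the kernel.

/*
 * Hypothetical helpers factoring out the drain/resume pattern the
 * hunks above open-code three times. Only the helper names are
 * invented; the calls are exactly those used in the diff.
 */
static void blkcg_policy_drain_queue(struct request_queue *q)
{
        if (q->mq_ops) {
                /* blk-mq: stop new requests and wait out in-flight ones */
                blk_mq_freeze_queue(q);
                blk_mq_quiesce_queue(q);
        } else {
                /* legacy request_fn path: enter bypass mode */
                blk_queue_bypass_start(q);
        }
}

static void blkcg_policy_resume_queue(struct request_queue *q)
{
        if (q->mq_ops)
                blk_mq_unfreeze_queue(q);
        else
                blk_queue_bypass_end(q);
}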