author    Tejun Heo <tj@kernel.org>    2012-03-05 13:14:59 -0800
committer Jens Axboe <axboe@kernel.dk> 2012-03-06 21:27:22 +0100
commit    6ecf23afab13c39d3bb0e2d826d0984b0dd53733
tree      48436e2eb507d623ff2c2332aa34e9b7380f33e1 /block/blk-core.c
parent    d732580b4eb31553c63744a47d590f770cafb8f0
block: extend queue bypassing to cover blkcg policies
Extend queue bypassing such that a dying queue is always bypassing and
blk-throttle is drained on bypass.  With blkcg policies updated to test
blk_queue_bypass() instead of blk_queue_dead(), this ensures that no bio
or request is held by or going through blkcg policies on a bypassing
queue.

This will be used to implement blkg cleanup on elevator switches and
policy changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
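The mechanism the patch builds on is a reference-counted bypass: each
blk_queue_bypass_start() bumps q->bypass_depth and sets QUEUE_FLAG_BYPASS,
blk_queue_bypass_end() drops the count, and blk_cleanup_queue() (see the
last hunk below) takes a bypass reference it never releases, so a dead
queue bypasses forever.  A minimal user-space sketch of that counting
pattern, with simplified stand-ins rather than the kernel's own types and
helpers:

#include <stdbool.h>
#include <stdio.h>

struct request_queue {
        int  bypass_depth;      /* nesting count, as in the patch */
        bool bypass;            /* stands in for QUEUE_FLAG_BYPASS */
};

/* hypothetical stand-in for blk_queue_bypass_start() */
static void bypass_start(struct request_queue *q)
{
        q->bypass_depth++;
        q->bypass = true;
        /* the real function then drains throttled and issued requests */
}

/* hypothetical stand-in for blk_queue_bypass_end() */
static void bypass_end(struct request_queue *q)
{
        if (--q->bypass_depth == 0)
                q->bypass = false;
}

int main(void)
{
        struct request_queue q = { 0 };

        bypass_start(&q);       /* e.g. an elevator switch enters bypass */
        bypass_start(&q);       /* e.g. cleanup: never paired with an end */
        bypass_end(&q);         /* elevator switch finishes */
        printf("still bypassing: %d\n", q.bypass);      /* prints 1 */
        return 0;
}

Because the flag is backed by a depth count rather than a single bit, an
elevator switch and queue teardown can overlap without one prematurely
clearing the other's bypass state.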
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--    block/blk-core.c    12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 98ddef430093..7713c73d9590 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -372,8 +372,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (q->elevator)
 			elv_drain_elevator(q);
 
-		if (drain_all)
-			blk_throtl_drain(q);
+		blk_throtl_drain(q);
 
 		/*
 		 * This function might be called on a queue which failed
@@ -415,8 +414,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  *
  * In bypass mode, only the dispatch FIFO queue of @q is used.  This
  * function makes @q enter bypass mode and drains all requests which were
- * issued before.  On return, it's guaranteed that no request has ELVPRIV
- * set.
+ * throttled or issued before.  On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set.
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
@@ -461,6 +460,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
 	spin_lock_irq(lock);
+
+	/* dead queue is permanently in bypass mode till released */
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
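
On the policy side, the commit message's point is that blkcg policies now
test blk_queue_bypass() instead of blk_queue_dead() before taking hold of
a bio; since a dying queue is always bypassing, the single test covers
both cases.  A hedged user-space sketch of that check, where
queue_bypass() and policy_may_hold_bio() are illustrative names rather
than kernel functions:

#include <stdbool.h>
#include <stdio.h>

struct request_queue {
        int  bypass_depth;
        bool dead;
};

/* illustrative stand-in for the kernel's blk_queue_bypass() test */
static bool queue_bypass(const struct request_queue *q)
{
        return q->bypass_depth > 0;
}

/* hypothetical policy hook: may this queue's policy hold on to a bio? */
static bool policy_may_hold_bio(const struct request_queue *q)
{
        /* the old test was q->dead; bypass also covers a dying queue */
        return !queue_bypass(q);
}

int main(void)
{
        /* a queue put into bypass, e.g. by blk_cleanup_queue() */
        struct request_queue q = { .bypass_depth = 1, .dead = false };

        printf("may hold bio: %d\n", policy_may_hold_bio(&q));  /* 0 */
        return 0;
}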