summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Tejun Heo <tj@kernel.org>  2012-04-13 13:11:35 -0700
committer Jens Axboe <axboe@kernel.dk>  2012-04-20 10:06:06 +0200
commit    6d18b008daf46bcd82b8ae250aae0785f9714096 (patch)
tree      e70e66ce74220d5a0c7382acd20efbd4835c8f77
parent    3c96cb32d318f323c1bf972a4c66821f8499e34d (diff)
download  lwn-6d18b008daf46bcd82b8ae250aae0785f9714096.tar.gz
          lwn-6d18b008daf46bcd82b8ae250aae0785f9714096.zip
blkcg: shoot down blkgs if all policies are deactivated
There's no reason to keep blkgs around if no policy is activated for the
queue.  This patch moves queue locking out of blkg_destroy_all() and calls it
from blkg_deactivate_policy() on deactivation of the last policy on the
queue.

This change was suggested by Vivek.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/blk-cgroup.c | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 10f0d2fc0b23..b1807d4ecedb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -258,7 +258,7 @@ static void blkg_destroy_all(struct request_queue *q)
{
struct blkio_group *blkg, *n;
- spin_lock_irq(q->queue_lock);
+ lockdep_assert_held(q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkio_cgroup *blkcg = blkg->blkcg;
@@ -267,8 +267,6 @@ static void blkg_destroy_all(struct request_queue *q)
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
-
- spin_unlock_irq(q->queue_lock);
}
static void blkg_rcu_free(struct rcu_head *rcu_head)
@@ -646,7 +644,10 @@ void blkcg_drain_queue(struct request_queue *q)
*/
void blkcg_exit_queue(struct request_queue *q)
{
+ spin_lock_irq(q->queue_lock);
blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
+
blk_throtl_exit(q);
}
@@ -802,6 +803,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);
+ /* if no policy is left, no need for blkgs - shoot them down */
+ if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+ blkg_destroy_all(q);
+
list_for_each_entry(blkg, &q->blkg_list, q_node) {
/* grab blkcg lock too while removing @pd from @blkg */
spin_lock(&blkg->blkcg->lock);