author     Tejun Heo <tj@kernel.org>       2012-04-13 13:11:34 -0700
committer  Jens Axboe <axboe@kernel.dk>    2012-04-20 10:06:06 +0200
commit     3c96cb32d318f323c1bf972a4c66821f8499e34d (patch)
tree       e76d0437f487405d8a22a727b8085696b949179d /block/blk-cgroup.c
parent     a2b1693bac45ea3fe3ba612fd22c45f17449f610 (diff)
blkcg: drop stuff unused after per-queue policy activation update
* all_q_list is unused. Drop all_q_{mutex|list}.

* @for_root of blkg_lookup_create() is always %false when called from
  outside blk-cgroup.c proper. Factor out __blkg_lookup_create() so
  that it doesn't check whether @q is bypassing and use the
  underscored version for the @for_root callsite.

* blkg_destroy_all() is used only from blkcg proper and @destroy_root
  is always %true. Make it static and drop @destroy_root.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  61
1 file changed, 20 insertions(+), 41 deletions(-)
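
For readers skimming the diff below, the heart of the refactor is a familiar split: a public entry point that rejects bypassing queues up front, and a bare underscored helper that skips the check so the root-activation path can call it directly. The following is a minimal standalone sketch of that pattern only, not the kernel code; io_queue, io_group, and the function names are illustrative stand-ins, and the real functions operate on request_queue/blkio_group under queue_lock and RCU.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for request_queue and blkio_group; illustrative only. */
struct io_queue {
	bool bypassing;
	bool dead;
};

struct io_group {
	struct io_queue *q;
};

/*
 * Internal helper in the role of __blkg_lookup_create(): it does the
 * lookup/create work and trusts the caller to have validated the queue
 * state, so the root-only activation path can use it even while the
 * queue is bypassing.
 */
static struct io_group *__io_group_lookup_create(struct io_queue *q)
{
	struct io_group *grp = calloc(1, sizeof(*grp));

	if (!grp)
		return NULL;
	grp->q = q;
	return grp;
}

/*
 * Public entry point in the role of blkg_lookup_create(): every external
 * caller passed @for_root=false, so the bypass check now lives only here
 * and the parameter disappears from the signature.
 */
static struct io_group *io_group_lookup_create(struct io_queue *q)
{
	if (q->bypassing) {
		errno = q->dead ? EINVAL : EBUSY;
		return NULL;
	}
	return __io_group_lookup_create(q);
}

int main(void)
{
	struct io_queue q = { .bypassing = false, .dead = false };
	struct io_group *grp = io_group_lookup_create(&q);

	printf("group created: %s\n", grp ? "yes" : "no");
	free(grp);
	return 0;
}

In the patch itself the same shape shows up as __blkg_lookup_create() keeping the lookup/creation logic, blkg_lookup_create() keeping only the bypass check, and blkcg_activate_policy() switching to the underscored helper for the root blkg.
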
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6d59ad105b4..10f0d2fc0b23 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -25,8 +25,6 @@
#define MAX_KEY_LEN 100
static DEFINE_MUTEX(blkcg_pol_mutex);
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
@@ -179,9 +177,8 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
}
EXPORT_SYMBOL_GPL(blkg_lookup);
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
- struct request_queue *q,
- bool for_root)
+static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
+ struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
{
struct blkio_group *blkg;
@@ -189,13 +186,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(q->queue_lock);
- /*
- * This could be the first entry point of blkcg implementation and
- * we shouldn't allow anything to go through for a bypassing queue.
- */
- if (unlikely(blk_queue_bypass(q)) && !for_root)
- return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-
blkg = __blkg_lookup(blkcg, q);
if (blkg)
return blkg;
@@ -223,6 +213,18 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
out:
return blkg;
}
+
+struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
+ struct request_queue *q)
+{
+ /*
+ * This could be the first entry point of blkcg implementation and
+ * we shouldn't allow anything to go through for a bypassing queue.
+ */
+ if (unlikely(blk_queue_bypass(q)))
+ return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return __blkg_lookup_create(blkcg, q);
+}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
static void blkg_destroy(struct blkio_group *blkg)
@@ -249,12 +251,10 @@ static void blkg_destroy(struct blkio_group *blkg)
/**
* blkg_destroy_all - destroy all blkgs associated with a request_queue
* @q: request_queue of interest
- * @destroy_root: whether to destroy root blkg or not
*
- * Destroy blkgs associated with @q. If @destroy_root is %true, all are
- * destroyed; otherwise, root blkg is left alone.
+ * Destroy all blkgs associated with @q.
*/
-void blkg_destroy_all(struct request_queue *q, bool destroy_root)
+static void blkg_destroy_all(struct request_queue *q)
{
struct blkio_group *blkg, *n;
@@ -263,10 +263,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkio_cgroup *blkcg = blkg->blkcg;
- /* skip root? */
- if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
- continue;
-
spin_lock(&blkcg->lock);
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
@@ -274,7 +270,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
spin_unlock_irq(q->queue_lock);
}
-EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
@@ -492,7 +487,7 @@ int blkg_conf_prep(struct blkio_cgroup *blkcg,
spin_lock_irq(disk->queue->queue_lock);
if (blkcg_policy_enabled(disk->queue, pol))
- blkg = blkg_lookup_create(blkcg, disk->queue, false);
+ blkg = blkg_lookup_create(blkcg, disk->queue);
else
blkg = ERR_PTR(-EINVAL);
@@ -625,20 +620,9 @@ done:
*/
int blkcg_init_queue(struct request_queue *q)
{
- int ret;
-
might_sleep();
- ret = blk_throtl_init(q);
- if (ret)
- return ret;
-
- mutex_lock(&all_q_mutex);
- INIT_LIST_HEAD(&q->all_q_node);
- list_add_tail(&q->all_q_node, &all_q_list);
- mutex_unlock(&all_q_mutex);
-
- return 0;
+ return blk_throtl_init(q);
}
/**
@@ -662,12 +646,7 @@ void blkcg_drain_queue(struct request_queue *q)
*/
void blkcg_exit_queue(struct request_queue *q)
{
- mutex_lock(&all_q_mutex);
- list_del_init(&q->all_q_node);
- mutex_unlock(&all_q_mutex);
-
- blkg_destroy_all(q, true);
-
+ blkg_destroy_all(q);
blk_throtl_exit(q);
}
@@ -741,7 +720,7 @@ int blkcg_activate_policy(struct request_queue *q,
spin_lock_irq(q->queue_lock);
rcu_read_lock();
- blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
+ blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
rcu_read_unlock();
if (IS_ERR(blkg)) {