author     Christoph Hellwig <hch@lst.de>    2023-02-03 16:03:47 +0100
committer  Jens Axboe <axboe@kernel.dk>      2023-02-03 08:20:05 -0700
commit     84d7d462b16dd5f0bf7c7ca9254bf81db2c952a2
tree       338edb3db9c6078f07403110e27b65c471818750 /block/blk-cgroup.c
parent     180b04d450a7137270c12dbb6bebf1d5e6c0a6f2
blk-cgroup: pin the gendisk in struct blkcg_gq
Currently each blkcg_gq holds a request_queue reference, which is what
the policies use. But many of these interfaces will move over to use a
gendisk, so store a gendisk in struct blkcg_gq and hold a reference to
it.
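
As a rough illustration of the new ownership model, here is a minimal sketch
(not part of the patch; the helper names blkg_pin_disk() and blkg_unpin_disk()
are hypothetical) of the pin/release pairing that blkg_alloc() and blkg_free()
adopt in the diff below: the gendisk is pinned via its embedded struct device,
and the request_queue is then reached as blkg->disk->queue instead of through
a cached blkg->q pointer.

/*
 * Sketch only: hypothetical helpers condensing what blkg_alloc() and
 * blkg_free() do after this patch.  Error unwinding, per-policy data and
 * locking are omitted.
 */
#include <linux/blkdev.h>
#include <linux/errno.h>
#include "blk-cgroup.h"	/* private header in block/ defining struct blkcg_gq */

static int blkg_pin_disk(struct blkcg_gq *blkg, struct gendisk *disk)
{
	/* Never pin a disk that has already been marked dead. */
	if (test_bit(GD_DEAD, &disk->state))
		return -ENODEV;

	/* Hold a reference on the gendisk's embedded struct device ... */
	get_device(disk_to_dev(disk));
	/*
	 * ... and remember the disk; the queue stays reachable as
	 * blkg->disk->queue, so no separate request_queue reference is needed.
	 */
	blkg->disk = disk;
	return 0;
}

static void blkg_unpin_disk(struct blkcg_gq *blkg)
{
	/* Drop the gendisk reference taken by blkg_pin_disk(). */
	put_disk(blkg->disk);
}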
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20230203150400.3199230-7-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0b3226cbf3f2..0e368387497d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -116,7 +116,6 @@ static bool blkcg_policy_enabled(struct request_queue *q,
 static void blkg_free(struct blkcg_gq *blkg)
 {
-	struct request_queue *q = blkg->q;
 	int i;
 
 	/*
@@ -126,16 +125,16 @@ static void blkg_free(struct blkcg_gq *blkg)
 	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
 	 * blkcg_deactivate_policy().
 	 */
-	mutex_lock(&q->blkcg_mutex);
+	mutex_lock(&blkg->disk->queue->blkcg_mutex);
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
 	list_del_init(&blkg->q_node);
-	mutex_unlock(&q->blkcg_mutex);
+	mutex_unlock(&blkg->disk->queue->blkcg_mutex);
 
-	blk_put_queue(q);
+	put_disk(blkg->disk);
 	free_percpu(blkg->iostat_cpu);
 	percpu_ref_exit(&blkg->refcnt);
 	kfree(blkg);
@@ -251,10 +250,12 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
 	if (!blkg->iostat_cpu)
 		goto out_exit_refcnt;
-	if (!blk_get_queue(disk->queue))
+
+	if (test_bit(GD_DEAD, &disk->state))
 		goto out_free_iostat;
+	get_device(disk_to_dev(disk));
+	blkg->disk = disk;
 
-	blkg->q = disk->queue;
 	INIT_LIST_HEAD(&blkg->q_node);
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
@@ -290,7 +291,7 @@ out_free_pds:
 	while (--i >= 0)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
-	blk_put_queue(disk->queue);
+	put_disk(blkg->disk);
 out_free_iostat:
 	free_percpu(blkg->iostat_cpu);
 out_exit_refcnt:
@@ -461,7 +462,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	struct blkcg *blkcg = blkg->blkcg;
 	int i;
 
-	lockdep_assert_held(&blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->disk->queue->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
 	/*
@@ -485,7 +486,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 
 	blkg->online = false;
 
-	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+	radix_tree_delete(&blkcg->blkg_tree, blkg->disk->queue->id);
 	hlist_del_init_rcu(&blkg->blkcg_node);
 
 	/*
@@ -572,9 +573,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
-	if (!blkg->q->disk)
-		return NULL;
-	return bdi_dev_name(blkg->q->disk->bdi);
+	return bdi_dev_name(blkg->disk->bdi);
 }
 
 /**
@@ -606,10 +605,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(&blkg->q->queue_lock);
-		if (blkcg_policy_enabled(blkg->q, pol))
+		spin_lock_irq(&blkg->disk->queue->queue_lock);
+		if (blkcg_policy_enabled(blkg->disk->queue, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
-		spin_unlock_irq(&blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->disk->queue->queue_lock);
 	}
 	rcu_read_unlock();
 
@@ -1033,9 +1032,9 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(&blkg->q->queue_lock);
+		spin_lock_irq(&blkg->disk->queue->queue_lock);
 		blkcg_print_one_stat(blkg, sf);
-		spin_unlock_irq(&blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->disk->queue->queue_lock);
 	}
 	rcu_read_unlock();
 	return 0;
@@ -1105,7 +1104,7 @@ static void blkcg_destroy_blkgs(struct blkcg *blkcg)
 	while (!hlist_empty(&blkcg->blkg_list)) {
 		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
 						struct blkcg_gq, blkcg_node);
-		struct request_queue *q = blkg->q;
+		struct request_queue *q = blkg->disk->queue;
 
 		if (need_resched() || !spin_trylock(&q->queue_lock)) {
 			/*
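
For completeness, a small sketch (the walker function and its callback
parameter are hypothetical, not part of the kernel) of the RCU-plus-queue_lock
iteration pattern that blkcg_print_blkgs() and blkcg_print_stat() follow in the
hunks above, with the queue lock now reached through the pinned gendisk:

/*
 * Sketch only: hypothetical helper mirroring the iteration pattern used by
 * blkcg_print_blkgs() and blkcg_print_stat() after this change.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include "blk-cgroup.h"	/* private header in block/ defining struct blkcg_gq */

static void blkg_for_each_locked(struct blkcg *blkcg,
				 void (*fn)(struct blkcg_gq *blkg, void *data),
				 void *data)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		/* The queue lock is reached via the pinned gendisk now. */
		spin_lock_irq(&blkg->disk->queue->queue_lock);
		fn(blkg, data);
		spin_unlock_irq(&blkg->disk->queue->queue_lock);
	}
	rcu_read_unlock();
}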