author     Chengming Zhou <zhouchengming@bytedance.com>   2023-09-08 08:57:02 +0800
committer  Jens Axboe <axboe@kernel.dk>                   2023-09-11 16:17:34 -0600
commit     6be6d112419713334ddd9c01f219ca16adaa4c76 (patch)
tree       18d2b39d8bd624ce27fa3779e95860aed42db0b0 /block/blk-mq.c
parent     0bb80ecc33a8fb5a682236443c1e740d5c917d1d (diff)
blk-mq: fix tags UAF when shrinking q->nr_hw_queues
When nr_hw_queues shrinks, we free the excess tags before reallocating
the hw_ctxs for each queue. During that resize we may still need to access
those tags; for example, blk_mq_tag_idle(hctx) accesses the queue's shared
tags.

This can cause a slab use-after-free, as reported by KASAN. Fix it by
moving the release of the excess tags to the end.
Fixes: e1dd7bc93029 ("blk-mq: fix tags leak when shrink nr_hw_queues")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Closes: https://lore.kernel.org/all/CAHj4cs_CK63uoDpGBGZ6DN4OCTpzkR3UaVgK=LX8Owr8ej2ieQ@mail.gmail.com/
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20230908005702.2183908-1-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
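
For readers outside the block layer, the ordering problem described above can be sketched in a few lines of plain C. This is a simplified illustration, not the blk-mq code; all names (toy_tags, resize_queues, the array sizes) are hypothetical. The only point is where the "free the excess entries" step sits relative to a resize that still reads them.

	/*
	 * Toy model of the ordering bug: entries beyond the new queue count
	 * must stay valid until the resize is done with them.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct toy_tags {
		int depth;
	};

	static struct toy_tags *tags[8];

	/* Stand-in for the hw_ctx realloc path: it still inspects every old queue. */
	static void resize_queues(int old_nr, int new_nr)
	{
		for (int i = 0; i < old_nr; i++)
			printf("queue %d: depth %d\n", i, tags[i]->depth);
		(void)new_nr;
	}

	int main(void)
	{
		int old_nr = 4, new_nr = 2;

		for (int i = 0; i < old_nr; i++) {
			tags[i] = malloc(sizeof(*tags[i]));
			tags[i]->depth = 64;
		}

		/*
		 * Buggy ordering: freeing the excess entries here, before the
		 * resize, would leave resize_queues() reading freed memory:
		 *
		 *	for (int i = new_nr; i < old_nr; i++)
		 *		free(tags[i]);
		 */

		resize_queues(old_nr, new_nr);

		/* Fixed ordering: release the excess entries only after the resize. */
		for (int i = new_nr; i < old_nr; i++) {
			free(tags[i]);
			tags[i] = NULL;
		}

		for (int i = 0; i < new_nr; i++)
			free(tags[i]);
		return 0;
	}
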
Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ec922c6bccbe..1fafd54dce3c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
 	struct blk_mq_tags **new_tags;
 	int i;
 
-	if (set->nr_hw_queues >= new_nr_hw_queues) {
-		for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
-			__blk_mq_free_map_and_rqs(set, i);
+	if (set->nr_hw_queues >= new_nr_hw_queues)
 		goto done;
-	}
 
 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
 				GFP_KERNEL, set->numa_node);
@@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
 	struct request_queue *q;
 	LIST_HEAD(head);
-	int prev_nr_hw_queues;
+	int prev_nr_hw_queues = set->nr_hw_queues;
+	int i;
 
 	lockdep_assert_held(&set->tag_list_lock);
 
@@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister_hctxs(q);
 	}
 
-	prev_nr_hw_queues = set->nr_hw_queues;
 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
 		goto reregister;
 
@@ -4781,6 +4778,10 @@ switch_back:
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+
+	/* Free the excess tags when nr_hw_queues shrink. */
+	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+		__blk_mq_free_map_and_rqs(set, i);
 }
 
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
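
Reading the change: the loop that frees the excess tags is removed from blk_mq_realloc_tag_set_tags() and reappears at the very end of __blk_mq_update_nr_hw_queues(), after the queues have been reallocated, re-registered and unfrozen, so anything that still touches those tags during the resize (such as blk_mq_tag_idle()) sees valid memory. To make that possible, prev_nr_hw_queues is now initialized at the top of the function, so the final loop knows how many queues existed before the shrink.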