author:    Omar Sandoval <osandov@fb.com>  2017-04-05 12:01:31 -0700
committer: Jens Axboe <axboe@fb.com>  2017-04-07 08:56:46 -0600
commit:    93252632e828da3e90241a1c0e766556abf71598 (patch)
tree:      388756bb6862ea8943c87240644a33feb8c5c402 /block/blk-mq.c
parent:    6917ff0b5bd4139e08a3f3146529dcb3b95ba7a6 (diff)
blk-mq-sched: set up scheduler tags when bringing up new queues
If a new hardware queue is added at runtime, we don't allocate scheduler
tags for it, leading to a crash. This hooks up the scheduler framework to
blk_mq_{init,exit}_hctx() to make sure everything gets properly
initialized/freed.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
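The scheduler-side half of this hookup lives in block/blk-mq-sched.c and is not part of the blk-mq.c diff shown below. As a rough sketch (paraphrased, not the literal commit code), the two hooks called here are expected to behave along the following lines; the helper names blk_mq_sched_alloc_tags() and blk_mq_sched_free_tags() are assumptions used only for illustration:

	/* Sketch only; the real definitions live in block/blk-mq-sched.c. */
	int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
	{
		struct elevator_queue *e = q->elevator;

		/* No I/O scheduler attached to this queue: nothing to allocate. */
		if (!e)
			return 0;

		/* Allocate scheduler tags for the newly added hardware queue. */
		return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	}

	void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
				    unsigned int hctx_idx)
	{
		struct elevator_queue *e = q->elevator;

		if (!e)
			return;

		/* Release the scheduler tags allocated in blk_mq_sched_init_hctx(). */
		blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
	}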
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 828f4bd193f2..72e744cd638c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1924,6 +1924,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 				       hctx->fq->flush_rq, hctx_idx,
 				       flush_start_tag + hctx_idx);
 
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -1990,9 +1992,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
+	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+		goto exit_hctx;
+
 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
 	if (!hctx->fq)
-		goto exit_hctx;
+		goto sched_exit_hctx;
 
 	if (set->ops->init_request &&
 	    set->ops->init_request(set->driver_data,
@@ -2007,6 +2012,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
  free_fq:
 	kfree(hctx->fq);
+ sched_exit_hctx:
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
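Note how the error unwinding mirrors the new setup order: if blk_alloc_flush_queue() fails after the scheduler tags have already been allocated, the new sched_exit_hctx label calls blk_mq_sched_exit_hctx() before falling through to exit_hctx, so teardown remains the exact reverse of initialization.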