| author | Jens Axboe <axboe@fb.com> | 2017-04-10 09:54:54 -0600 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2017-04-28 08:10:15 -0600 |
| commit | 9f993737906b30d7b2454a38637d1f70ffd60f2f (patch) | |
| tree | de9f50b9f8411ad9306796746900195a0b6f45e6 /block/blk-mq.c | |
| parent | 60ae36ad0340b1ba88530d6a5e141455dd3afd81 (diff) | |
| download | lwn-9f993737906b30d7b2454a38637d1f70ffd60f2f.tar.gz lwn-9f993737906b30d7b2454a38637d1f70ffd60f2f.zip | |
blk-mq: unify hctx delayed_run_work and run_work
They serve the exact same purpose. Get rid of the non-delayed
work variant, and just run it without delay for the normal case.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 27
1 file changed, 6 insertions, 21 deletions
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e6aad49c1686..5c68fce87ffc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1166,13 +1166,9 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 		put_cpu();
 	}
 
-	if (msecs == 0)
-		kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
-					 &hctx->run_work);
-	else
-		kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-						 &hctx->delayed_run_work,
-						 msecs_to_jiffies(msecs));
+	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+					 &hctx->run_work,
+					 msecs_to_jiffies(msecs));
 }
 
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
@@ -1224,7 +1220,7 @@ EXPORT_SYMBOL(blk_mq_queue_stopped);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	cancel_work(&hctx->run_work);
+	cancel_delayed_work_sync(&hctx->run_work);
 	cancel_delayed_work(&hctx->delay_work);
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
@@ -1282,17 +1278,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
 
-	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
-
-	__blk_mq_run_hw_queue(hctx);
-}
-
-static void blk_mq_delayed_run_work_fn(struct work_struct *work)
-{
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
-
+	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
 	__blk_mq_run_hw_queue(hctx);
 }
 
@@ -1898,8 +1884,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (node == NUMA_NO_NODE)
 		node = hctx->numa_node = set->numa_node;
 
-	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
-	INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
+	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
```
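For readers less familiar with the workqueue API, the standalone sketch below (not part of the patch; the module, the `demo_ctx` structure, and its field names are made up for illustration) shows why a single `struct delayed_work` can replace the old work/delayed-work pair: queueing it with a delay of 0 jiffies runs it right away, the handler recovers its container through the embedded `.work` member, and the `_sync` cancel waits out a pending or running instance, mirroring the `blk_mq_stop_hw_queue()` hunk above.

```c
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for struct blk_mq_hw_ctx and its run_work member. */
struct demo_ctx {
	struct delayed_work run_work;
};

static struct demo_ctx demo;

static void demo_run_work_fn(struct work_struct *work)
{
	/*
	 * struct delayed_work embeds a struct work_struct named "work",
	 * hence the extra ".work" step in container_of() -- the same
	 * adjustment the patch makes in blk_mq_run_work_fn().
	 */
	struct demo_ctx *ctx = container_of(work, struct demo_ctx,
					    run_work.work);

	pr_info("demo: run_work executed for ctx %p\n", ctx);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo.run_work, demo_run_work_fn);

	/*
	 * A delay of 0 jiffies queues the work for immediate execution,
	 * so one delayed_work covers both the "run now" and the
	 * "run after msecs" cases; pass msecs_to_jiffies(msecs) for the
	 * latter.
	 */
	schedule_delayed_work(&demo.run_work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Like the new blk_mq_stop_hw_queue(): wait for any queued or
	 * running instance before tearing the context down. */
	cancel_delayed_work_sync(&demo.run_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("delayed_work unification sketch");
```

Note that `cancel_delayed_work_sync()`, unlike the plain `cancel_work()` it replaces in the hunk above, also waits for a handler that is already executing, so stopping the queue is a stronger guarantee after this change than before it.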