author | Jens Axboe <axboe@fb.com> | 2014-03-14 10:43:15 -0600
committer | Jens Axboe <axboe@fb.com> | 2014-03-14 10:43:15 -0600
commit | 95363efde193079541cb379eb47140e9c4d355d5 (patch)
tree | da7417035ca5f25d6818430feaa54cb09a2ba86d /block/blk-mq.c
parent | 89f8b33ca1ea881d1d84542282cb85d07d02e78d (diff)
download | lwn-95363efde193079541cb379eb47140e9c4d355d5.tar.gz lwn-95363efde193079541cb379eb47140e9c4d355d5.zip
blk-mq: allow blk_mq_init_commands() to return failure
If drivers do dynamic allocation in the hardware command init
path, then we need to be able to handle and return failures.

And if they do allocations or mappings in the init command path,
then we need a cleanup function to free up that space at exit
time. So add blk_mq_free_commands() as the cleanup function.

This is required for the mtip32xx driver conversion to blk-mq.
Signed-off-by: Jens Axboe <axboe@fb.com>
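
To make the new contract concrete, here is a minimal sketch of what a matching pair of driver callbacks could look like. The names struct my_cmd, my_init_cmd() and my_free_cmd(), and the use of rq->special to hold per-command state, are illustrative assumptions and not part of this patch:

```c
#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Hypothetical per-command driver state; not part of this patch. */
struct my_cmd {
	void *buf;
};

/*
 * Matches the init callback type taken by blk_mq_init_commands(): a
 * non-zero return value stops the walk and is propagated to the caller.
 */
static int my_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
		       struct request *rq, unsigned int idx)
{
	struct my_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq->special = cmd;
	return 0;
}

/* Matches the free callback type taken by blk_mq_free_commands(). */
static void my_free_cmd(void *data, struct blk_mq_hw_ctx *hctx,
			struct request *rq, unsigned int idx)
{
	kfree(rq->special);
	rq->special = NULL;
}
```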
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 52
1 file changed, 45 insertions, 7 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 01d8735db8d3..92284af4e0df 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1058,8 +1058,46 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
 	blk_mq_put_ctx(ctx);
 }
 
-static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
-				    void (*init)(void *, struct blk_mq_hw_ctx *,
+static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+				   int (*init)(void *, struct blk_mq_hw_ctx *,
+					struct request *, unsigned int),
+				   void *data)
+{
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < hctx->queue_depth; i++) {
+		struct request *rq = hctx->rqs[i];
+
+		ret = init(data, hctx, rq, i);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+int blk_mq_init_commands(struct request_queue *q,
+			 int (*init)(void *, struct blk_mq_hw_ctx *,
+					struct request *, unsigned int),
+			 void *data)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	int ret = 0;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		ret = blk_mq_init_hw_commands(hctx, init, data);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx,
+				    void (*free)(void *, struct blk_mq_hw_ctx *,
 						 struct request *, unsigned int),
 				    void *data)
 {
@@ -1068,12 +1106,12 @@ static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
 	for (i = 0; i < hctx->queue_depth; i++) {
 		struct request *rq = hctx->rqs[i];
 
-		init(data, hctx, rq, i);
+		free(data, hctx, rq, i);
 	}
 }
 
-void blk_mq_init_commands(struct request_queue *q,
-			  void (*init)(void *, struct blk_mq_hw_ctx *,
+void blk_mq_free_commands(struct request_queue *q,
+			  void (*free)(void *, struct blk_mq_hw_ctx *,
					       struct request *, unsigned int),
			  void *data)
 {
@@ -1081,9 +1119,9 @@ void blk_mq_init_commands(struct request_queue *q,
 	unsigned int i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_init_hw_commands(hctx, init, data);
+		blk_mq_free_hw_commands(hctx, free, data);
 }
-EXPORT_SYMBOL(blk_mq_init_commands);
+EXPORT_SYMBOL(blk_mq_free_commands);
 
 static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
 {
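
For completeness, a sketch of how a driver's queue setup and teardown paths might call the new exports, reusing the hypothetical my_init_cmd()/my_free_cmd() callbacks from the earlier sketch; struct my_dev and the function names here are likewise assumptions, not taken from the mtip32xx conversion:

```c
#include <linux/blk-mq.h>

/* Hypothetical driver-private device structure; not from the patch. */
struct my_dev {
	struct request_queue *queue;
};

static int my_driver_setup_commands(struct my_dev *dd)
{
	int ret;

	/*
	 * Walks every request on every hardware queue and stops at the
	 * first callback that returns non-zero, passing that value back.
	 */
	ret = blk_mq_init_commands(dd->queue, my_init_cmd, dd);
	if (ret)
		return ret;	/* e.g. -ENOMEM from my_init_cmd() */

	return 0;
}

static void my_driver_free_commands(struct my_dev *dd)
{
	/* Mirror of the init pass: my_free_cmd() runs for every command. */
	blk_mq_free_commands(dd->queue, my_free_cmd, dd);
}
```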