| author | Christoph Hellwig <hch@lst.de> | 2014-05-28 18:11:06 +0200 |
| --- | --- | --- |
| committer | Jens Axboe <axboe@fb.com> | 2014-05-28 10:18:31 -0600 |
| commit | cdef54dd85ad66e77262ea57796a3e81683dd5d6 (patch) | |
| tree | 55ae5b4f46a884b15879eaab19c7af72db23927b /block/blk-mq.c | |
| parent | 75bb4625bb78d6a2d879dcb6a7d482861295765b (diff) | |
| download | lwn-cdef54dd85ad66e77262ea57796a3e81683dd5d6.tar.gz lwn-cdef54dd85ad66e77262ea57796a3e81683dd5d6.zip | |
blk-mq: remove alloc_hctx and free_hctx methods
There is no need for drivers to control hardware context allocation
now that we do the context-to-node mapping in common code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
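
Because this view is limited to block/blk-mq.c, the driver side of the change is not shown. As a hedged illustration only: after this patch a driver's ops table needs just queue_rq and map_queue, matching the relaxed check in blk_mq_alloc_tag_set() in the diff below. The example_* names are hypothetical, not from the commit.

```c
#include <linux/blk-mq.h>

/* Hypothetical driver stub; in this 2014-era API, queue_rq receives
 * the hardware context and the request to issue. */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	/* ... hand rq to the hardware ... */
	return BLK_MQ_RQ_QUEUE_OK;
}

/* Sketch of a post-patch ops table: no .alloc_hctx/.free_hctx members;
 * the core now allocates each hardware context with kzalloc_node() and
 * frees it with kfree(), as the hunks below show. */
static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* common helper, still exported */
};
```

A driver whose hooks merely forwarded to the default blk_mq_alloc_single_hw_queue()/blk_mq_free_single_hw_queue() helpers can drop those lines with no behavior change, since the core now performs the same kzalloc_node()/kfree() itself.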
Diffstat (limited to 'block/blk-mq.c')
| mode | path | changed |
| --- | --- | --- |
| -rw-r--r-- | block/blk-mq.c | 26 |

1 file changed, 5 insertions, 21 deletions
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5cc4b871cb11..f27fe44230c2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1335,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
 }
 EXPORT_SYMBOL(blk_mq_map_queue);
 
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
-		unsigned int hctx_index,
-		int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
-		unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 		struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1590,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		free_cpumask_var(hctx->cpumask);
-		set->ops->free_hctx(hctx, i);
+		kfree(hctx);
 	}
 }
 
@@ -1811,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node = blk_mq_hw_queue_to_node(map, i);
 
-		hctxs[i] = set->ops->alloc_hctx(set, i, node);
+		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+				GFP_KERNEL, node);
 		if (!hctxs[i])
 			goto err_hctxs;
 
@@ -1898,7 +1884,7 @@ err_hctxs:
 		if (!hctxs[i])
 			break;
 		free_cpumask_var(hctxs[i]->cpumask);
-		set->ops->free_hctx(hctxs[i], i);
+		kfree(hctxs[i]);
 	}
 err_map:
 	kfree(hctxs);
@@ -1983,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues ||
-	    !set->ops->queue_rq || !set->ops->map_queue ||
-	    !set->ops->alloc_hctx || !set->ops->free_hctx)
+	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
```
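
The struct blk_mq_ops members themselves are removed from include/linux/blk-mq.h by the same commit, outside this file-limited diffstat. A reconstructed sketch of the dropped hooks, with signatures inferred from the set->ops->alloc_hctx(set, i, node) and set->ops->free_hctx(hctx, i) call sites deleted above (the container struct name is illustrative):

```c
/* Reconstructed sketch, not part of the diff above: the per-driver
 * hooks this commit retires.  Their default implementations were the
 * blk_mq_alloc_single_hw_queue()/blk_mq_free_single_hw_queue()
 * helpers deleted in the first hunk. */
struct blk_mq_ops_dropped_members {		/* illustrative name */
	struct blk_mq_hw_ctx *(*alloc_hctx)(struct blk_mq_tag_set *set,
					    unsigned int hctx_index,
					    int node);
	void (*free_hctx)(struct blk_mq_hw_ctx *hctx,
			  unsigned int hctx_index);
};
```

With the queue-to-node mapping computed centrally via blk_mq_hw_queue_to_node(), a plain NUMA-local kzalloc_node() serves every driver, so the per-driver indirection no longer buys anything.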