author     Jens Axboe <axboe@fb.com>    2014-05-09 09:36:49 -0600
committer  Jens Axboe <axboe@fb.com>    2014-05-09 09:36:49 -0600
commit     4bb659b156996f2993dc16fad71fec9ee070153c (patch)
tree       baff4b0882edfc01f082b321e7924674a0fe1e90 /block/blk-mq.c
parent     af76e555e5e29e08eb8ac1f7878e23dbf0d6741f (diff)
blk-mq: implement new and more efficient tagging scheme
blk-mq currently uses percpu_ida for tag allocation. But that only works well if the ratio between tag space and number of CPUs is sufficiently high. For most devices and systems, that is not the case. The end result is that we either only utilize the tag space partially, or we end up attempting to fully exhaust it and run into lots of lock contention with stealing between CPUs. This is not optimal.

The new tagging scheme is a hybrid bitmap allocator. It uses two tricks to be SMP friendly while still allowing full exhaustion of the space (a user-space sketch of both ideas follows this message):

1) We cache the last allocated (or freed) tag on a per blk-mq software context basis. This allows us to limit the space we have to search. The key element here is not caching it in the shared tag structure, otherwise we end up dirtying more shared cache lines on each allocate/free operation.

2) The tag space is split into cache line sized groups, and each context starts off at a random point in that space. Even at full utilization of the space, this divides the tag users efficiently into cache line groups, avoiding dirtying the same one both between allocators and between allocator and freer.

This scheme shows drastically better behaviour on both small and large tag spaces. It has been tested extensively to show better performance for all the cases blk-mq cares about.

Signed-off-by: Jens Axboe <axboe@fb.com>
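[Editorial sketch] The two tricks above can be modelled with a short user-space program. This is illustrative only and is not the kernel's blk_mq_tags/blk_mq_get_tag implementation: the names (sketch_tag_map, sketch_ctx, sketch_get_tag, sketch_put_tag), the use of one 64-bit word to stand in for a cache-line-sized tag group, and the absence of atomic bit operations are all simplifying assumptions.

/*
 * Illustrative sketch of the commit's two ideas, NOT kernel code:
 *  1) each software context keeps its own last_tag hint, so searches
 *     stay local and no shared cache line is dirtied for the hint;
 *  2) the tag space is split into fixed-size words (standing in for
 *     cache-line-sized groups), and each context starts at a random
 *     point, so concurrent users tend to work in different groups.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAGS_PER_WORD 64        /* one word stands in for a cache-line group */

struct sketch_tag_map {
        uint64_t *words;        /* one bit per tag, 1 = in use */
        unsigned int nr_words;
        unsigned int nr_tags;   /* assumed multiple of TAGS_PER_WORD */
};

struct sketch_ctx {
        unsigned int last_tag;  /* per-context hint, like ctx->last_tag */
};

static void sketch_ctx_init(struct sketch_ctx *ctx, unsigned int nr_tags)
{
        /* Random starting point spreads contexts across groups. */
        ctx->last_tag = (unsigned int)rand() % nr_tags;
}

static int sketch_get_tag(struct sketch_tag_map *map, struct sketch_ctx *ctx)
{
        unsigned int start = ctx->last_tag / TAGS_PER_WORD;
        unsigned int w, bit;

        /* Search the group named by the hint first, then the rest. */
        for (w = 0; w < map->nr_words; w++) {
                unsigned int idx = (start + w) % map->nr_words;
                uint64_t word = map->words[idx];

                if (word == UINT64_MAX)
                        continue;       /* group fully used, try the next */
                for (bit = 0; bit < TAGS_PER_WORD; bit++) {
                        if (!(word & ((uint64_t)1 << bit))) {
                                /* Real code needs an atomic test-and-set. */
                                map->words[idx] |= (uint64_t)1 << bit;
                                ctx->last_tag = idx * TAGS_PER_WORD + bit;
                                return (int)ctx->last_tag;
                        }
                }
        }
        return -1;      /* space exhausted; the kernel would wait here */
}

static void sketch_put_tag(struct sketch_tag_map *map, unsigned int tag,
                           struct sketch_ctx *ctx)
{
        map->words[tag / TAGS_PER_WORD] &= ~((uint64_t)1 << (tag % TAGS_PER_WORD));
        ctx->last_tag = tag;    /* freed tag becomes the next hint */
}

int main(void)
{
        uint64_t words[4] = { 0 };
        struct sketch_tag_map map = { words, 4, 4 * TAGS_PER_WORD };
        struct sketch_ctx ctx;
        int tag;

        sketch_ctx_init(&ctx, map.nr_tags);
        tag = sketch_get_tag(&map, &ctx);
        printf("allocated tag %d\n", tag);
        if (tag >= 0)
                sketch_put_tag(&map, (unsigned int)tag, &ctx);
        return 0;
}

The real allocator additionally needs atomic bit operations on the shared bitmap and a wait mechanism on exhaustion (cf. blk_mq_wait_for_tags in the diff below); the sketch only captures the search order and the per-context locality.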
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--    block/blk-mq.c    23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 492f49f96459..9f07a266f7ab 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -74,12 +74,13 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
}
static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx,
gfp_t gfp, bool reserved)
{
struct request *rq;
unsigned int tag;
- tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
+ tag = blk_mq_get_tag(hctx->tags, hctx, &ctx->last_tag, gfp, reserved);
if (tag != BLK_MQ_TAG_FAIL) {
rq = hctx->tags->rqs[tag];
rq->tag = tag;
@@ -246,7 +247,8 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
+ rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT,
+ reserved);
if (rq) {
blk_mq_rq_ctx_init(q, ctx, rq, rw);
break;
@@ -260,7 +262,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
break;
}
- blk_mq_wait_for_tags(hctx->tags, reserved);
+ blk_mq_wait_for_tags(hctx->tags, hctx, reserved);
} while (1);
return rq;
@@ -278,6 +280,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
blk_mq_put_ctx(rq->mq_ctx);
return rq;
}
+EXPORT_SYMBOL(blk_mq_alloc_request);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
gfp_t gfp)
@@ -301,7 +304,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
struct request_queue *q = rq->q;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- blk_mq_put_tag(hctx->tags, tag);
+ blk_mq_put_tag(hctx->tags, tag, &ctx->last_tag);
blk_mq_queue_exit(q);
}
@@ -677,11 +680,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
queued++;
continue;
case BLK_MQ_RQ_QUEUE_BUSY:
- /*
- * FIXME: we should have a mechanism to stop the queue
- * like blk_stop_queue, otherwise we will waste cpu
- * time
- */
list_add(&rq->queuelist, &rq_list);
__blk_mq_requeue_request(rq);
break;
@@ -873,6 +871,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
list_add(&rq->queuelist, &ctx->rq_list);
else
list_add_tail(&rq->queuelist, &ctx->rq_list);
+
blk_mq_hctx_mark_pending(hctx, ctx);
/*
@@ -1046,7 +1045,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (is_sync)
rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
- rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
+ rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
if (likely(rq))
blk_mq_rq_ctx_init(q, ctx, rq, rw);
else {
@@ -1130,8 +1129,8 @@ EXPORT_SYMBOL(blk_mq_map_queue);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
unsigned int hctx_index)
{
- return kmalloc_node(sizeof(struct blk_mq_hw_ctx),
- GFP_KERNEL | __GFP_ZERO, set->numa_node);
+ return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
+ set->numa_node);
}
EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);