author     Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>  2015-12-02 16:57:06 +0530
committer  Jens Axboe <axboe@fb.com>  2015-12-03 09:56:25 -0700
commit     e0e827b9fc71fbed1a9cd246067c2a4dbd3ea220 (patch)
tree       9f3a29a597a6fd3d3e2b71604f30317177eae312 /block/blk-mq.c
parent     6f3b0e8bcf3cbb87a7459b3ed018d31d918df3f8 (diff)
download   lwn-e0e827b9fc71fbed1a9cd246067c2a4dbd3ea220.tar.gz
           lwn-e0e827b9fc71fbed1a9cd246067c2a4dbd3ea220.zip
blk-mq: Reuse hardware context cpumask for tags
hctx->cpumask is already populated, so let the tag cpumask follow it instead of
rebuilding it in a separate for loop.

Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
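The change boils down to replacing a per-CPU test-and-set loop with a single whole-mask copy, since hctx->cpumask already holds the finished mapping. Below is a minimal userspace sketch of that idea, using glibc's cpu_set_t as a stand-in for the kernel's struct cpumask; the hctx_mask/tags_mask names are hypothetical and the kernel's per-queue and online-CPU handling is left out.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	cpu_set_t hctx_mask, tags_mask, tags_mask_copy;
	int cpu;

	/* Pretend CPUs 0 and 2 are the ones mapped to this hardware context. */
	CPU_ZERO(&hctx_mask);
	CPU_SET(0, &hctx_mask);
	CPU_SET(2, &hctx_mask);

	/* Old approach: test every CPU and set the matching bits one by one. */
	CPU_ZERO(&tags_mask);
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &hctx_mask))
			CPU_SET(cpu, &tags_mask);

	/* New approach: the source mask is already populated, so copy it whole. */
	memcpy(&tags_mask_copy, &hctx_mask, sizeof(cpu_set_t));

	printf("masks match: %d\n", CPU_EQUAL(&tags_mask, &tags_mask_copy));
	return 0;
}

Either way the tag set ends up seeing the same CPUs; the copy simply reuses the mask the hardware context already computed rather than deriving it again from the software-context mapping.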
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  9
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 93a4e1956915..35da31841eda 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1850,6 +1850,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
 
+		cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -1863,14 +1864,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
-
-	queue_for_each_ctx(q, ctx, i) {
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
-		hctx = q->mq_ops->map_queue(q, i);
-		cpumask_set_cpu(i, hctx->tags->cpumask);
-	}
 }
 
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)