author		Jens Axboe <axboe@fb.com>	2014-05-07 10:26:44 -0600
committer	Jens Axboe <axboe@fb.com>	2014-05-07 10:26:44 -0600
commit		506e931f92defdc60c1dc4aa2ff4a19a5dcd8618 (patch)
tree		8c0fdc0c0c4186f927246b5164396da446fbc8e5 /include/linux/blk-mq.h
parent		5cf8c2277576fcc48966b105bb42782d7929fc48 (diff)
blk-mq: add basic round-robin of what CPU to queue workqueue work on
Right now we just pick the first CPU in the mask, but that can easily overload that one. Add some basic batching and round-robin all the entries in the mask instead.

Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--	include/linux/blk-mq.h	| 4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 3b561d651a02..5bd677e2dcb7 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -21,6 +21,8 @@ struct blk_mq_hw_ctx {
 	struct delayed_work	run_work;
 	struct delayed_work	delay_work;
 	cpumask_var_t		cpumask;
+	int			next_cpu;
+	int			next_cpu_batch;
 	unsigned long		flags;		/* BLK_MQ_F_* flags */
@@ -126,6 +128,8 @@ enum {
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_MAX_DEPTH	= 2048,
+
+	BLK_MQ_CPU_WORK_BATCH	= 8,
 };
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
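This diff is limited to the header, so the code that consumes the two new fields lives in block/blk-mq.c and is not shown here. As a rough sketch of how next_cpu, next_cpu_batch and BLK_MQ_CPU_WORK_BATCH are intended to interact, inferred from the commit message rather than taken from this diff (the helper name below is illustrative), the CPU selection could look like this:

#include <linux/blk-mq.h>
#include <linux/cpumask.h>

/*
 * Sketch: choose the CPU to run this hardware context's workqueue
 * work on.  Hand out the cached hctx->next_cpu until the batch
 * counter is exhausted, then advance round-robin to the next CPU
 * in hctx->cpumask (wrapping back to the first CPU at the end of
 * the mask) and reset the counter to BLK_MQ_CPU_WORK_BATCH.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = hctx->next_cpu;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return cpu;
}

With BLK_MQ_CPU_WORK_BATCH at 8, each CPU in the mask would receive eight queue runs before the work moves on, spreading the load across the mask instead of piling it onto the first CPU.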