author    Shaohua Li <shli@fb.com>   2017-03-27 15:19:42 -0700
committer Jens Axboe <axboe@fb.com>  2017-03-28 08:02:20 -0600
commit    b9147dd1bae2b15d6931ecd42f8606c775fecbc9 (patch)
tree      9becbcfbf24e535538680bb53f38962808b4e28e /block/blk-stat.c
parent    88eeca495ba7de749ff253376ec6be19bb05368d (diff)
blk-throttle: add a mechanism to estimate IO latency
The user configures a latency target, but the latency threshold for each request size isn't fixed. For an SSD, the IO latency highly depends on request size. To calculate the latency threshold, we sample some data, e.g., the average latency for request sizes 4k, 8k, 16k, 32k .. 1M. The latency threshold for each request size is the sampled latency (I'll call it the base latency) plus the latency target. For example, if the base latency for request size 4k is 80us and the user configures a latency target of 60us, the 4k latency threshold will be 80 + 60 = 140us.

To sample data, we calculate the order base 2 of the rounded-up IO sectors. If the IO size is bigger than 1M, it is accounted as 1M. Since the calculation rounds up, the base latency will be slightly smaller than the actual value. Also, if there isn't any IO dispatched for a specific IO size, we use the base latency of the next smaller IO size for it.

But we shouldn't sample data at just any time. The base latency is supposed to be the latency when the disk isn't congested, because we use the latency threshold to schedule IOs between cgroups. If the disk is congested, the latency is higher, and using it for scheduling is meaningless. Hence we only do the sampling when block throttling is in the LOW limit, with the assumption that the disk isn't congested in that state. If the assumption isn't true, e.g., the low limit is too high, the calculated latency threshold will be higher.

Hard disks are completely different: latency depends on spindle seeks instead of request size. Currently this feature is SSD only; we could probably use a fixed threshold like 4ms for hard disks, though.

Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
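The size bucketing and threshold arithmetic described above can be illustrated with a small standalone sketch. This is only an illustration of the scheme from the commit message: bucket_index(), base_latency[] and the sample latency values below are made up for the example and are not the kernel's actual identifiers or data.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT	9			/* 512-byte sectors */
#define MAX_IO_BYTES	(1024 * 1024)		/* IOs above 1M are accounted as 1M */

/* Illustrative bucketing: index 0 covers 4k, 1 covers 8k, ... 8 covers 1M. */
static int bucket_index(uint64_t bytes)
{
	uint64_t sectors;
	int order = 0;

	if (bytes > MAX_IO_BYTES)
		bytes = MAX_IO_BYTES;
	/* round sectors up, then take the order base 2 */
	sectors = (bytes + (1 << SECTOR_SHIFT) - 1) >> SECTOR_SHIFT;
	while ((1ULL << order) < sectors)
		order++;
	/* 4k = 8 sectors = 2^3, so shift the order so that 4k maps to bucket 0 */
	return order < 3 ? 0 : order - 3;
}

int main(void)
{
	/* Hypothetical sampled base latencies (us) per bucket, 4k .. 1M */
	unsigned int base_latency[9] = { 80, 85, 95, 110, 140, 200, 320, 550, 1000 };
	unsigned int latency_target = 60;	/* user-configured target (us) */
	uint64_t sizes[] = { 4096, 16384, 1 << 20, 4 << 20 };

	for (int i = 0; i < 4; i++) {
		int idx = bucket_index(sizes[i]);
		printf("%8llu bytes -> bucket %d, threshold %u us\n",
		       (unsigned long long)sizes[i], idx,
		       base_latency[idx] + latency_target);
	}
	return 0;
}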
Diffstat (limited to 'block/blk-stat.c')
-rw-r--r--   block/blk-stat.c   15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 188b535cf4d6..e77ec52f5bb5 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -9,12 +9,14 @@
#include "blk-stat.h"
#include "blk-mq.h"
+#include "blk.h"
#define BLK_RQ_STAT_BATCH 64
struct blk_queue_stats {
struct list_head callbacks;
spinlock_t lock;
+ bool enable_accounting;
};
unsigned int blk_stat_rq_ddir(const struct request *rq)
@@ -96,6 +98,8 @@ void blk_stat_add(struct request *rq)
value = now - blk_stat_time(&rq->issue_stat);
+ blk_throtl_stat_add(rq, value);
+
rcu_read_lock();
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
if (blk_stat_is_active(cb)) {
@@ -190,7 +194,7 @@ void blk_stat_remove_callback(struct request_queue *q,
{
spin_lock(&q->stats->lock);
list_del_rcu(&cb->list);
- if (list_empty(&q->stats->callbacks))
+ if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
spin_unlock(&q->stats->lock);
@@ -215,6 +219,14 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);
+void blk_stat_enable_accounting(struct request_queue *q)
+{
+ spin_lock(&q->stats->lock);
+ q->stats->enable_accounting = true;
+ set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+ spin_unlock(&q->stats->lock);
+}
+
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
struct blk_queue_stats *stats;
@@ -225,6 +237,7 @@ struct blk_queue_stats *blk_alloc_queue_stats(void)
INIT_LIST_HEAD(&stats->callbacks);
spin_lock_init(&stats->lock);
+ stats->enable_accounting = false;
return stats;
}
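As a usage note, the new blk_stat_enable_accounting() keeps QUEUE_FLAG_STATS set even after the last stats callback is removed, so completion latencies keep flowing into blk_throtl_stat_add(). A minimal, hypothetical caller might look like the sketch below; the function name here is illustrative, and the real enabling presumably happens on the blk-throttle side of this series rather than in this file.

#include "blk-stat.h"

/* Hypothetical consumer: ask the block layer to account request latencies
 * on this queue even when no blk_stat_callback is registered, so that
 * blk_stat_add() keeps reporting each completed request's latency. */
static void example_enable_latency_accounting(struct request_queue *q)
{
	/* Sets enable_accounting and QUEUE_FLAG_STATS under stats->lock;
	 * blk_stat_remove_callback() will no longer clear the flag. */
	blk_stat_enable_accounting(q);
}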