author     Jens Axboe <jens.axboe@oracle.com>  2009-04-06 14:48:01 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-04-06 08:04:53 -0700
commit     1faa16d22877f4839bd433547d770c676d1d964c (patch)
tree       9a0d50be1ef0358c1f53d7107413100904e7d526 /block/blk-sysfs.c
parent     0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c (diff)
block: change the request allocation/congestion logic to be sync/async based
This makes sure that we never wait on async IO for sync requests, instead
of doing the split on writes vs reads.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
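To make the change concrete, here is a minimal, self-contained sketch of the classification the commit moves to. It is illustrative only: the EX_* flags, the helper name, and the enum values are assumptions for the example, not the kernel's actual definitions. The idea is that reads and explicitly-sync writes share one request list, while ordinary async writes go to the other, so sync requests no longer wait behind async IO for request slots.

/*
 * Illustrative sketch only -- not the kernel's definitions.  The flag
 * names (EX_WRITE, EX_SYNC), the helper, and the enum values are
 * assumptions made for this example.
 */
enum ex_rw_class {
	BLK_RW_ASYNC = 0,
	BLK_RW_SYNC  = 1,
};

#define EX_WRITE	(1U << 0)	/* request is a write */
#define EX_SYNC		(1U << 1)	/* caller is waiting on this request */

/* Reads, and writes explicitly marked sync, share the sync request list. */
static inline int ex_rw_class_of(unsigned int rw_flags)
{
	if (!(rw_flags & EX_WRITE) || (rw_flags & EX_SYNC))
		return BLK_RW_SYNC;
	return BLK_RW_ASYNC;
}

With that classification, the per-list accounting in the diff below is done per sync/async class rather than per read/write direction.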
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--  block/blk-sysfs.c  |  40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e29ddfc73cf4..3ff9bba3379a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
-	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, READ);
-	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, READ);
-
-	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, WRITE);
-	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, WRITE);
-
-	if (rl->count[READ] >= q->nr_requests) {
-		blk_set_queue_full(q, READ);
-	} else if (rl->count[READ]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, READ);
-		wake_up(&rl->wait[READ]);
+	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, BLK_RW_SYNC);
+	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, BLK_RW_ASYNC);
+	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+		blk_set_queue_full(q, BLK_RW_SYNC);
+	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, BLK_RW_SYNC);
+		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
-	if (rl->count[WRITE] >= q->nr_requests) {
-		blk_set_queue_full(q, WRITE);
-	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, WRITE);
-		wake_up(&rl->wait[WRITE]);
+	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+		blk_set_queue_full(q, BLK_RW_ASYNC);
+	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, BLK_RW_ASYNC);
+		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
 
 	spin_unlock_irq(q->queue_lock);
 	return ret;
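For readers following the hunk above, this is a minimal sketch of the bookkeeping it performs for each rw class after nr_requests changes. The standalone struct and function names are assumptions for illustration; the real code operates on struct request_list and the queue/bdi flags under q->queue_lock. Congestion uses separate on/off thresholds so the state does not flap around a single boundary, while "full" is compared against nr_requests directly and waiters are woken once the list drops back below the limit.

#include <stdbool.h>

/* Illustrative per-class state; the kernel keeps this in queue/bdi flags. */
struct ex_class_state {
	bool congested;
	bool full;
};

/*
 * Re-evaluate one rw class (BLK_RW_SYNC or BLK_RW_ASYNC) after the
 * nr_requests limit changed.  Thresholds are passed in here; the kernel
 * derives nr_congestion_on/off in blk_queue_congestion_threshold().
 */
static void ex_recheck_class(struct ex_class_state *st, int count,
			     int nr_requests, int on_thresh, int off_thresh)
{
	if (count >= on_thresh)
		st->congested = true;		/* blk_set_queue_congested() */
	else if (count < off_thresh)
		st->congested = false;		/* blk_clear_queue_congested() */

	if (count >= nr_requests) {
		st->full = true;		/* blk_set_queue_full() */
	} else {
		/* "count+1 <= nr_requests" in the hunk is count < nr_requests */
		st->full = false;		/* blk_clear_queue_full() */
		/* kernel also does wake_up(&rl->wait[class]) here */
	}
}

Doing this once for BLK_RW_SYNC and once for BLK_RW_ASYNC is exactly the structure of the updated queue_requests_store() above, with the read/write split replaced by the sync/async split.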