Author:    Jens Axboe <jens.axboe@oracle.com>  2006-12-13 13:02:26 +0100
Committer: Jens Axboe <jens.axboe@oracle.com>  2006-12-13 13:02:26 +0100
Commit:    7749a8d423c483a51983b666613acda1a4dd9c1b
Tree:      5a2a20640cca9ca519324b7933005f6fd9c4a6a5 /block/ll_rw_blk.c
Parent:    445722f97a0ecd3aed3f53d9f0dcaacaef8c6223
[PATCH] Propagate down request sync flag
We need to do this, otherwise the io schedulers don't get access to the
sync flag. Then they cannot tell the difference between a regular write
and an O_DIRECT write, which can cause a performance loss.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
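For orientation, the core of the change can be condensed into a short sketch, based on the __make_request() hunk further down. The helper name example_alloc_for_bio() is hypothetical and only illustrates the flow; everything else (bio_data_dir(), bio_sync(), REQ_RW_SYNC, get_request_wait()) appears in the patch itself:

/*
 * Hypothetical helper, for illustration only: build the flags handed to
 * the request allocator from the bio, so the sync hint reaches the io
 * scheduler instead of only the bare data direction.
 */
static struct request *example_alloc_for_bio(request_queue_t *q,
					     struct bio *bio)
{
	int rw_flags = bio_data_dir(bio);	/* READ or WRITE */

	if (bio_sync(bio))			/* set for e.g. O_DIRECT writes */
		rw_flags |= REQ_RW_SYNC;	/* expose the sync hint downstream */

	/* previously: get_request_wait(q, bio_data_dir(bio), bio) */
	return get_request_wait(q, rw_flags, bio);
}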
Diffstat (limited to 'block/ll_rw_blk.c')
 -rw-r--r--  block/ll_rw_blk.c  |  28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a541b42c08e3..79807dbc306e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = NULL;
+ const int rw = rw_flags & 0x01;
int may_queue, priv;
- may_queue = elv_may_queue(q, rw);
+ may_queue = elv_may_queue(q, rw_flags);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, priv, gfp_mask);
+ rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
struct bio *bio)
{
+ const int rw = rw_flags & 0x01;
struct request *rq;
- rq = get_request(q, rw, bio, GFP_NOIO);
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
while (!rq) {
DEFINE_WAIT(wait);
struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- rq = get_request(q, rw, bio, GFP_NOIO);
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
int el_ret, nr_sectors, barrier, err;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
+ int rw_flags;
nr_sectors = bio_sectors(bio);
@@ -2984,10 +2987,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
get_rq:
/*
+ * This sync check and mask will be re-done in init_request_from_bio(),
+ * but we need to set it earlier to expose the sync flag to the
+ * rq allocator and io schedulers.
+ */
+ rw_flags = bio_data_dir(bio);
+ if (sync)
+ rw_flags |= REQ_RW_SYNC;
+
+ /*
* Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked.
*/
- req = get_request_wait(q, bio_data_dir(bio), bio);
+ req = get_request_wait(q, rw_flags, bio);
/*
* After dropping the lock and possibly sleeping here, our request
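With the flags propagated, an io scheduler's may_queue hook can finally see the sync bit at allocation time. Below is a rough, hypothetical sketch of such a check; example_may_queue() and its return-value policy are illustrative assumptions, not code from any in-tree scheduler, and only rw_flags & 0x01, REQ_RW_SYNC, and the ELV_MQUEUE_* constants come from the kernel of this era:

/*
 * Hypothetical sketch: a may_queue-style hook that uses the propagated
 * flags to tell sync (O_DIRECT) writes apart from regular async writes.
 * Reads are treated as sync, matching the usual block-layer convention.
 */
static int example_may_queue(request_queue_t *q, int rw_flags)
{
	const int rw = rw_flags & 0x01;		/* data direction, as in get_request() */
	const int is_sync = (rw == READ) || (rw_flags & REQ_RW_SYNC);

	if (is_sync)
		return ELV_MQUEUE_MUST;		/* assumed policy: favour sync I/O */

	return ELV_MQUEUE_MAY;
}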