author    Christoph Hellwig <hch@infradead.org>    2011-08-23 14:50:29 +0200
committer Jens Axboe <jaxboe@fusionio.com>         2011-08-23 14:50:29 +0200
commit    65299a3b788bd274bed92f9fa3232082c9f3ea70
tree      191c0afe31c15ac4c1bab96e0a07fddb097d0de8    /block/cfq-iosched.c
parent    5dc06c5a70b79a323152bec7e55783e705767e63
block: separate priority boosting from REQ_META
Add a new REQ_PRIO flag to let requests preempt others in the CFQ I/O scheduler, and leave REQ_META purely for marking requests as metadata in blktrace. All existing callers of REQ_META except for XFS are updated to also set REQ_PRIO for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
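To illustrate the caller-side change described above: a metadata read that previously passed only REQ_META would now pass both flags. The following is a minimal sketch modeled on the buffer-head helpers of this kernel era; the wrapper function itself is hypothetical and not taken from the patch.

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/buffer_head.h>
#include <linux/blk_types.h>

/* Hypothetical helper: synchronously read one metadata block. */
static int example_read_meta_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	/*
	 * REQ_META only tags the request as metadata for blktrace;
	 * REQ_PRIO is what now makes CFQ boost it over plain sync I/O.
	 */
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}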
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  |  20 +++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
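The REQ_PRIO flag itself is not defined in this file; the same patch introduces it alongside REQ_META in include/linux/blk_types.h. A rough sketch of what that companion change looks like; the exact bit position and comments are assumptions, not copied from the patch.

enum rq_flag_bits {
	/* ... earlier flags elided ... */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	/* ... remaining flags elided ... */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_META	(1 << __REQ_META)
#define REQ_PRIO	(1 << __REQ_PRIO)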
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a33bd4377c61..16ace89613bc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -130,8 +130,8 @@ struct cfq_queue {
unsigned long slice_end;
long slice_resid;
- /* pending metadata requests */
- int meta_pending;
+ /* pending priority requests */
+ int prio_pending;
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
@@ -684,8 +684,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
if (rq_is_sync(rq1) != rq_is_sync(rq2))
return rq_is_sync(rq1) ? rq1 : rq2;
- if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
- return rq1->cmd_flags & REQ_META ? rq1 : rq2;
+ if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
+ return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
@@ -1612,9 +1612,9 @@ static void cfq_remove_request(struct request *rq)
cfqq->cfqd->rq_queued--;
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
- if (rq->cmd_flags & REQ_META) {
- WARN_ON(!cfqq->meta_pending);
- cfqq->meta_pending--;
+ if (rq->cmd_flags & REQ_PRIO) {
+ WARN_ON(!cfqq->prio_pending);
+ cfqq->prio_pending--;
}
}
@@ -3372,7 +3372,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
- if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
+ if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
return true;
/*
@@ -3439,8 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_context *cic = RQ_CIC(rq);
cfqd->rq_queued++;
- if (rq->cmd_flags & REQ_META)
- cfqq->meta_pending++;
+ if (rq->cmd_flags & REQ_PRIO)
+ cfqq->prio_pending++;
cfq_update_io_thinktime(cfqd, cfqq, cic);
cfq_update_io_seektime(cfqd, cfqq, rq);