author     Omar Sandoval <osandov@fb.com>    2017-02-02 15:42:39 -0800
committer  Jens Axboe <axboe@fb.com>         2017-02-02 16:57:56 -0700
commit     0cacba6cf8252438f8166bd3fa1c3370dd28a769 (patch)
tree       c2b1dc91a0dadcc07be4e2fded65622d176c2720 /block/blk-mq-sched.c
parent     e17354961bb50931ec7b33f59c0713dcf98ac7d2 (diff)
download   lwn-0cacba6cf8252438f8166bd3fa1c3370dd28a769.tar.gz
           lwn-0cacba6cf8252438f8166bd3fa1c3370dd28a769.zip
blk-mq-sched: bypass the scheduler for flushes entirely
There's a weird inconsistency: flushes are mostly hidden from the
scheduler, yet it still needs to be aware of them in ->insert_requests().
Instead of having every scheduler call blk_mq_sched_bypass_insert(),
let's do it in the common framework.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
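
Before the diff, a minimal standalone sketch of the pattern the commit message describes, in plain C with hypothetical names (struct scheduler, bypass_insert(), sched_insert_request() and example_sched_insert() are illustrative stand-ins, not the blk-mq API): the common insert path checks whether a request already owns a driver tag and, if so, dispatches it directly instead of handing it to the scheduler's insert hook, so individual schedulers never have to call the bypass helper themselves.

/*
 * Simplified, standalone illustration of the change described above.
 * All names here are hypothetical stand-ins, not the blk-mq API: the
 * common framework decides whether a request bypasses the scheduler,
 * so a scheduler's insert hook never needs to make that call itself.
 */
#include <stdbool.h>
#include <stdio.h>

struct request {
        int tag;        /* -1 means no driver tag assigned yet */
};

struct scheduler {
        /* per-scheduler insert hook, analogous to ->insert_requests() */
        void (*insert)(struct request *rq);
};

static void example_sched_insert(struct request *rq)
{
        printf("scheduler: queued request (tag %d)\n", rq->tag);
}

/* Requests that already carry a driver tag (e.g. flushes) skip the scheduler. */
static bool bypass_insert(struct request *rq)
{
        if (rq->tag == -1)
                return false;
        printf("framework: tag %d dispatched directly, scheduler bypassed\n",
               rq->tag);
        return true;
}

/* Common insert path: the bypass check lives here, not in each scheduler. */
static void sched_insert_request(struct scheduler *e, struct request *rq)
{
        if (e && bypass_insert(rq))
                goto run;
        if (e && e->insert)
                e->insert(rq);
run:
        printf("framework: running hardware queue\n");
}

int main(void)
{
        struct scheduler e = { .insert = example_sched_insert };
        struct request normal = { .tag = -1 };
        struct request flush = { .tag = 7 };

        sched_insert_request(&e, &normal);      /* goes through the scheduler */
        sched_insert_request(&e, &flush);       /* bypasses it */
        return 0;
}

The goto mirrors the run: label added in blk_mq_sched_insert_request() below: the hardware queue still gets kicked whether or not the scheduler ever saw the request.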
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--    block/blk-mq-sched.c    25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 114814ec3d49..3ec52f494094 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -289,7 +289,8 @@ void blk_mq_sched_request_inserted(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
 
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+                                       struct request *rq)
 {
         if (rq->tag == -1) {
                 rq->rq_flags |= RQF_SORTED;
@@ -305,7 +306,6 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
         spin_unlock(&hctx->lock);
         return true;
 }
-EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
 
 static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
@@ -363,6 +363,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                 return;
         }
 
+        if (e && blk_mq_sched_bypass_insert(hctx, rq))
+                goto run;
+
         if (e && e->type->ops.mq.insert_requests) {
                 LIST_HEAD(list);
 
@@ -374,6 +377,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                 spin_unlock(&ctx->lock);
         }
 
+run:
         if (run_queue)
                 blk_mq_run_hw_queue(hctx, async);
 }
@@ -385,6 +389,23 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
         struct elevator_queue *e = hctx->queue->elevator;
 
+        if (e) {
+                struct request *rq, *next;
+
+                /*
+                 * We bypass requests that already have a driver tag assigned,
+                 * which should only be flushes. Flushes are only ever inserted
+                 * as single requests, so we shouldn't ever hit the
+                 * WARN_ON_ONCE() below (but let's handle it just in case).
+                 */
+                list_for_each_entry_safe(rq, next, list, queuelist) {
+                        if (WARN_ON_ONCE(rq->tag != -1)) {
+                                list_del_init(&rq->queuelist);
+                                blk_mq_sched_bypass_insert(hctx, rq);
+                        }
+                }
+        }
+
         if (e && e->type->ops.mq.insert_requests)
                 e->type->ops.mq.insert_requests(hctx, list, false);
         else
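
A similarly hedged sketch of the batched path added to blk_mq_sched_insert_requests(), again with hypothetical names and a hand-rolled singly linked list standing in for the kernel's list_head: already-tagged requests (which should only be flushes) are unlinked from the batch and bypass the scheduler, and only the remaining requests are handed to the scheduler's insert hook.

/*
 * Standalone sketch of the batched-insert hunk above. Hypothetical
 * names and a plain singly linked list stand in for the kernel's
 * list_head: tagged requests are unlinked and dispatched directly,
 * mirroring the list_for_each_entry_safe() filtering added here.
 */
#include <stdio.h>
#include <stddef.h>

struct request {
        int tag;                /* -1 means no driver tag assigned */
        struct request *next;
};

/* Stand-in for blk_mq_sched_bypass_insert(): send straight to the driver. */
static void bypass_insert(struct request *rq)
{
        printf("bypass: tag %d dispatched without the scheduler\n", rq->tag);
}

/* Stand-in for the scheduler's ->insert_requests() hook. */
static void sched_insert_list(struct request *head)
{
        for (struct request *rq = head; rq; rq = rq->next)
                printf("scheduler: queued untagged request\n");
}

/* Common framework path: filter tagged requests out of the batch first. */
static void insert_requests(struct request **headp)
{
        struct request **link = headp;

        while (*link) {
                struct request *rq = *link;

                if (rq->tag != -1) {
                        *link = rq->next;       /* unlink, like list_del_init() */
                        rq->next = NULL;
                        bypass_insert(rq);
                } else {
                        link = &rq->next;
                }
        }
        sched_insert_list(*headp);
}

int main(void)
{
        struct request flush = { .tag = 3, .next = NULL };
        struct request a = { .tag = -1, .next = &flush };
        struct request b = { .tag = -1, .next = &a };
        struct request *batch = &b;

        insert_requests(&batch);        /* flush is filtered out; a and b are queued */
        return 0;
}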