author		Tejun Heo <tj@kernel.org>	2010-09-03 11:56:17 +0200
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 12:35:37 +0200
commit		4fed947cb311e5aa51781d316cefca836352f6ce (patch)
tree		eada83d5bf503244628e3c190e97e8c7af847e35 /block/blk-flush.c
parent		dd4c133f387c48f526022860ad70354637a80f4c (diff)
block: implement REQ_FLUSH/FUA based interface for FLUSH/FUA requests
Now that the backend conversion is complete, export sequenced FLUSH/FUA capability through the REQ_FLUSH/FUA flags. REQ_FLUSH means the device cache should be flushed before executing the request. REQ_FUA means that the data in the request should be on non-volatile media on completion.

The block layer will choose the correct way of implementing the semantics and execute it. The request may be passed to the device directly if the device can handle it; otherwise, it will be sequenced using one or more proxy requests. Devices will never see REQ_FLUSH and/or REQ_FUA flags they don't support.

Also, unlike the original REQ_HARDBARRIER, REQ_FLUSH/FUA requests are never failed with -EOPNOTSUPP. If the underlying device doesn't support FLUSH/FUA, the block layer simply makes them no-ops. In other words, it no longer distinguishes between a writeback cache which doesn't support cache flush and writethrough/no cache. Devices which have a WB cache w/o flush are very difficult to come by these days and there's not much we can do anyway, so it doesn't make sense to require everyone to implement -EOPNOTSUPP handling. This will simplify filesystems and block drivers, as they can drop the -EOPNOTSUPP retry logic for barriers.

* QUEUE_ORDERED_* are removed and QUEUE_FSEQ_* are moved into blk-flush.c.

* REQ_FLUSH w/o data could also be passed directly to drivers without sequencing, but some drivers assume that zero-length requests don't have rq->bio, which isn't true for these requests, requiring the use of proxy requests.

* REQ_COMMON_MASK now includes REQ_FLUSH | REQ_FUA so that they are copied from bio to request.

* WRITE_BARRIER is marked deprecated and WRITE_FLUSH, WRITE_FUA and WRITE_FLUSH_FUA are added.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
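To illustrate the submitter side of the new interface, here is a minimal sketch (not part of this patch) of how a filesystem might issue a journal commit block, assuming the 2.6.37-era buffer_head API; submit_commit_block() is a hypothetical helper:

/*
 * Hypothetical example, not from this patch: issue a commit block with
 * a preceding cache flush and FUA semantics.  WRITE_FLUSH_FUA expands
 * to WRITE | REQ_FLUSH | REQ_FUA; the block layer strips whatever the
 * device doesn't support, so no -EOPNOTSUPP handling is needed.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static void submit_commit_block(struct buffer_head *bh)
{
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;	/* standard sync completion */
	get_bh(bh);				/* reference dropped on completion */
	submit_bh(WRITE_FLUSH_FUA, bh);
}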
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--	block/blk-flush.c	85
1 file changed, 46 insertions(+), 39 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index dd873225da97..452c552e9ead 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,5 +1,5 @@
/*
- * Functions related to barrier IO handling
+ * Functions to sequence FLUSH and FUA writes.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -9,6 +9,15 @@
#include "blk.h"
+/* FLUSH/FUA sequences */
+enum {
+ QUEUE_FSEQ_STARTED = (1 << 0), /* flushing in progress */
+ QUEUE_FSEQ_PREFLUSH = (1 << 1), /* pre-flushing in progress */
+ QUEUE_FSEQ_DATA = (1 << 2), /* data write in progress */
+ QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */
+ QUEUE_FSEQ_DONE = (1 << 4),
+};
+
static struct request *queue_next_fseq(struct request_queue *q);
unsigned blk_flush_cur_seq(struct request_queue *q)
@@ -79,6 +88,7 @@ static void queue_flush(struct request_queue *q, struct request *rq,
static struct request *queue_next_fseq(struct request_queue *q)
{
+ struct request *orig_rq = q->orig_flush_rq;
struct request *rq = &q->flush_rq;
switch (blk_flush_cur_seq(q)) {
@@ -87,12 +97,11 @@ static struct request *queue_next_fseq(struct request_queue *q)
break;
case QUEUE_FSEQ_DATA:
- /* initialize proxy request and queue it */
+ /* initialize proxy request, inherit FLUSH/FUA and queue it */
blk_rq_init(q, rq);
- init_request_from_bio(rq, q->orig_flush_rq->bio);
- rq->cmd_flags &= ~REQ_HARDBARRIER;
- if (q->ordered & QUEUE_ORDERED_DO_FUA)
- rq->cmd_flags |= REQ_FUA;
+ init_request_from_bio(rq, orig_rq->bio);
+ rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
+ rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
rq->end_io = flush_data_end_io;
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
@@ -110,60 +119,58 @@ static struct request *queue_next_fseq(struct request_queue *q)
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
+ unsigned int fflags = q->flush_flags; /* may change, cache it */
+ bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
+ bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
+ bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
unsigned skip = 0;
- if (!(rq->cmd_flags & REQ_HARDBARRIER))
+ /*
+ * Special case. If there's data but flush is not necessary,
+ * the request can be issued directly.
+ *
+ * Flush w/o data should be able to be issued directly too but
+ * currently some drivers assume that rq->bio contains
+ * non-zero data if it isn't NULL and empty FLUSH requests
+ * getting here usually have bios without data.
+ */
+ if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
+ rq->cmd_flags &= ~REQ_FLUSH;
+ if (!has_fua)
+ rq->cmd_flags &= ~REQ_FUA;
return rq;
+ }
+ /*
+ * Sequenced flushes can't be processed in parallel. If
+ * another one is already in progress, queue for later
+ * processing.
+ */
if (q->flush_seq) {
- /*
- * Sequenced flush is already in progress and they
- * can't be processed in parallel. Queue for later
- * processing.
- */
list_move_tail(&rq->queuelist, &q->pending_flushes);
return NULL;
}
- if (unlikely(q->next_ordered == QUEUE_ORDERED_NONE)) {
- /*
- * Queue ordering not supported. Terminate
- * with prejudice.
- */
- blk_dequeue_request(rq);
- __blk_end_request_all(rq, -EOPNOTSUPP);
- return NULL;
- }
-
/*
* Start a new flush sequence
*/
q->flush_err = 0;
- q->ordered = q->next_ordered;
q->flush_seq |= QUEUE_FSEQ_STARTED;
- /*
- * For an empty barrier, there's no actual BAR request, which
- * in turn makes POSTFLUSH unnecessary. Mask them off.
- */
- if (!blk_rq_sectors(rq))
- q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
- QUEUE_ORDERED_DO_POSTFLUSH);
-
- /* stash away the original request */
+ /* adjust FLUSH/FUA of the original request and stash it away */
+ rq->cmd_flags &= ~REQ_FLUSH;
+ if (!has_fua)
+ rq->cmd_flags &= ~REQ_FUA;
blk_dequeue_request(rq);
q->orig_flush_rq = rq;
- if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
+ /* skip unneeded sequences and return the first one */
+ if (!do_preflush)
skip |= QUEUE_FSEQ_PREFLUSH;
-
- if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
+ if (!blk_rq_sectors(rq))
skip |= QUEUE_FSEQ_DATA;
-
- if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
+ if (!do_postflush)
skip |= QUEUE_FSEQ_POSTFLUSH;
-
- /* complete skipped sequences and return the first sequence */
return blk_flush_complete_seq(q, skip, 0);
}
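On the driver side, nothing in this file needs to know device specifics; a driver only declares its capabilities. A minimal sketch, assuming the blk_queue_flush() helper added elsewhere in this patch series (mydrv_setup_queue() is hypothetical):

/*
 * Hypothetical example: declare what the device can do.  The block
 * layer consults q->flush_flags (cached as fflags above) and sequences
 * pre-flush, data and post-flush steps for anything the device lacks.
 */
static void mydrv_setup_queue(struct request_queue *q,
			      bool volatile_wb_cache, bool native_fua)
{
	unsigned int flush = 0;

	if (volatile_wb_cache)
		flush |= REQ_FLUSH;	/* device needs explicit flushes */
	if (native_fua)
		flush |= REQ_FUA;	/* device honors FUA writes directly */

	blk_queue_flush(q, flush);
}

For example, a REQ_FUA write on a queue that declared REQ_FLUSH but not REQ_FUA takes the do_postflush path above: the data is written first and a post-flush is sequenced after it completes.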