author     David Woodhouse <David.Woodhouse@intel.com>	2008-08-05 18:01:53 +0100
committer  Jens Axboe <jens.axboe@oracle.com>	2008-10-09 08:56:01 +0200
commit     fb2dce862d9f9a68e6b9374579056ec9eca02a63
tree       888e0fd7248c9329fa1aa3981043a2dc2457d488	/block/blk-core.c
parent     d628eaef310533767ce68664873869c2d7f78f09
Add 'discard' request handling
Some block devices benefit from a hint that they can forget the contents
of certain sectors. Add basic support for this to the block core, along
with a 'blkdev_issue_discard()' helper function which issues such
requests.
The caller doesn't get to provide an end_io function, since
blkdev_issue_discard() will automatically split the request up into
multiple bios if appropriate. Neither does the function wait for
completion -- it's expected that callers won't care about when, or even
_if_, the request completes. It's only a hint to the device anyway. By
definition, the file system doesn't _care_ about these sectors any more.
[With feedback from OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> and
Jens Axboe <jens.axboe@oracle.com>]
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
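
As an illustration of the intended call pattern (not part of this patch), a filesystem that has just freed an extent might issue the hint roughly as sketched below. The three-argument blkdev_issue_discard() signature and the pr_debug reporting are assumptions for this sketch; only the fire-and-forget semantics and the -EOPNOTSUPP result for queues without discard support come from the description above.

#include <linux/blkdev.h>
#include <linux/kernel.h>

/*
 * Hypothetical caller sketch: hint that a just-freed extent no longer
 * matters.  The exact signature is an assumption; the fire-and-forget
 * semantics are what the commit message above describes.
 */
static void fs_discard_extent(struct block_device *bdev,
			      sector_t start, unsigned int nr_sects)
{
	int err = blkdev_issue_discard(bdev, start, nr_sects);

	/*
	 * -EOPNOTSUPP just means the queue has no discard support;
	 * since this is only a hint, the caller can safely ignore it.
	 */
	if (err && err != -EOPNOTSUPP)
		pr_debug("discard of %u sectors at %llu failed: %d\n",
			 nr_sects, (unsigned long long)start, err);
}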
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 28 ++++++++++++++++++++--------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a496727df7ef..1e143c4f9d34 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1079,6 +1079,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	 */
 	if (unlikely(bio_barrier(bio)))
 		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+	if (unlikely(bio_discard(bio))) {
+		req->cmd_flags |= (REQ_SOFTBARRIER | REQ_DISCARD);
+		req->q->prepare_discard_fn(req->q, req);
+	}
 
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
@@ -1095,7 +1099,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, nr_sectors, barrier, err;
+	int el_ret, nr_sectors, barrier, discard, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	int rw_flags;
@@ -1115,6 +1119,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		goto end_io;
 	}
 
+	discard = bio_discard(bio);
+	if (unlikely(discard) && !q->prepare_discard_fn) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(barrier) || elv_queue_empty(q))
@@ -1405,7 +1415,8 @@ end_io:
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
-		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+		if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
+		    (bio_discard(bio) && !q->prepare_discard_fn)) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
@@ -1487,7 +1498,6 @@ void submit_bio(int rw, struct bio *bio)
 	 * go through the normal accounting stuff before submission.
 	 */
 	if (bio_has_data(bio)) {
-
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -1881,7 +1891,7 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
 	struct request_queue *q = rq->q;
 	unsigned long flags = 0UL;
 
-	if (bio_has_data(rq->bio)) {
+	if (bio_has_data(rq->bio) || blk_discard_rq(rq)) {
 		if (__end_that_request_first(rq, error, nr_bytes))
 			return 1;
 
@@ -1939,7 +1949,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  **/
 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-	if (bio_has_data(rq->bio) &&
+	if ((bio_has_data(rq->bio) || blk_discard_rq(rq)) &&
 	    __end_that_request_first(rq, error, nr_bytes))
 		return 1;
 
@@ -2012,12 +2022,14 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	   we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
 	rq->cmd_flags |= (bio->bi_rw & 3);
 
-	rq->nr_phys_segments = bio_phys_segments(q, bio);
-	rq->nr_hw_segments = bio_hw_segments(q, bio);
+	if (bio_has_data(bio)) {
+		rq->nr_phys_segments = bio_phys_segments(q, bio);
+		rq->nr_hw_segments = bio_hw_segments(q, bio);
+		rq->buffer = bio_data(bio);
+	}
 	rq->current_nr_sectors = bio_cur_sectors(bio);
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
-	rq->buffer = bio_data(bio);
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
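
For context on the q->prepare_discard_fn checks added above: a driver that can act on the hint installs a prepare callback on its queue, roughly as in the sketch below. The (queue, request) callback signature mirrors the call visible in the init_request_from_bio() hunk; blk_queue_set_discard() is assumed to be the setter introduced elsewhere in this series, and the driver names are made up.

#include <linux/blkdev.h>

/*
 * Hypothetical driver-side hook: called from init_request_from_bio()
 * when a discard bio is turned into a request, before it is queued.
 */
static int mydrv_prepare_discard(struct request_queue *q,
				 struct request *req)
{
	/* Nothing extra to fill in for this imaginary device. */
	return 0;
}

static void mydrv_setup_queue(struct request_queue *q)
{
	/*
	 * Registering the callback is what makes __make_request() accept
	 * discard bios instead of failing them with -EOPNOTSUPP.
	 * blk_queue_set_discard() is assumed from this patch series.
	 */
	blk_queue_set_discard(q, mydrv_prepare_discard);
}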