author     Christoph Hellwig <hch@lst.de>    2017-02-08 14:46:49 +0100
committer  Jens Axboe <axboe@fb.com>         2017-02-08 13:43:08 -0700
commit     1e739730c5b9ea80a2f25e9cf6e1025d47e3d8ed (patch)
tree       4f48ed6c92153fb9941185182e918232eb3472f4 /block/blk-core.c
parent     34fe7c05400663e01e23cddd1fea68bb7a2b3d29 (diff)
block: optionally merge discontiguous discard bios into a single request
Add a new merge strategy, used from the plug merging code, that merges discard bios into a request until the maximum number of discard ranges (or the maximum discard size) is reached. I/O scheduler merging is not wired up yet but might also be useful, although not for fast devices like NVMe, which are the only user for now. Note that for now we don't support limiting the size of each discard range, but if needed that can be added later.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
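For context, a driver opts into this merging by raising the queue's discard segment limit, which is what queue_max_discard_segments() reads back in the hunk below. A minimal sketch of that driver-side setup follows; it is not part of this patch, example_setup_discard() is a hypothetical helper, and the limit of 256 ranges is only an assumed value (the count NVMe's DSM command format allows).

	/*
	 * Hedged illustration only: advertise that up to 256 discard ranges
	 * may be packed into one request; bio_attempt_discard_merge() stops
	 * merging once this limit is reached.
	 */
	#include <linux/blkdev.h>

	static void example_setup_discard(struct request_queue *q)
	{
		blk_queue_max_discard_segments(q, 256);	/* assumed per-command range limit */
	}

blk_queue_max_discard_segments() is the queue-limit setter that accompanies the queue_max_discard_segments() check used in this patch.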
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 00e053c704a1..c0e4d41d3d33 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1483,6 +1483,30 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 	return true;
 }
 
+bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
+		struct bio *bio)
+{
+	unsigned short segments = blk_rq_nr_discard_segments(req);
+
+	if (segments >= queue_max_discard_segments(q))
+		goto no_merge;
+	if (blk_rq_sectors(req) + bio_sectors(bio) >
+	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+		goto no_merge;
+
+	req->biotail->bi_next = bio;
+	req->biotail = bio;
+	req->__data_len += bio->bi_iter.bi_size;
+	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+	req->nr_phys_segments = segments + 1;
+
+	blk_account_io_start(req, false);
+	return true;
+no_merge:
+	req_set_nomerge(q, req);
+	return false;
+}
+
 /**
  * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
@@ -1547,6 +1571,9 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		case ELEVATOR_FRONT_MERGE:
 			merged = bio_attempt_front_merge(q, rq, bio);
 			break;
+		case ELEVATOR_DISCARD_MERGE:
+			merged = bio_attempt_discard_merge(q, rq, bio);
+			break;
 		default:
 			break;
 		}
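A note on how a merged request of this kind is consumed: the bios stay chained on the request, so a driver can walk them with __rq_for_each_bio() and emit one discard range per bio. The sketch below is an illustration under that assumption, not code from this series; struct example_trim_range and example_fill_ranges() are hypothetical names.

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	struct example_trim_range {		/* hypothetical on-wire range layout */
		u64	lba;
		u32	nr_sectors;
	};

	static unsigned short example_fill_ranges(struct request *req,
						  struct example_trim_range *ranges)
	{
		struct bio *bio;
		unsigned short n = 0;

		/* Each merged bio still describes its own contiguous range. */
		__rq_for_each_bio(bio, req) {
			ranges[n].lba = bio->bi_iter.bi_sector;
			ranges[n].nr_sectors = bio->bi_iter.bi_size >> 9;
			n++;
		}

		/* Should match the count maintained by bio_attempt_discard_merge(). */
		WARN_ON_ONCE(n != blk_rq_nr_discard_segments(req));
		return n;
	}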