author		Ming Lei <ming.lei@redhat.com>	2018-10-29 20:57:17 +0800
committer	Jens Axboe <axboe@kernel.dk>	2018-11-09 06:23:14 -0700
commit		1adfc5e4136f5967d591c399aff95b3b035f16b7
tree		8721fafeef167a567a2fe7838071e0f17e01d02c
parent		d39aa4979219ca3d61c492f7460f1032b97b9ef2
block: make sure discard bio is aligned with logical block size
The created discard bio has to be aligned to the logical block size.
This patch introduces the helper bio_allowed_max_sectors() for this
purpose.

Cc: stable@vger.kernel.org
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Xiao Ni <xni@redhat.com>
Cc: Mariusz Dabrowski <mariusz.dabrowski@intel.com>
Fixes: 744889b7cbb56a6 ("block: don't deal with discard limit in blkdev_issue_discard()")
Fixes: a22c4d7e34402cc ("block: re-add discard_granularity and alignment checks")
Reported-by: Rui Salvaterra <rsalvaterra@gmail.com>
Tested-by: Rui Salvaterra <rsalvaterra@gmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
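To illustrate the alignment problem the patch fixes, here is a minimal userspace C sketch. It is not part of the patch; the 4096-byte logical block size is an assumed example. Capping a discard at UINT_MAX >> 9 sectors gives a byte count that is not a multiple of a 4096-byte logical block, so the tail of such a bio can be rejected by the device, while rounding UINT_MAX down to the logical block size first (the arithmetic bio_allowed_max_sectors() uses) gives an aligned cap.

/* Illustrative userspace sketch, not kernel code: compares the old cap
 * (UINT_MAX >> 9 sectors) with the new one for an assumed 4096-byte
 * logical block size. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int lbs = 4096;                     /* assumed logical block size */
	unsigned long long old_cap = UINT_MAX >> 9;  /* old limit, in 512-byte sectors */
	/* same arithmetic as bio_allowed_max_sectors(): round the byte limit
	 * down to the logical block size, then convert to 512-byte sectors */
	unsigned long long new_cap = (UINT_MAX - (UINT_MAX % lbs)) >> 9;

	printf("old cap: %llu sectors, %llu bytes, bytes %% lbs = %llu\n",
	       old_cap, old_cap << 9, (old_cap << 9) % lbs);   /* 3584: misaligned */
	printf("new cap: %llu sectors, %llu bytes, bytes %% lbs = %llu\n",
	       new_cap, new_cap << 9, (new_cap << 9) % lbs);   /* 0: aligned */
	return 0;
}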
-rw-r--r--	block/blk-lib.c   |  3 +--
-rw-r--r--	block/blk-merge.c |  3 ++-
-rw-r--r--	block/blk.h       | 10 ++++++++++
3 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 76f867ea9a9b..d56fd159d2e8 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -57,8 +57,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		if (!req_sects)
 			goto fail;
-		if (req_sects > UINT_MAX >> 9)
-			req_sects = UINT_MAX >> 9;
+		req_sects = min(req_sects, bio_allowed_max_sectors(q));
 
 		end_sect = sector + req_sects;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 208658a901c6..e7696c47489a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors = min(q->limits.max_discard_sectors,
+			bio_allowed_max_sectors(q));
 	max_discard_sectors -= max_discard_sectors % granularity;
 
 	if (unlikely(!max_discard_sectors)) {
diff --git a/block/blk.h b/block/blk.h
index c85e53f21cdd..0089fefdf771 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -396,6 +396,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
 }
 
 /*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int', and it has to be aligned to the logical
+ * block size, which is the minimum unit accepted by the hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
+/*
  * Internal io_context interface
  */
 void get_io_context(struct io_context *ioc);
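For context, a simplified userspace model of how a discard range gets chopped up once the cap comes from bio_allowed_max_sectors(). This is only a loose sketch, not the real __blkdev_issue_discard() loop (which builds and submits bios); the 4096-byte logical block size and the 20 GiB range are assumptions for the demo.

/* Loose userspace model of the discard splitting loop after this change. */
#include <stdio.h>
#include <limits.h>

typedef unsigned long long sector_t;

static unsigned int allowed_max_sectors(unsigned int lbs)
{
	/* same arithmetic as bio_allowed_max_sectors() */
	return (UINT_MAX - (UINT_MAX % lbs)) >> 9;
}

int main(void)
{
	unsigned int lbs = 4096;                /* assumed logical block size */
	sector_t sector = 0;
	sector_t nr_sects = (20ULL << 30) >> 9; /* 20 GiB in 512-byte sectors */
	unsigned int cap = allowed_max_sectors(lbs);

	while (nr_sects) {
		sector_t req_sects = nr_sects < cap ? nr_sects : cap;

		/* full-size chunks are block-aligned because the cap is;
		 * the tail is whatever remains of the request */
		printf("discard: sector %llu, %llu sectors, aligned=%s\n",
		       sector, req_sects,
		       ((req_sects << 9) % lbs) ? "no" : "yes");

		sector += req_sects;
		nr_sects -= req_sects;
	}
	return 0;
}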