author     Christoph Hellwig <hch@lst.de>  2024-07-06 09:52:18 +0200
committer  Jens Axboe <axboe@kernel.dk>    2024-07-08 01:51:05 -0600
commit     09595e0c9d654743483197b2f21dd4ec37c90a27 (patch)
tree       ce3a94528720e56ff6c8d336b3b4881a709000b5 /block
parent     25f76c3db2f08428b5acd082a52787164001eb6e (diff)
block: pass a phys_addr_t to get_max_segment_size
Work on a single physical address to simplify the logic, and prepare the
callers for using better helpers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20240706075228.2350978-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
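The arithmetic the new helper performs is easy to model outside the kernel. The
following is a minimal user-space sketch, assuming a mocked-up struct
queue_limits with only the two fields the helper reads; max_segment_bytes() and
min_ul() are hypothetical stand-ins for the kernel's get_max_segment_size() and
min_t(), not the kernel implementation itself.

    /* User-space sketch (not kernel code) of the clamping done by the new
     * get_max_segment_size(): bytes usable at paddr before crossing a segment
     * boundary, further capped by max_segment_size and the caller's length. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    struct queue_limits {                    /* mocked: only the fields used here */
            unsigned long seg_boundary_mask; /* e.g. 0xffff for 64K boundaries */
            unsigned int max_segment_size;   /* e.g. 65536 */
    };

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    /* Mirrors the shape of the post-patch helper. */
    static unsigned int max_segment_bytes(const struct queue_limits *lim,
                    phys_addr_t paddr, unsigned int len)
    {
            unsigned long to_boundary = lim->seg_boundary_mask -
                            (lim->seg_boundary_mask & paddr);

            /* The +1 comes after the min() so a mask of ULONG_MAX cannot overflow. */
            return min_ul(len, min_ul(to_boundary,
                            (unsigned long)lim->max_segment_size - 1) + 1);
    }

    int main(void)
    {
            struct queue_limits lim = {
                    .seg_boundary_mask = 0xffff,    /* segments may not cross 64K */
                    .max_segment_size = 65536,
            };

            /* 0x1f000 sits 4K below a 64K boundary, so only 4K of the 8K fits. */
            printf("%u\n", max_segment_bytes(&lim, 0x1f000, 8192)); /* prints 4096 */
            /* 0x20000 is boundary aligned, so the full 8K fits. */
            printf("%u\n", max_segment_bytes(&lim, 0x20000, 8192)); /* prints 8192 */
            return 0;
    }

Working on a single physical address means the callers no longer have to
combine a page pointer and an offset before asking this question, which is why
the diff below is a net removal of code.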
Diffstat (limited to 'block')
-rw-r--r--  block/blk-merge.c | 25
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cff20bcc0252..e41ea3318099 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -209,23 +209,22 @@ static inline unsigned get_max_io_size(struct bio *bio,
 /**
  * get_max_segment_size() - maximum number of bytes to add as a single segment
  * @lim: Request queue limits.
- * @start_page: See below.
- * @offset: Offset from @start_page where to add a segment.
+ * @paddr: address of the range to add
+ * @max_len: maximum length available to add at @paddr
  *
- * Returns the maximum number of bytes that can be added as a single segment.
+ * Returns the maximum number of bytes of the range starting at @paddr that can
+ * be added to a single segment.
  */
 static inline unsigned get_max_segment_size(const struct queue_limits *lim,
-		struct page *start_page, unsigned long offset)
+		phys_addr_t paddr, unsigned int len)
 {
-	unsigned long mask = lim->seg_boundary_mask;
-
-	offset = mask & (page_to_phys(start_page) + offset);
-
 	/*
 	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
 	 * after having calculated the minimum.
 	 */
-	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
+	return min_t(unsigned long, len,
+		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
+		    (unsigned long)lim->max_segment_size - 1) + 1);
 }
 
 /**
@@ -258,9 +257,7 @@ static bool bvec_split_segs(const struct queue_limits *lim,
 	unsigned seg_size = 0;
 
 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(lim, bv->bv_page,
-						bv->bv_offset + total_len);
-		seg_size = min(seg_size, len);
+		seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);
 
 		(*nsegs)++;
 		total_len += seg_size;
@@ -494,8 +491,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
 	while (nbytes > 0) {
 		unsigned offset = bvec->bv_offset + total;
-		unsigned len = min(get_max_segment_size(&q->limits,
-				bvec->bv_page, offset), nbytes);
+		unsigned len = get_max_segment_size(&q->limits, bvec_phys(bvec),
+				nbytes);
 		struct page *page = bvec->bv_page;
 
 		/*
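For reference, here is a sketch of the loop shape that bvec_split_segs() follows
after this change: walk a physical range and carve off one boundary-respecting
segment per helper call. The types and helper below are the same hypothetical
stand-ins used in the sketch above, repeated so this example compiles on its
own; it illustrates the calling pattern, not the kernel function itself.

    /* Stand-alone sketch (not kernel code): split a physical range into
     * segments the way bvec_split_segs() now does, by passing the running
     * physical address (bvec_phys(bv) + total_len in the kernel) to the helper. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    struct queue_limits {                    /* mocked, as in the earlier sketch */
            unsigned long seg_boundary_mask;
            unsigned int max_segment_size;
    };

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    static unsigned int max_segment_bytes(const struct queue_limits *lim,
                    phys_addr_t paddr, unsigned int len)
    {
            unsigned long to_boundary = lim->seg_boundary_mask -
                            (lim->seg_boundary_mask & paddr);

            return min_ul(len, min_ul(to_boundary,
                            (unsigned long)lim->max_segment_size - 1) + 1);
    }

    int main(void)
    {
            struct queue_limits lim = {
                    .seg_boundary_mask = 0xffff,
                    .max_segment_size = 65536,
            };
            phys_addr_t paddr = 0x1e000;    /* 8K below a 64K boundary */
            unsigned int len = 3 * 65536;   /* 192K to split */
            unsigned int total = 0, nsegs = 0;

            /* One helper call per segment, advancing by whatever it allowed:
             * yields 8K, 64K, 64K, 56K for this starting address and length. */
            while (total < len) {
                    unsigned int seg = max_segment_bytes(&lim, paddr + total,
                                    len - total);

                    printf("segment %u: addr 0x%llx, %u bytes\n", ++nsegs,
                                    (unsigned long long)(paddr + total), seg);
                    total += seg;
            }
            return 0;
    }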