author     Kent Overstreet <kent.overstreet@gmail.com>    2020-05-06 15:37:04 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:08:39 -0400
commit     c4dd7871ef71a7327b6b3834e2cdf6777e03eb0a
tree       83cb54e1ddcff8451bafabe6c72d584e8a3570d0    /fs/bcachefs/compress.c
parent     bc970cecd86d5a77f58e3be017fd80276e71677e
bcachefs: Some compression improvements
In __bio_map_or_bounce(), the check for whether the bio is physically
contiguous is improved; it's now more readable and it handles bios that
span multiple pages but are still physically contiguous.
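
For illustration, a minimal userspace sketch of that contiguity check, modelled on the bio_phys_contig() helper the patch adds below; struct seg and segs_phys_contig() are hypothetical stand-ins for struct bio_vec and the real helper, which walks the bio with __bio_for_each_bvec():

/*
 * Hypothetical userspace model of the contiguity check: each segment
 * models a bio_vec as (page base address, offset, length), and the walk
 * checks that every segment starts exactly where the previous one ended.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct seg {
	char	*base;		/* models page_address(bv.bv_page) */
	size_t	offset;		/* models bv.bv_offset */
	size_t	len;		/* models bv.bv_len */
};

static bool segs_phys_contig(const struct seg *s, size_t nr)
{
	char *expected_start = NULL;
	size_t i;

	for (i = 0; i < nr; i++) {
		if (expected_start &&
		    expected_start != s[i].base + s[i].offset)
			return false;

		/* The next segment must begin right after this one ends. */
		expected_start = s[i].base + s[i].offset + s[i].len;
	}

	return true;
}

int main(void)
{
	static char buf[8192];
	/* Two segments that butt up against each other: contiguous. */
	struct seg contig[] = {
		{ buf, 0,    4096 },
		{ buf, 4096, 4096 },
	};
	/* A hole between the segments: not contiguous. */
	struct seg gappy[] = {
		{ buf, 0,    2048 },
		{ buf, 4096, 2048 },
	};

	printf("contig: %d\n", segs_phys_contig(contig, 2));	/* prints 1 */
	printf("gappy:  %d\n", segs_phys_contig(gappy, 2));	/* prints 0 */
	return 0;
}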
Also, when decompressing, we were doing a redundant memcpy in the case
where we were able to use vmap to map the bio contiguously.
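
A rough sketch of the copy-back decision that changes in the last two hunks of the diff below; BB_NONE and BB_VMAP are taken from the patch, while the bounce-type name and the needs_copy_back() helper are hypothetical. The point is that a vmapped buffer already aliases the destination bio's own pages, so only a real bounce buffer needs memcpy_to_bio() afterwards:

/*
 * Sketch (hypothetical names) of when the decompress/compress paths
 * still need to copy the result back into the bio: a directly mapped
 * (BB_NONE) or vmapped (BB_VMAP) buffer already is the bio's memory,
 * so only a separately allocated bounce buffer requires the copy.
 */
#include <stdbool.h>
#include <stdio.h>

enum bb_type {
	BB_NONE,	/* bio was physically contiguous, mapped directly */
	BB_VMAP,	/* bio pages vmapped into one virtual range */
	BB_BOUNCE,	/* hypothetical: separate allocation, must copy back */
};

static bool needs_copy_back(enum bb_type type)
{
	return type != BB_NONE && type != BB_VMAP;
}

int main(void)
{
	printf("BB_NONE   -> copy back? %d\n", needs_copy_back(BB_NONE));
	printf("BB_VMAP   -> copy back? %d\n", needs_copy_back(BB_VMAP));
	printf("BB_BOUNCE -> copy back? %d\n", needs_copy_back(BB_BOUNCE));
	return 0;
}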
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/compress.c')
-rw-r--r--  fs/bcachefs/compress.c | 53
1 file changed, 37 insertions(+), 16 deletions(-)
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 89eb03a988f1..41d0b49d354f 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -39,6 +39,24 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
 	BUG();
 }
 
+static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
+{
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	void *expected_start = NULL;
+
+	__bio_for_each_bvec(bv, bio, iter, start) {
+		if (expected_start &&
+		    expected_start != page_address(bv.bv_page) + bv.bv_offset)
+			return false;
+
+		expected_start = page_address(bv.bv_page) +
+			bv.bv_offset + bv.bv_len;
+	}
+
+	return true;
+}
+
 static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
 				       struct bvec_iter start, int rw)
 {
@@ -48,27 +66,28 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
 	unsigned nr_pages = 0;
 	struct page *stack_pages[16];
 	struct page **pages = NULL;
-	bool first = true;
-	unsigned prev_end = PAGE_SIZE;
 	void *data;
 
 	BUG_ON(bvec_iter_sectors(start) > c->sb.encoded_extent_max);
 
-#ifndef CONFIG_HIGHMEM
-	__bio_for_each_bvec(bv, bio, iter, start) {
-		if (bv.bv_len == start.bi_size)
-			return (struct bbuf) {
-				.b = page_address(bv.bv_page) + bv.bv_offset,
-				.type = BB_NONE, .rw = rw
-			};
-	}
-#endif
+	if (!IS_ENABLED(CONFIG_HIGHMEM) &&
+	    bio_phys_contig(bio, start))
+		return (struct bbuf) {
+			.b = page_address(bio_iter_page(bio, start)) +
+				bio_iter_offset(bio, start),
+			.type = BB_NONE, .rw = rw
+		};
+
+	/* check if we can map the pages contiguously: */
 	__bio_for_each_segment(bv, bio, iter, start) {
-		if ((!first && bv.bv_offset) ||
-		    prev_end != PAGE_SIZE)
+		if (iter.bi_size != start.bi_size &&
+		    bv.bv_offset)
+			goto bounce;
+
+		if (bv.bv_len < iter.bi_size &&
+		    bv.bv_offset + bv.bv_len < PAGE_SIZE)
 			goto bounce;
 
-		prev_end = bv.bv_offset + bv.bv_len;
 		nr_pages++;
 	}
 
@@ -264,7 +283,8 @@ int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
 	if (ret)
 		goto err;
 
-	if (dst_data.type != BB_NONE)
+	if (dst_data.type != BB_NONE &&
+	    dst_data.type != BB_VMAP)
 		memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
 err:
 	bio_unmap_or_unbounce(c, dst_data);
@@ -407,7 +427,8 @@ static unsigned __bio_compress(struct bch_fs *c,
 	memset(dst_data.b + *dst_len, 0, pad);
 	*dst_len += pad;
 
-	if (dst_data.type != BB_NONE)
+	if (dst_data.type != BB_NONE &&
+	    dst_data.type != BB_VMAP)
 		memcpy_to_bio(dst, dst->bi_iter, dst_data.b);
 
 	BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);