author     Kent Overstreet <kent.overstreet@gmail.com>    2019-07-09 12:56:43 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:08:24 -0400
commit     06ed855862853dcdd365a12f7cf182961bec61de
tree       12ff39c492ae211a461f86180efb57ab638c1ad9  /fs/bcachefs
parent     a4461c8a7fc33aa663b0b1b2b7144d5890b6887f
bcachefs: Add offset_into_extent param to bch2_read_extent()
With reflink, we'll no longer be able to calculate the offset of the
data we want within the extent we're reading from using the extent pos
and the iter pos - we'll have to pass it in separately.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
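
To make the commit message concrete: before this patch, the read path derived the offset with arithmetic like iter.bi_sector - bkey_start_offset(k.k). The standalone sketch below illustrates that calculation and why reflink breaks it. The pos/extent structs, the extent_start() helper, and the sector numbers are simplified stand-ins invented for illustration, not the kernel's bpos/bkey types.

/* Illustrative userspace sketch -- simplified stand-ins, not kernel code. */
#include <assert.h>
#include <stdio.h>

struct pos    { unsigned long long inode, offset; };
struct extent { struct pos p; unsigned size; }; /* p = end pos, like a bkey */

/* Start of the extent, as bkey_start_offset() computes it in the kernel. */
static unsigned long long extent_start(const struct extent *e)
{
	return e->p.offset - e->size;
}

int main(void)
{
	/* File extent covering sectors [100, 116) of inode 1: */
	struct extent file_e = { .p = { 1, 116 }, .size = 16 };
	/* Read iterator parked at sector 104 of the file: */
	struct pos iter_pos = { 1, 104 };

	/* Computed in the file keyspace, before following any indirection: */
	unsigned offset_into_extent = iter_pos.offset - extent_start(&file_e);

	/*
	 * With reflink, the data may actually live in an indirect extent at
	 * an unrelated btree position (numbers hypothetical), so doing the
	 * iter-pos/extent-pos subtraction later, against that key, would be
	 * meaningless:
	 */
	struct extent indirect_e = { .p = { 0, 9016 }, .size = 16 };
	assert(offset_into_extent < indirect_e.size);

	printf("offset_into_extent = %u sectors\n", offset_into_extent); /* 4 */
	return 0;
}

Because the subtraction is only meaningful in the file's keyspace, each caller now computes it while it still holds the original key and hands it down to bch2_read_extent(), as the hunks below show.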
Diffstat (limited to 'fs/bcachefs')
-rw-r--r--  fs/bcachefs/fs-io.c    | 31
-rw-r--r--  fs/bcachefs/io.c       | 41
-rw-r--r--  fs/bcachefs/io.h       | 14
-rw-r--r--  fs/bcachefs/io_types.h |  2
-rw-r--r--  fs/bcachefs/move.c     |  2
5 files changed, 54 insertions(+), 36 deletions(-)
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 8858352eb42a..def470b5b959 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -1005,7 +1005,6 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		       struct readpages_iter *readpages_iter)
 {
 	struct bch_fs *c = trans->c;
-	struct bio *bio = &rbio->bio;
 	int flags = BCH_READ_RETRY_IF_STALE|
 		BCH_READ_MAY_PROMOTE;
 
@@ -1015,9 +1014,10 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 	while (1) {
 		BKEY_PADDED(k) tmp;
 		struct bkey_s_c k;
-		unsigned bytes;
+		unsigned bytes, offset_into_extent;
 
-		bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
+		bch2_btree_iter_set_pos(iter,
+				POS(inum, rbio->bio.bi_iter.bi_sector));
 
 		k = bch2_btree_iter_peek_slot(iter);
 		BUG_ON(!k.k);
@@ -1025,8 +1025,8 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		if (IS_ERR(k.k)) {
 			int ret = btree_iter_err(iter);
 			BUG_ON(!ret);
-			bcache_io_error(c, bio, "btree IO error %i", ret);
-			bio_endio(bio);
+			bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
+			bio_endio(&rbio->bio);
 			return;
 		}
@@ -1034,6 +1034,9 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		bch2_trans_unlock(trans);
 
 		k = bkey_i_to_s_c(&tmp.k);
 
+		offset_into_extent = iter->pos.offset -
+			bkey_start_offset(k.k);
+
 		if (readpages_iter) {
 			bool want_full_extent = false;
@@ -1048,27 +1051,27 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 			}
 
 			readpage_bio_extend(readpages_iter,
-					    bio, k.k->p.offset,
+					    &rbio->bio, k.k->p.offset,
 					    want_full_extent);
 		}
 
-		bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
-			 bio->bi_iter.bi_sector) << 9;
-		swap(bio->bi_iter.bi_size, bytes);
+		bytes = min_t(unsigned, bio_sectors(&rbio->bio),
+			      (k.k->size - offset_into_extent)) << 9;
+		swap(rbio->bio.bi_iter.bi_size, bytes);
 
-		if (bytes == bio->bi_iter.bi_size)
+		if (rbio->bio.bi_iter.bi_size == bytes)
 			flags |= BCH_READ_LAST_FRAGMENT;
 
 		if (bkey_extent_is_allocation(k.k))
-			bch2_add_page_sectors(bio, k);
+			bch2_add_page_sectors(&rbio->bio, k);
 
-		bch2_read_extent(c, rbio, k, flags);
+		bch2_read_extent(c, rbio, k, offset_into_extent, flags);
 
 		if (flags & BCH_READ_LAST_FRAGMENT)
 			return;
 
-		swap(bio->bi_iter.bi_size, bytes);
-		bio_advance(bio, bytes);
+		swap(rbio->bio.bi_iter.bi_size, bytes);
+		bio_advance(&rbio->bio, bytes);
 	}
 }
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 42071d0028ad..8f16e252d2f1 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1240,7 +1240,7 @@ retry:
 		goto out;
 	}
 
-	ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
+	ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
 	if (ret == READ_RETRY)
 		goto retry;
 	if (ret)
@@ -1272,17 +1272,22 @@ retry:
 			   POS(inode, bvec_iter.bi_sector),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BKEY_PADDED(k) tmp;
-		unsigned bytes;
+		unsigned bytes, offset_into_extent;
 
 		bkey_reassemble(&tmp.k, k);
 		k = bkey_i_to_s_c(&tmp.k);
+		bch2_trans_unlock(&trans);
 
-		bytes = min_t(unsigned, bvec_iter.bi_size,
-			      (k.k->p.offset - bvec_iter.bi_sector) << 9);
+		offset_into_extent = iter->pos.offset -
+			bkey_start_offset(k.k);
+
+		bytes = min_t(unsigned, bvec_iter_sectors(bvec_iter),
+			      (k.k->size - offset_into_extent)) << 9;
 		swap(bvec_iter.bi_size, bytes);
 
-		ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
+		ret = __bch2_read_extent(c, rbio, bvec_iter, k,
+				offset_into_extent, failed, flags);
 		switch (ret) {
 		case READ_RETRY:
 			goto retry;
@@ -1463,7 +1468,7 @@ static void __bch2_read_endio(struct work_struct *work)
 		goto nodecode;
 
 	/* Adjust crc to point to subset of data we want: */
-	crc.offset += rbio->bvec_iter.bi_sector - rbio->pos.offset;
+	crc.offset += rbio->offset_into_extent;
 	crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
 
 	if (crc.compression_type != BCH_COMPRESSION_NONE) {
@@ -1574,6 +1579,7 @@ static void bch2_read_endio(struct bio *bio)
 
 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 		       struct bvec_iter iter, struct bkey_s_c k,
+		       unsigned offset_into_extent,
 		       struct bch_io_failures *failed, unsigned flags)
 {
 	struct extent_ptr_decoded pick;
@@ -1606,7 +1612,6 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
 			goto hole;
 
-		iter.bi_sector	= pos.offset;
 		iter.bi_size	= pick.crc.compressed_size << 9;
 		goto noclone;
 	}
@@ -1620,8 +1625,7 @@
 	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
 		flags |= BCH_READ_MUST_BOUNCE;
 
-	EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
-		k.k->p.offset < bvec_iter_end_sector(iter));
+	BUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
 
 	if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
 	    (pick.crc.csum_type != BCH_CSUM_NONE &&
@@ -1642,15 +1646,16 @@
 			(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
 			 bvec_iter_sectors(iter) != pick.crc.live_size ||
 			 pick.crc.offset ||
-			 iter.bi_sector != pos.offset));
+			 offset_into_extent));
 
+		pos.offset += offset_into_extent;
 		pick.ptr.offset += pick.crc.offset +
-			(iter.bi_sector - pos.offset);
+			offset_into_extent;
 		pick.crc.compressed_size	= bvec_iter_sectors(iter);
 		pick.crc.uncompressed_size	= bvec_iter_sectors(iter);
 		pick.crc.offset			= 0;
 		pick.crc.live_size		= bvec_iter_sectors(iter);
-		pos.offset			= iter.bi_sector;
+		offset_into_extent		= 0;
 	}
@@ -1707,6 +1712,7 @@ noclone:
 	else
 		rbio->end_io	= orig->bio.bi_end_io;
 	rbio->bvec_iter		= iter;
+	rbio->offset_into_extent = offset_into_extent;
 	rbio->flags		= flags;
 	rbio->have_ioref	= pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
 	rbio->narrow_crcs	= narrow_crcs;
@@ -1834,7 +1840,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
 			   POS(inode, rbio->bio.bi_iter.bi_sector),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BKEY_PADDED(k) tmp;
-		unsigned bytes;
+		unsigned bytes, offset_into_extent;
 
 		/*
 		 * Unlock the iterator while the btree node's lock is still in
@@ -1844,14 +1850,17 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
 		k = bkey_i_to_s_c(&tmp.k);
 		bch2_trans_unlock(&trans);
 
-		bytes = min_t(unsigned, rbio->bio.bi_iter.bi_size,
-			      (k.k->p.offset - rbio->bio.bi_iter.bi_sector) << 9);
+		offset_into_extent = iter->pos.offset -
+			bkey_start_offset(k.k);
+
+		bytes = min_t(unsigned, bio_sectors(&rbio->bio),
+			      (k.k->size - offset_into_extent)) << 9;
 		swap(rbio->bio.bi_iter.bi_size, bytes);
 
 		if (rbio->bio.bi_iter.bi_size == bytes)
 			flags |= BCH_READ_LAST_FRAGMENT;
 
-		bch2_read_extent(c, rbio, k, flags);
+		bch2_read_extent(c, rbio, k, offset_into_extent, flags);
 
 		if (flags & BCH_READ_LAST_FRAGMENT)
 			return;
diff --git a/fs/bcachefs/io.h b/fs/bcachefs/io.h
index 61c8b8b3a459..aa437cb05fe7 100644
--- a/fs/bcachefs/io.h
+++ b/fs/bcachefs/io.h
@@ -99,10 +99,6 @@ struct bch_devs_mask;
 struct cache_promote_op;
 struct extent_ptr_decoded;
 
-int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
-		       struct bkey_s_c, struct bch_io_failures *, unsigned);
-void bch2_read(struct bch_fs *, struct bch_read_bio *, u64);
-
 enum bch_read_flags {
 	BCH_READ_RETRY_IF_STALE		= 1 << 0,
 	BCH_READ_MAY_PROMOTE		= 1 << 1,
@@ -116,14 +112,22 @@ enum bch_read_flags {
 	BCH_READ_IN_RETRY		= 1 << 7,
 };
 
+int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *,
+		       struct bvec_iter, struct bkey_s_c, unsigned,
+		       struct bch_io_failures *, unsigned);
+
 static inline void bch2_read_extent(struct bch_fs *c,
 				    struct bch_read_bio *rbio,
 				    struct bkey_s_c k,
+				    unsigned offset_into_extent,
 				    unsigned flags)
 {
-	__bch2_read_extent(c, rbio, rbio->bio.bi_iter, k, NULL, flags);
+	__bch2_read_extent(c, rbio, rbio->bio.bi_iter, k,
+			   offset_into_extent, NULL, flags);
 }
 
+void bch2_read(struct bch_fs *, struct bch_read_bio *, u64);
+
 static inline struct bch_read_bio *rbio_init(struct bio *bio,
 					     struct bch_io_opts opts)
 {
diff --git a/fs/bcachefs/io_types.h b/fs/bcachefs/io_types.h
index c697191172b0..50f2a5e57960 100644
--- a/fs/bcachefs/io_types.h
+++ b/fs/bcachefs/io_types.h
@@ -38,6 +38,8 @@ struct bch_read_bio {
 	 */
 	struct bvec_iter	bvec_iter;
 
+	unsigned		offset_into_extent;
+
 	u16			flags;
 	union {
 	struct {
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 96f9f5950438..27835e4f13fd 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -461,7 +461,7 @@ static int bch2_move_extent(struct bch_fs *c,
 	 * ctxt when doing wakeup
 	 */
 	closure_get(&ctxt->cl);
-	bch2_read_extent(c, &io->rbio, e.s_c,
+	bch2_read_extent(c, &io->rbio, e.s_c, 0,
 			 BCH_READ_NODECODE|
 			 BCH_READ_LAST_FRAGMENT);
 	return 0;
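
The same fragment-clamping arithmetic now recurs at three call sites (bchfs_read(), the retry path, and bch2_read()), while the move path passes an offset of 0 since it reads the extent from its start. Below is a distilled userspace paraphrase of that calculation - the function and parameter names are invented for illustration, not the kernel code:

/* Paraphrase with simplified types -- not the kernel implementation. */
#include <stdio.h>

#define SECTOR_SHIFT 9

/*
 * Clamp one read fragment to what is left of the extent past
 * offset_into_extent, mirroring the patch's
 * min_t(unsigned, bio_sectors(...), k.k->size - offset_into_extent) << 9.
 */
static unsigned fragment_bytes(unsigned bio_sectors,
			       unsigned extent_size_sectors,
			       unsigned offset_into_extent)
{
	unsigned remaining = extent_size_sectors - offset_into_extent;
	unsigned sectors = bio_sectors < remaining ? bio_sectors : remaining;

	return sectors << SECTOR_SHIFT;	/* sectors to bytes */
}

int main(void)
{
	/* 32-sector bio reading a 16-sector extent starting 4 sectors in: */
	printf("%u bytes\n", fragment_bytes(32, 16, 4));	/* 6144 (12 << 9) */
	return 0;
}

Clamping against k.k->size - offset_into_extent rather than against the extent's end position is what lets the same loop keep working once the key being read is an indirect extent at an unrelated btree position.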