author     Kent Overstreet <kent.overstreet@gmail.com>  2019-11-07 15:04:13 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:08:31 -0400
commit     70438dc3f0a2125ddaaa6ace99fc43d060b1c2e8 (patch)
tree       9a309293919094ec3d186c5ead6452e0848fe986 /fs/bcachefs/io.c
parent     c45376866aa1db911dfae2703ff919519757e780 (diff)
bcachefs: bch2_read_extent() microoptimizations
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
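The bulk of the diff below converts BUG_ON() assertions in the read path to EBUG_ON(). In bcachefs, EBUG_ON() only expands to a real assertion in debug builds, so each conversion removes a check (and its branch) from production kernels. A minimal sketch of the pattern as I understand it from bcachefs.h; treat the exact definition as an assumption:

    #ifdef CONFIG_BCACHEFS_DEBUG
    #define EBUG_ON(cond)	BUG_ON(cond)	/* debug kernels: assert as before */
    #else
    #define EBUG_ON(cond)			/* release kernels: compiles to nothing */
    #endif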
Diffstat (limited to 'fs/bcachefs/io.c')
-rw-r--r--  fs/bcachefs/io.c | 24
1 file changed, 13 insertions(+), 11 deletions(-)
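The other fast-path change shows up in two hunks: promote_alloc() goes from static inline to noinline, and its caller now tests opts.promote_target first, so reads with no promotion target configured never make the call at all. A sketch of that guard-plus-out-of-line-slow-path pattern, with illustrative names rather than the kernel's:

    #include <stddef.h>

    struct promote_op;

    /* slow path: kept out of line so it doesn't bloat the caller */
    __attribute__((noinline))
    static struct promote_op *promote_slow_path(void)
    {
    	/* expensive allocation and setup would live here */
    	return NULL;
    }

    static struct promote_op *maybe_promote(unsigned int promote_target)
    {
    	/* fast path: a single predictable test when promotion is off */
    	if (!promote_target)
    		return NULL;
    	return promote_slow_path();
    }

Marking the slow path noinline keeps the hot caller's instruction footprint small even though the function is no longer inlined.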
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 7a2368407a80..272477fb558f 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1272,7 +1272,6 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
closure_return_with_destructor(cl, promote_done);
}
-noinline
static struct promote_op *__promote_alloc(struct bch_fs *c,
enum btree_id btree_id,
struct bpos pos,
@@ -1346,7 +1345,8 @@ err:
return NULL;
}
-static inline struct promote_op *promote_alloc(struct bch_fs *c,
+noinline
+static struct promote_op *promote_alloc(struct bch_fs *c,
struct bvec_iter iter,
struct bkey_s_c k,
struct extent_ptr_decoded *pick,
@@ -1910,7 +1910,7 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
flags |= BCH_READ_MUST_BOUNCE;
- BUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
+ EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
(pick.crc.csum_type != BCH_CSUM_NONE &&
@@ -1922,8 +1922,9 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
bounce = true;
}
- promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
- &rbio, &bounce, &read_full);
+ if (orig->opts.promote_target)
+ promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
+ &rbio, &bounce, &read_full);
if (!read_full) {
EBUG_ON(pick.crc.compression_type);
@@ -1951,7 +1952,7 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
* data in the write path, but we're not going to use it all
* here:
*/
- BUG_ON(rbio->bio.bi_iter.bi_size <
+ EBUG_ON(rbio->bio.bi_iter.bi_size <
pick.crc.compressed_size << 9);
rbio->bio.bi_iter.bi_size =
pick.crc.compressed_size << 9;
@@ -1986,10 +1987,10 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
noclone:
rbio = orig;
rbio->bio.bi_iter = iter;
- BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
+ EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
}
- BUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
+ EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
rbio->c = c;
rbio->submit_time = local_clock();
@@ -2005,6 +2006,7 @@ noclone:
rbio->hole = 0;
rbio->retry = 0;
rbio->context = 0;
+ /* XXX: only initialize this if needed */
rbio->devs_have = bch2_bkey_devs(k);
rbio->pick = pick;
rbio->pos = pos;
@@ -2021,11 +2023,11 @@ noclone:
bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
- percpu_down_read(&c->mark_lock);
+ rcu_read_lock();
bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
- percpu_up_read(&c->mark_lock);
+ rcu_read_unlock();
- if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
+ if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
bio_inc_remaining(&orig->bio);
trace_read_split(&orig->bio);
}
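One locking note on the final hunk: the percpu rwsem read side around the bucket IO-clock reset becomes a plain RCU read-side critical section, which is far cheaper per read. This is only safe if everything touched inside the section can be accessed under RCU; that this holds for bucket_io_clock_reset() is my reading of the change, not something the commit message spells out. The shape of the swap, lifted from the hunk:

    /* before: take c->mark_lock for reading */
    percpu_down_read(&c->mark_lock);
    bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
    percpu_up_read(&c->mark_lock);

    /* after: RCU read-side section, near-zero cost on the read path */
    rcu_read_lock();
    bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
    rcu_read_unlock();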