author    | Kent Overstreet <kent.overstreet@linux.dev> | 2022-10-21 14:01:19 -0400
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:09:44 -0400
commit    | ed80c5699a23c4005ba8e81d4b8fb3e1b922fa40 (patch)
tree      | 3c2dd5e984480c39efbaa6ea19011bacbeec10d1
parent    | 2d485df3da368193dafc78be933669d427b7ddf7 (diff)
download  | lwn-ed80c5699a23c4005ba8e81d4b8fb3e1b922fa40.tar.gz
          | lwn-ed80c5699a23c4005ba8e81d4b8fb3e1b922fa40.zip
bcachefs: Optimize bch2_dev_usage_read()
- add bch2_dev_usage_read_fast(), which doesn't return by value -
  bch_dev_usage is big enough that we don't want the silent memcpy
- tweak the allocation path to only call bch2_dev_usage_read() once per
  bucket allocated
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
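
For readers outside the bcachefs tree, here is a minimal, self-contained sketch in plain C of the two ideas the commit message describes: filling a caller-supplied struct instead of returning a large struct by value (avoiding the hidden copy), and taking one usage snapshot per allocation that is then handed to every consumer that needs it. All names below (dev_usage, usage_read_fast, free_buckets, and so on) are illustrative stand-ins, not the actual bcachefs identifiers.

```c
/*
 * Illustrative sketch only; the types and helpers here are stand-ins,
 * not the real bcachefs API.
 */
#include <stdio.h>
#include <string.h>

/* Stand-in for a "big" stats struct like bch_dev_usage. */
struct dev_usage {
	unsigned long long buckets[8];
	unsigned long long sectors[8];
	unsigned long long fragmented[8];
};

/* Return by value: every caller pays for a copy of the whole struct. */
static struct dev_usage usage_read_by_value(const struct dev_usage *src)
{
	struct dev_usage ret;

	memcpy(&ret, src, sizeof(ret));	/* copied again into the caller's variable */
	return ret;
}

/* Out-parameter: the caller's buffer is filled in place, one copy total. */
static void usage_read_fast(const struct dev_usage *src, struct dev_usage *out)
{
	memcpy(out, src, sizeof(*out));
}

/* Two consumers that only need to look at an already-taken snapshot. */
static unsigned long long free_buckets(const struct dev_usage *u)
{
	return u->buckets[0];
}

static unsigned long long fragmented_sectors(const struct dev_usage *u)
{
	return u->fragmented[0];
}

int main(void)
{
	struct dev_usage shared = { .buckets = { 42 }, .fragmented = { 7 } };
	struct dev_usage snap;

	/* Read once per "allocation", then pass the same snapshot around,
	 * instead of re-reading (and re-copying) inside each helper. */
	usage_read_fast(&shared, &snap);
	printf("free=%llu fragmented=%llu\n",
	       free_buckets(&snap), fragmented_sectors(&snap));

	/* By-value variant shown only for contrast. */
	struct dev_usage copy = usage_read_by_value(&shared);
	printf("free=%llu\n", free_buckets(&copy));
	return 0;
}
```

In the patch itself this corresponds to bch2_dev_usage_read_fast() filling a caller-supplied struct, the usage pointer being threaded from bch2_bucket_alloc_trans() into bch2_dev_stripe_increment_inlined(), and the old by-value bch2_dev_usage_read() surviving only as a thin inline wrapper in buckets.h for existing callers.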
-rw-r--r-- | fs/bcachefs/alloc_foreground.c | 40
-rw-r--r-- | fs/bcachefs/buckets.c          |  9
-rw-r--r-- | fs/bcachefs/buckets.h          | 10
3 files changed, 38 insertions, 21 deletions
```diff
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index ab288176695e..5d7231979024 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -495,25 +495,25 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 			      struct bch_dev *ca,
 			      enum alloc_reserve reserve,
 			      bool may_alloc_partial,
-			      struct closure *cl)
+			      struct closure *cl,
+			      struct bch_dev_usage *usage)
 {
 	struct bch_fs *c = trans->c;
 	struct open_bucket *ob = NULL;
-	struct bch_dev_usage usage;
 	u64 avail;
 	struct bucket_alloc_state s = { 0 };
 	bool waiting = false;
 again:
-	usage = bch2_dev_usage_read(ca);
-	avail = dev_buckets_free(ca, usage, reserve);
+	bch2_dev_usage_read_fast(ca, usage);
+	avail = dev_buckets_free(ca, *usage, reserve);
 
-	if (usage.d[BCH_DATA_need_discard].buckets > avail)
+	if (usage->d[BCH_DATA_need_discard].buckets > avail)
 		bch2_do_discards(c);
 
-	if (usage.d[BCH_DATA_need_gc_gens].buckets > avail)
+	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
 		bch2_do_gc_gens(c);
 
-	if (should_invalidate_buckets(ca, usage))
+	if (should_invalidate_buckets(ca, *usage))
 		bch2_do_invalidates(c);
 
 	if (!avail) {
@@ -554,7 +554,7 @@ err:
 			bch2_alloc_reserves[reserve],
 			may_alloc_partial,
 			ob->bucket,
-			usage.d[BCH_DATA_free].buckets,
+			usage->d[BCH_DATA_free].buckets,
 			avail,
 			bch2_copygc_wait_amount(c),
 			c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
@@ -566,7 +566,7 @@ err:
 			bch2_alloc_reserves[reserve],
 			may_alloc_partial,
 			0,
-			usage.d[BCH_DATA_free].buckets,
+			usage->d[BCH_DATA_free].buckets,
 			avail,
 			bch2_copygc_wait_amount(c),
 			c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
@@ -582,11 +582,12 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
 				      bool may_alloc_partial,
 				      struct closure *cl)
 {
+	struct bch_dev_usage usage;
 	struct open_bucket *ob;
 
 	bch2_trans_do(c, NULL, NULL, 0,
 		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
-							may_alloc_partial, cl)));
+							may_alloc_partial, cl, &usage)));
 	return ob;
 }
 
@@ -613,8 +614,9 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
 	return ret;
 }
 
-void bch2_dev_stripe_increment(struct bch_dev *ca,
-			       struct dev_stripe_state *stripe)
+static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
+			       struct dev_stripe_state *stripe,
+			       struct bch_dev_usage *usage)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
 	u64 free_space = dev_buckets_available(ca, RESERVE_none);
@@ -633,6 +635,15 @@ void bch2_dev_stripe_increment(struct bch_dev *ca,
 		*v = *v < scale ?
 			0 : *v - scale;
 }
 
+void bch2_dev_stripe_increment(struct bch_dev *ca,
+			       struct dev_stripe_state *stripe)
+{
+	struct bch_dev_usage usage;
+
+	bch2_dev_usage_read_fast(ca, &usage);
+	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+}
+
 #define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
 #define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)
 
@@ -677,6 +688,7 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 	BUG_ON(*nr_effective >= nr_replicas);
 
 	for (i = 0; i < devs_sorted.nr; i++) {
+		struct bch_dev_usage usage;
 		struct open_bucket *ob;
 
 		dev = devs_sorted.devs[i];
@@ -696,9 +708,9 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 		}
 
 		ob = bch2_bucket_alloc_trans(trans, ca, reserve,
-				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
+				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
 		if (!IS_ERR(ob))
-			bch2_dev_stripe_increment(ca, stripe);
+			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
 		percpu_ref_put(&ca->ref);
 
 		if (IS_ERR(ob)) {
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 5cb4a00166f9..1a1790ac01ae 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -88,20 +88,17 @@ static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
 		: ca->usage[journal_seq & JOURNAL_BUF_MASK]);
 }
 
-struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
+void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
 {
 	struct bch_fs *c = ca->fs;
-	struct bch_dev_usage ret;
 	unsigned seq, i, u64s = dev_usage_u64s();
 
 	do {
 		seq = read_seqcount_begin(&c->usage_lock);
-		memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
+		memcpy(usage, ca->usage_base, u64s * sizeof(u64));
 		for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
-			acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
+			acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
 	} while (read_seqcount_retry(&c->usage_lock, seq));
-
-	return ret;
 }
 
 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index b4cf10a47c52..a43622193355 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -120,7 +120,15 @@ static inline u8 ptr_stale(struct bch_dev *ca,
 
 /* Device usage: */
 
-struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);
+void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
+static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
+{
+	struct bch_dev_usage ret;
+
+	bch2_dev_usage_read_fast(ca, &ret);
+	return ret;
+}
+
 void bch2_dev_usage_init(struct bch_dev *);
 
 static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reserve reserve)
```