author     Kent Overstreet <kent.overstreet@gmail.com>    2021-04-13 09:49:23 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:08:54 -0400
commit     cb66fc5fe4cc806d60d8884cb82b67c357b49640 (patch)
tree       7259c200084c9baf05d68584f2346abe957165e8 /fs/bcachefs/alloc_foreground.c
parent     006d69aa2655f1a0ca4e47666939669f27bb740f (diff)
bcachefs: Fix copygc threshold
A while back the meaning of is_available_bucket() - and thus also bch_dev_usage->buckets_unavailable - changed to include buckets that are owned by the allocator. This was so that the stat could be persisted like other allocation information, and wouldn't have to be regenerated by walking each bucket at mount time.

This broke copygc, which needs to consider buckets that are reclaimable but haven't yet been grabbed by the allocator thread and moved onto a freelist. This patch fixes that by adding dev_buckets_reclaimable() for copygc and the allocator thread, and cleans up some of the callers a bit.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
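For reference, a minimal sketch of what a dev_buckets_reclaimable() helper might look like. The diff below only covers alloc_foreground.c, so this is an illustration based on the description above rather than the exact code added elsewhere in the patch; the freelist fields (ca->free, ca->free_inc, ca->nr_open_buckets), RESERVE_NR, and fifo_used() are assumed from the surrounding bcachefs allocator code of this era:

/*
 * Illustrative sketch, not the patch itself: buckets that are neither in
 * use nor already sitting on an allocator freelist or open bucket are
 * what copygc can actually reclaim.
 */
static inline s64 dev_buckets_reclaimable(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	s64 available = dev_buckets_available(ca);
	unsigned i;

	/* Subtract buckets the allocator thread has already grabbed: */
	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++)
		available -= fifo_used(&ca->free[i]);
	available -= fifo_used(&ca->free_inc);
	available -= ca->nr_open_buckets;
	spin_unlock(&c->freelist_lock);

	return max(available, 0LL);
}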
Diffstat (limited to 'fs/bcachefs/alloc_foreground.c')
-rw-r--r--   fs/bcachefs/alloc_foreground.c   5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 97b692bcfe46..4834ac798b9e 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -109,7 +109,9 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 	spin_lock(&c->freelist_lock);
 	ob->freelist = c->open_buckets_freelist;
 	c->open_buckets_freelist = ob - c->open_buckets;
+
 	c->open_buckets_nr_free++;
+	ca->nr_open_buckets--;
 	spin_unlock(&c->freelist_lock);
 
 	closure_wake_up(&c->open_buckets_wait);
@@ -316,6 +318,7 @@ out:
 		c->blocked_allocate = 0;
 	}
 
+	ca->nr_open_buckets++;
 	spin_unlock(&c->freelist_lock);
 
 	bch2_wake_allocator(ca);
@@ -351,7 +354,7 @@ void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
-	u64 free_space = dev_buckets_free(ca);
+	u64 free_space = dev_buckets_available(ca);
 	u64 free_space_inv = free_space
 		? div64_u64(1ULL << 48, free_space)
 		: 1ULL << 48;
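A note on the hunk above: bch2_dev_stripe_increment() weights each device by the inverse of its available space (2^48 / free_space), so devices with more room accumulate stripe position more slowly and keep getting picked first. A standalone, userspace-only illustration of that arithmetic (device counts are made up, not from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical per-device available bucket counts: */
	uint64_t free_space[] = { 1000, 250, 10 };
	uint64_t next_alloc[3] = { 0, 0, 0 };

	for (int i = 0; i < 3; i++) {
		/* Same formula as the hunk above: inverse of free space,
		 * scaled by 2^48, capped when a device has no free space. */
		uint64_t inv = free_space[i]
			? (1ULL << 48) / free_space[i]
			: 1ULL << 48;
		next_alloc[i] += inv;
		printf("dev %d: free=%llu increment=%llu next_alloc=%llu\n",
		       i, (unsigned long long)free_space[i],
		       (unsigned long long)inv,
		       (unsigned long long)next_alloc[i]);
	}
	return 0;
}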