summaryrefslogtreecommitdiff
path: root/fs/bcachefs/buckets.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2020-06-03 23:46:15 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:08:40 -0400
commit255adc515aeab4bd870e548bb4154c2682871c05 (patch)
tree438a54fcc1637dadaa70802b8cbfdc4bcc7c0027 /fs/bcachefs/buckets.c
parenta27443bc7652a37db1ac99f2c77b20ac15947cc5 (diff)
downloadlwn-255adc515aeab4bd870e548bb4154c2682871c05.tar.gz
lwn-255adc515aeab4bd870e548bb4154c2682871c05.zip
bcachefs: Always increment bucket gen on bucket reuse
Not doing so confuses copygc.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r--fs/bcachefs/buckets.c30
1 file changed, 19 insertions, 11 deletions
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 4074bc073cfe..08e8b578fff5 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1496,6 +1496,8 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct bkey_s_c k_a;
struct bkey_alloc_unpacked u;
struct bkey_i_alloc *a;
+ struct bucket *g;
+ struct bucket_mark m;
int ret;
ret = trans_get_key(trans, BTREE_ID_ALLOC,
@@ -1504,26 +1506,32 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
if (ret < 0)
return ret;
- if (k_a.k->type != KEY_TYPE_alloc ||
- (!ret && unlikely(!test_bit(BCH_FS_ALLOC_WRITTEN, &c->flags)))) {
+ percpu_down_read(&c->mark_lock);
+ g = bucket(ca, iter->pos.offset);
+ m = READ_ONCE(g->mark);
+
+ if (unlikely(!test_bit(BCH_FS_ALLOC_WRITTEN, &c->flags) && !ret)) {
/*
* During journal replay, and if gc repairs alloc info at
* runtime, the alloc info in the btree might not be up to date
* yet - so, trust the in memory mark - unless we're already
* updating that key:
*/
- struct bucket *g;
- struct bucket_mark m;
-
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, iter->pos.offset);
- m = READ_ONCE(g->mark);
- u = alloc_mem_to_key(g, m);
- percpu_up_read(&c->mark_lock);
+ u = alloc_mem_to_key(g, m);
} else {
- u = bch2_alloc_unpack(k_a);
+ u = bch2_alloc_unpack(k_a);
+ u.read_time = g->io_time[READ];
+ u.write_time = g->io_time[WRITE];
}
+ percpu_up_read(&c->mark_lock);
+
+ /*
+ * Incrementing the bucket gen can be done lazily:
+ */
+ if (gen_after(m.gen, u.gen) && !u.data_type)
+ u.gen = m.gen;
+
ret = __mark_pointer(c, k, p, sectors, data_type, u.gen, &u.data_type,
&u.dirty_sectors, &u.cached_sectors);
if (ret)