author	Kent Overstreet <kent.overstreet@gmail.com>	2021-12-30 19:30:42 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:09:19 -0400
commit	c64740ef27cfe2092e3a56509b3bf44e9b10ae49 (patch)
tree	813348a007f94ed35d9dd1161b76c3452bc7ede9 /fs
parent	77170d0dd7020ed72cd748a0c354bf0c0345b6b3 (diff)
bcachefs: Don't start allocator threads too early
If the allocator threads start before journal replay has finished replaying alloc keys, journal replay might overwrite the allocator's btree updates.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
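The pattern the patch applies is: keep a background thread parked behind a flag bit, set the bit only once journal replay of the relevant keys has finished, then wake the thread. Below is a minimal user-space sketch of that ordering using pthreads instead of the kernel's test_bit()/set_bit() on c->flags and bch2_wake_allocator(); all names in it (replay_done, worker_fn, finish_replay) are hypothetical illustration, not bcachefs APIs.

/*
 * Sketch only, not bcachefs code: a worker stays parked until a
 * "replay done" flag is set, so it cannot issue updates that replay
 * would later overwrite.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool replay_done;		/* analogue of BCH_FS_ALLOC_REPLAY_DONE */

static void *worker_fn(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&lock);
	while (!replay_done)		/* allocator stays in the "stopped" state */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("worker: replay finished, safe to issue updates\n");
	return NULL;
}

static void finish_replay(void)
{
	pthread_mutex_lock(&lock);
	replay_done = true;		/* the set_bit() in the real patch */
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&cond);	/* the bch2_wake_allocator() analogue */
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, worker_fn, NULL);
	/* ... journal replay of alloc keys would run here ... */
	finish_replay();
	pthread_join(worker, NULL);
	return 0;
}

In the actual patch the same ordering is expressed by making allocator_thread_running() also test BCH_FS_ALLOC_REPLAY_DONE, and by setting that bit (and waking each device's allocator) in bch2_journal_replay() only after the alloc keys have been replayed.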
Diffstat (limited to 'fs')
-rw-r--r--fs/bcachefs/alloc_background.c3
-rw-r--r--fs/bcachefs/bcachefs.h1
-rw-r--r--fs/bcachefs/recovery.c9
3 files changed, 11 insertions, 2 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index e81e05629ffc..fe7bc3cdee30 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -900,7 +900,8 @@ static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
static bool allocator_thread_running(struct bch_dev *ca)
{
unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
- test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
+ test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags) &&
+ test_bit(BCH_FS_ALLOC_REPLAY_DONE, &ca->fs->flags)
? ALLOCATOR_running
: ALLOCATOR_stopped;
alloc_thread_set_state(ca, state);
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index c8c7f6b8ee21..5f18531dc34c 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -509,6 +509,7 @@ enum {
BCH_FS_INITIAL_GC_DONE,
BCH_FS_INITIAL_GC_UNFIXED,
BCH_FS_TOPOLOGY_REPAIR_DONE,
+ BCH_FS_ALLOC_REPLAY_DONE,
BCH_FS_BTREE_INTERIOR_REPLAY_DONE,
BCH_FS_FSCK_DONE,
BCH_FS_STARTED,
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 9916fad292be..d0ceac0f2b39 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -567,9 +567,10 @@ static int bch2_journal_replay(struct bch_fs *c,
struct journal_keys keys)
{
struct journal *j = &c->journal;
+ struct bch_dev *ca;
struct journal_key *i;
u64 seq;
- int ret;
+ int ret, idx;
sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);
@@ -593,6 +594,11 @@ static int bch2_journal_replay(struct bch_fs *c,
}
}
+ /* Now we can start the allocator threads: */
+ set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
+ for_each_member_device(ca, c, idx)
+ bch2_wake_allocator(ca);
+
/*
* Next replay updates to interior btree nodes:
*/
@@ -1391,6 +1397,7 @@ int bch2_fs_initialize(struct bch_fs *c)
for (i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc(c, i);
+ set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);