summaryrefslogtreecommitdiff
path: root/fs/bcachefs/recovery.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2022-09-12 02:22:47 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:09:40 -0400
commit1ffb876fb0f31632b761ee721f633e0d7491ca7b (patch)
tree29d9639963207be239bef63aa9cec2a069b312ef /fs/bcachefs/recovery.c
parente87b0e4a7120eeca1850666351b75bf8ceb9d5c9 (diff)
downloadlwn-1ffb876fb0f31632b761ee721f633e0d7491ca7b.tar.gz
lwn-1ffb876fb0f31632b761ee721f633e0d7491ca7b.zip
bcachefs: Kill journal_keys->journal_seq_base
This removes an optimization that didn't actually save us any memory, due to alignment, but did make the code more complicated than it needed to be. We were also seeing a bug where journal_seq_base wasn't getting correctly initialized, so hopefully it'll fix that too. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/recovery.c')
-rw-r--r--fs/bcachefs/recovery.c14
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 2cf347530b65..ea8cc636a9e0 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -222,7 +222,6 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
struct journal_keys new_keys = {
.nr = keys->nr,
.size = max_t(size_t, keys->size, 8) * 2,
- .journal_seq_base = keys->journal_seq_base,
};
new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
@@ -493,9 +492,6 @@ static int journal_keys_sort(struct bch_fs *c)
if (!i || i->ignore)
continue;
- if (!keys->journal_seq_base)
- keys->journal_seq_base = le64_to_cpu(i->j.seq);
-
for_each_jset_key(k, _n, entry, &i->j)
nr_keys++;
}
@@ -515,15 +511,12 @@ static int journal_keys_sort(struct bch_fs *c)
if (!i || i->ignore)
continue;
- BUG_ON(le64_to_cpu(i->j.seq) - keys->journal_seq_base > U32_MAX);
-
for_each_jset_key(k, _n, entry, &i->j)
keys->d[keys->nr++] = (struct journal_key) {
.btree_id = entry->btree_id,
.level = entry->level,
.k = k,
- .journal_seq = le64_to_cpu(i->j.seq) -
- keys->journal_seq_base,
+ .journal_seq = le64_to_cpu(i->j.seq),
.journal_offset = k->_data - i->j._data,
};
}
@@ -617,15 +610,12 @@ static int bch2_journal_replay(struct bch_fs *c)
sizeof(keys_sorted[0]),
journal_sort_seq_cmp, NULL);
- if (keys->nr)
- replay_now_at(j, keys->journal_seq_base);
-
for (i = 0; i < keys->nr; i++) {
k = keys_sorted[i];
cond_resched();
- replay_now_at(j, keys->journal_seq_base + k->journal_seq);
+ replay_now_at(j, k->journal_seq);
ret = bch2_trans_do(c, NULL, NULL,
BTREE_INSERT_LAZY_RW|