summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2021-01-11 16:11:02 -0500
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:08:51 -0400
commitedfbba58e3e7a94900d24d266e6365b1ab531e3b (patch)
treee523610cc02bc6a6b17c13155f920155d3e40a3b
parent2a3731e34de9365038b25d76bb6e11cf5c40ac36 (diff)
downloadlwn-edfbba58e3e7a94900d24d266e6365b1ab531e3b.tar.gz
lwn-edfbba58e3e7a94900d24d266e6365b1ab531e3b.zip
bcachefs: Add btree node prefetching to bch2_btree_and_journal_walk()
bch2_btree_and_journal_walk() walks the btree overlaying keys from the journal; it was introduced so that we could read in the alloc btree prior to journal replay being done, when journalling of updates to interior btree nodes was introduced. But it didn't have btree node prefetching, which introduced a severe regression with mount times, particularly on spinning rust.

This patch implements btree node prefetching for the btree + journal walk, hopefully fixing that.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--fs/bcachefs/btree_cache.c8
-rw-r--r--fs/bcachefs/btree_cache.h2
-rw-r--r--fs/bcachefs/btree_iter.c3
-rw-r--r--fs/bcachefs/recovery.c37
4 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 904440f26d40..4b29be7234c7 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -1008,20 +1008,20 @@ out:
}
void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
- const struct bkey_i *k, unsigned level)
+ const struct bkey_i *k,
+ enum btree_id btree_id, unsigned level)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- BUG_ON(!btree_node_locked(iter, level + 1));
+ BUG_ON(iter && !btree_node_locked(iter, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_cache_find(bc, k);
if (b)
return;
- bch2_btree_node_fill(c, iter, k, iter->btree_id,
- level, SIX_LOCK_read, false);
+ bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
}
void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index e766ef552ce7..0eeca0bcc48e 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -32,7 +32,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *, struct btree_iter *,
struct btree *, enum btree_node_sibling);
void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
- const struct bkey_i *, unsigned);
+ const struct bkey_i *, enum btree_id, unsigned);
void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 47d833f5ad56..196f346f0544 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1067,7 +1067,8 @@ static void btree_iter_prefetch(struct btree_iter *iter)
break;
bch2_bkey_buf_unpack(&tmp, c, l->b, k);
- bch2_btree_node_prefetch(c, iter, tmp.k, iter->level - 1);
+ bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
+ iter->level - 1);
}
if (!was_locked)
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 5a43682c26ef..c700b12b2ac0 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -206,6 +206,31 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *i
/* Walk btree, overlaying keys from the journal: */
+static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
+ struct btree_and_journal_iter iter)
+{
+ unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
+ struct bkey_s_c k;
+ struct bkey_buf tmp;
+
+ BUG_ON(!b->c.level);
+
+ bch2_bkey_buf_init(&tmp);
+
+ while (i < nr &&
+ (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ bch2_bkey_buf_reassemble(&tmp, c, k);
+
+ bch2_btree_node_prefetch(c, NULL, tmp.k,
+ b->c.btree_id, b->c.level - 1);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+ i++;
+ }
+
+ bch2_bkey_buf_exit(&tmp, c);
+}
+
static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
struct journal_keys *journal_keys,
enum btree_id btree_id,
@@ -214,8 +239,11 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b
{
struct btree_and_journal_iter iter;
struct bkey_s_c k;
+ struct bkey_buf tmp;
+ struct btree *child;
int ret = 0;
+ bch2_bkey_buf_init(&tmp);
bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
@@ -224,23 +252,19 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b
break;
if (b->c.level) {
- struct btree *child;
- struct bkey_buf tmp;
-
- bch2_bkey_buf_init(&tmp);
bch2_bkey_buf_reassemble(&tmp, c, k);
- k = bkey_i_to_s_c(tmp.k);
bch2_btree_and_journal_iter_advance(&iter);
child = bch2_btree_node_get_noiter(c, tmp.k,
b->c.btree_id, b->c.level - 1);
- bch2_bkey_buf_exit(&tmp, c);
ret = PTR_ERR_OR_ZERO(child);
if (ret)
break;
+ btree_and_journal_iter_prefetch(c, b, iter);
+
ret = (node_fn ? node_fn(c, b) : 0) ?:
bch2_btree_and_journal_walk_recurse(c, child,
journal_keys, btree_id, node_fn, key_fn);
@@ -253,6 +277,7 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b
}
}
+ bch2_bkey_buf_exit(&tmp, c);
return ret;
}