author: Kent Overstreet <kent.overstreet@gmail.com> 2020-03-05 18:44:59 -0500
committer: Kent Overstreet <kent.overstreet@linux.dev> 2023-10-22 17:08:36 -0400
commit: 57b0b3db475de6b724e4db3b827c00484cdde642 (patch)
tree: f69b862114340543184fa7bcafffd306f0134846 /fs/bcachefs/btree_iter.c
parent: 7d6f9b6409ef8c587d7395ba6c5674cae878c886 (diff)
bcachefs: btree_iter_peek_with_updates()
Introduce a new iterator method that provides a consistent view of the btree plus uncommitted updates.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r--  fs/bcachefs/btree_iter.c | 83
1 file changed, 83 insertions, 0 deletions
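
As a rough illustration of the interface this commit adds (not part of the patch): the sketch below walks a btree as it would look with the transaction's queued updates applied. The demo_peek_with_updates() wrapper is hypothetical, and the surrounding helpers (bch2_trans_init(), bch2_trans_get_iter(), bch2_trans_exit(), bkey_err()) are assumed from the bcachefs transaction API of this era; only bch2_btree_iter_peek_with_updates() and bch2_btree_iter_next_with_updates() come from the patch itself.

/*
 * Illustrative sketch only: iterate keys as they would appear once the
 * transaction's pending updates are applied. Helper names other than the
 * two new *_with_updates() methods are assumptions about this era's API.
 */
static int demo_peek_with_updates(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	/* Any btree works; extents are used here purely for illustration: */
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);

	for (k = bch2_btree_iter_peek_with_updates(iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_next_with_updates(iter)) {
		/*
		 * k reflects both committed keys and updates queued in the
		 * transaction (trans->updates) but not yet committed.
		 */
	}

	bch2_trans_exit(&trans);
	return ret;
}

The point of the new peek variant, as the commit message says, is that keys queued in the transaction become visible to iteration before they are committed, alongside what is already in the btree.
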
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 6daca5afb486..347477c62779 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -5,6 +5,7 @@
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_locking.h"
+#include "btree_update.h"
#include "debug.h"
#include "extents.h"
#include "trace.h"
@@ -1497,6 +1498,88 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
return bch2_btree_iter_peek(iter);
}
+static struct bkey_s_c __btree_trans_updates_peek(struct btree_iter *iter)
+{
+ struct bpos pos = btree_iter_search_key(iter);
+ struct btree_trans *trans = iter->trans;
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i)
+ if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
+ bkey_cmp(pos, i->k->k.p)) <= 0)
+ break;
+
+ return i < trans->updates + trans->nr_updates &&
+ iter->btree_id == i->iter->btree_id
+ ? bkey_i_to_s_c(i->k)
+ : bkey_s_c_null;
+}
+
+static struct bkey_s_c __bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
+{
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_s_c k = __btree_iter_peek(iter, l);
+ struct bkey_s_c u = __btree_trans_updates_peek(iter);
+
+ if (k.k && (!u.k || bkey_cmp(k.k->p, u.k->p) < 0))
+ return k;
+ if (u.k && bkey_cmp(u.k->p, l->b->key.k.p) <= 0) {
+ iter->k = *u.k;
+ return u;
+ }
+ return bkey_s_c_null;
+}
+
+struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
+{
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+
+ while (1) {
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ k = __bch2_btree_iter_peek_with_updates(iter);
+
+ if (k.k && bkey_deleted(k.k)) {
+ bch2_btree_iter_set_pos(iter,
+ btree_type_successor(iter->btree_id, iter->k.p));
+ continue;
+ }
+
+ if (likely(k.k))
+ break;
+
+ if (!btree_iter_set_pos_to_next_leaf(iter))
+ return bkey_s_c_null;
+ }
+
+ /*
+ * iter->pos should always be equal to the key we just
+ * returned - except extents can straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
+ bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ iter->pos = bkey_start_pos(k.k);
+
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return k;
+}
+
+struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
+{
+ if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+ return bkey_s_c_null;
+
+ bch2_btree_iter_set_pos(iter,
+ btree_type_successor(iter->btree_id, iter->k.p));
+
+ return bch2_btree_iter_peek_with_updates(iter);
+}
+
/**
* bch2_btree_iter_peek_prev: returns first key less than or equal to
* iterator's current position