path: root/fs/bcachefs/buckets.c
author    Kent Overstreet <kent.overstreet@gmail.com>  2021-06-02 00:18:34 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:06 -0400
commit    8e6bbc4181c9eb1bc8dcb0a96522447c6b6ad76e (patch)
tree      272d302979217f8a7bd6cab484f2b3def734f2cf /fs/bcachefs/buckets.c
parent    b1d87f527d7e6eb89395d4a0218b7e4e3974ff1b (diff)
download  lwn-8e6bbc4181c9eb1bc8dcb0a96522447c6b6ad76e.tar.gz
          lwn-8e6bbc4181c9eb1bc8dcb0a96522447c6b6ad76e.zip
bcachefs: Move extent_handle_overwrites() to bch2_trans_update()
This lifts handling of overlapping extents out of __bch2_trans_commit() and moves it to where we first do the update - which means that BTREE_ITER_WITH_UPDATES can now work correctly in extents mode.

Also, this patch reworks how extent triggers work: previously, on partial extent overwrite we would pass this information to the trigger, telling it what part of the extent was being overwritten. But this approach has had too many subtle corner cases - now, we only mark whole extents, meaning on partial extent overwrite we unmark the old extent and mark the new extent.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
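As a rough illustration of the trigger rework described above, the sketch below is a standalone model, not bcachefs code: struct extent, mark_whole_extent() and sectors_used are all invented for the example. The point it shows is that triggers now only ever account whole extents - on a partial overwrite the old extent is unmarked in full and the new extent is marked in full, instead of the trigger being told which slice of the old extent went away.

/*
 * Standalone sketch (not bcachefs code): models the whole-extent
 * trigger accounting described in the commit message.
 */
#include <stdio.h>
#include <stdint.h>

struct extent {
	uint64_t	start;	/* first sector */
	uint64_t	size;	/* length in sectors */
};

/* Accumulated sector count, standing in for bucket/replica accounting */
static int64_t sectors_used;

static void mark_whole_extent(const struct extent *e, int sign)
{
	/* Whole-extent marking: always +size or -size, never a partial slice */
	sectors_used += sign * (int64_t) e->size;
	printf("%smark extent [%llu, %llu): %c%llu sectors (running total %lld)\n",
	       sign < 0 ? "un" : "",
	       (unsigned long long) e->start,
	       (unsigned long long) (e->start + e->size),
	       sign < 0 ? '-' : '+',
	       (unsigned long long) e->size,
	       (long long) sectors_used);
}

int main(void)
{
	struct extent old = { .start = 0,  .size = 128 };
	struct extent new = { .start = 64, .size = 32 };	/* overwrites the middle of @old */

	/*
	 * Partial overwrite: rather than telling the trigger that sectors
	 * 64..96 of @old went away, unmark all of @old and mark all of
	 * @new.  Any surviving pieces of @old would presumably be
	 * re-marked when their trimmed keys are inserted as updates of
	 * their own (an inference, not something the commit message
	 * spells out).
	 */
	mark_whole_extent(&old, -1);
	mark_whole_extent(&new, +1);

	return 0;
}

In the patch itself this corresponds to the simplified bch2_trans_mark_update(): instead of walking overlapping extents and computing per-case offsets and sector counts, it now calls bch2_trans_mark_key() once with BTREE_TRIGGER_INSERT for the whole new key and once with BTREE_TRIGGER_OVERWRITE for the whole old key (or a single combined call when the key types match on a non-extents btree).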
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r--  fs/bcachefs/buckets.c  145
1 file changed, 35 insertions(+), 110 deletions(-)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index db8c3b7f5fa1..3c5c73f97b8c 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1519,29 +1519,6 @@ static struct btree_iter *trans_get_update(struct btree_trans *trans,
return NULL;
}
-static int trans_get_key(struct btree_trans *trans,
- enum btree_id btree_id, struct bpos pos,
- struct btree_iter **iter,
- struct bkey_s_c *k)
-{
- unsigned flags = btree_id != BTREE_ID_alloc
- ? BTREE_ITER_SLOTS
- : BTREE_ITER_CACHED;
- int ret;
-
- *iter = trans_get_update(trans, btree_id, pos, k);
- if (*iter)
- return 1;
-
- *iter = bch2_trans_get_iter(trans, btree_id, pos,
- flags|BTREE_ITER_INTENT);
- *k = __bch2_btree_iter_peek(*iter, flags);
- ret = bkey_err(*k);
- if (ret)
- bch2_trans_iter_put(trans, *iter);
- return ret;
-}
-
static struct bkey_alloc_buf *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
const struct bch_extent_ptr *ptr,
@@ -1621,9 +1598,13 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
struct bch_replicas_padded r;
int ret = 0;
- ret = trans_get_key(trans, BTREE_ID_stripes, POS(0, p.ec.idx), &iter, &k);
- if (ret < 0)
- return ret;
+ iter = bch2_trans_get_iter(trans, BTREE_ID_stripes, POS(0, p.ec.idx),
+ BTREE_ITER_INTENT|
+ BTREE_ITER_WITH_UPDATES);
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
if (k.k->type != KEY_TYPE_stripe) {
bch2_fs_inconsistent(c,
@@ -1631,7 +1612,7 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
(u64) p.ec.idx);
bch2_inconsistent_error(c);
ret = -EIO;
- goto out;
+ goto err;
}
if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
@@ -1639,13 +1620,13 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
"stripe pointer doesn't match stripe %llu",
(u64) p.ec.idx);
ret = -EIO;
- goto out;
+ goto err;
}
s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
ret = PTR_ERR_OR_ZERO(s);
if (ret)
- goto out;
+ goto err;
bkey_reassemble(&s->k_i, k);
stripe_blockcount_set(&s->v, p.ec.block,
@@ -1656,7 +1637,7 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
r.e.data_type = data_type;
update_replicas_list(trans, &r.e, sectors);
-out:
+err:
bch2_trans_iter_put(trans, iter);
return ret;
}
@@ -1838,10 +1819,13 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
int frags_referenced;
s64 ret;
- ret = trans_get_key(trans, BTREE_ID_reflink,
- POS(0, idx), &iter, &k);
- if (ret < 0)
- return ret;
+ iter = bch2_trans_get_iter(trans, BTREE_ID_reflink, POS(0, idx),
+ BTREE_ITER_INTENT|
+ BTREE_ITER_WITH_UPDATES);
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
sectors = min_t(u64, sectors, k.k->p.offset - idx);
@@ -1994,86 +1978,27 @@ int bch2_trans_mark_update(struct btree_trans *trans,
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
- if (!btree_node_type_is_extents(iter->btree_id)) {
- if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
- old = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(old);
- if (ret)
- return ret;
- } else {
- struct bkey_cached *ck = (void *) iter->l[0].b;
-
- BUG_ON(!ck->valid);
- old = bkey_i_to_s_c(ck->k);
- }
-
- if (old.k->type == new->k.type) {
- ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
- BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
- } else {
- ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
- BTREE_TRIGGER_INSERT|flags) ?:
- bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
- BTREE_TRIGGER_OVERWRITE|flags);
- }
- } else {
- struct btree_iter *copy;
- struct bkey _old;
-
- EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
-
- bkey_init(&_old);
- old = (struct bkey_s_c) { &_old, NULL };
-
- ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
- 0, new->k.size,
- BTREE_TRIGGER_INSERT);
+ if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
+ old = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(old);
if (ret)
return ret;
+ } else {
+ struct bkey_cached *ck = (void *) iter->l[0].b;
- copy = bch2_trans_copy_iter(trans, iter);
-
- for_each_btree_key_continue(copy, 0, old, ret) {
- unsigned offset = 0;
- s64 sectors = -((s64) old.k->size);
-
- flags |= BTREE_TRIGGER_OVERWRITE;
-
- if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
- break;
-
- switch (bch2_extent_overlap(&new->k, old.k)) {
- case BCH_EXTENT_OVERLAP_ALL:
- offset = 0;
- sectors = -((s64) old.k->size);
- break;
- case BCH_EXTENT_OVERLAP_BACK:
- offset = bkey_start_offset(&new->k) -
- bkey_start_offset(old.k);
- sectors = bkey_start_offset(&new->k) -
- old.k->p.offset;
- break;
- case BCH_EXTENT_OVERLAP_FRONT:
- offset = 0;
- sectors = bkey_start_offset(old.k) -
- new->k.p.offset;
- break;
- case BCH_EXTENT_OVERLAP_MIDDLE:
- offset = bkey_start_offset(&new->k) -
- bkey_start_offset(old.k);
- sectors = -((s64) new->k.size);
- flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
- break;
- }
-
- BUG_ON(sectors >= 0);
+ BUG_ON(!ck->valid);
+ old = bkey_i_to_s_c(ck->k);
+ }
- ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
- offset, sectors, flags);
- if (ret)
- break;
- }
- bch2_trans_iter_put(trans, copy);
+ if (old.k->type == new->k.type &&
+ !btree_node_type_is_extents(iter->btree_id)) {
+ ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
+ BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
+ } else {
+ ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, new->k.size,
+ BTREE_TRIGGER_INSERT|flags) ?:
+ bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, -((s64) old.k->size),
+ BTREE_TRIGGER_OVERWRITE|flags);
}
return ret;