author		Kent Overstreet <kent.overstreet@gmail.com>	2020-06-28 18:11:12 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:08:42 -0400
commit		64f2a8803ec8d3702a4b5225726f9c1dc685f43a (patch)
tree		1dd5b86c7a0f1ea53a5b0f6dc37425db9fc8b79a /fs/bcachefs/buckets.c
parent		c61b7e21ecfff2096cdb84d86bd18f1ceab7de72 (diff)
bcachefs: Fix bch2_extent_can_insert() not being called
It's supposed to check whether we're splitting a compressed extent and, if so, get a bigger disk reservation - hence this fixes a "disk usage increased by x without a reservation" bug.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
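For context on the reservation problem the message describes: when an insert lands strictly inside an existing compressed extent, the extent is split, and each fragment still references the full compressed allocation on disk, so extra sectors must be reserved before the insert goes through. The following is a minimal, self-contained sketch of that accounting only; it is not the bch2_extent_can_insert() implementation, and every type and function name in it (sketch_extent, extra_sectors_for_split, ...) is hypothetical.

/*
 * Hypothetical sketch of "splitting a compressed extent needs a bigger
 * disk reservation".  Not bcachefs code; names are made up for
 * illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_extent {
	uint64_t start;		/* first logical sector covered */
	uint64_t end;		/* one past the last logical sector */
	uint64_t disk_sectors;	/* sectors the compressed data occupies */
	bool	 compressed;
};

/*
 * If the new key [insert_start, insert_end) falls strictly inside @old,
 * @old will be split in two.  For a compressed extent both fragments keep
 * referencing the whole compressed allocation, so the on-disk footprint
 * grows by one more copy's worth of sectors, which must be reserved.
 */
static uint64_t extra_sectors_for_split(const struct sketch_extent *old,
					uint64_t insert_start,
					uint64_t insert_end)
{
	bool splits = insert_start > old->start && insert_end < old->end;

	return (splits && old->compressed) ? old->disk_sectors : 0;
}

int main(void)
{
	struct sketch_extent old = {
		.start = 0, .end = 128, .disk_sectors = 32, .compressed = true,
	};

	/* Overwriting sectors 40..80 splits the compressed extent. */
	printf("extra sectors to reserve: %llu\n",
	       (unsigned long long) extra_sectors_for_split(&old, 40, 80));
	return 0;
}

If the check that computes this extra reservation is never called, disk usage can grow past what was reserved, which is the symptom the commit message quotes.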
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r--	fs/bcachefs/buckets.c | 48
1 file changed, 27 insertions, 21 deletions
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 1198c7bbeab9..4ea84cbac5d3 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1368,8 +1368,8 @@ int bch2_mark_update(struct btree_trans *trans,
 		     unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct btree *b = iter->l[0].b;
-	struct btree_node_iter node_iter = iter->l[0].iter;
+	struct btree *b = iter_l(iter)->b;
+	struct btree_node_iter node_iter = iter_l(iter)->iter;
 	struct bkey_packed *_k;
 	int ret = 0;
 
@@ -1431,32 +1431,38 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans,
 		disk_res_sectors);
 
 	trans_for_each_update(trans, i) {
-		struct btree_iter *iter = i->iter;
-		struct btree *b = iter->l[0].b;
-		struct btree_node_iter node_iter = iter->l[0].iter;
-		struct bkey_packed *_k;
-
 		pr_err("while inserting");
 		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
 		pr_err("%s", buf);
 		pr_err("overlapping with");
 
-		node_iter = iter->l[0].iter;
-		while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
-			struct bkey unpacked;
-			struct bkey_s_c k;
+		if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
+			struct btree *b = iter_l(i->iter)->b;
+			struct btree_node_iter node_iter = iter_l(i->iter)->iter;
+			struct bkey_packed *_k;
 
-			k = bkey_disassemble(b, _k, &unpacked);
+			while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
+				struct bkey unpacked;
+				struct bkey_s_c k;
 
-			if (btree_node_is_extents(b)
-			    ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
-			    : bkey_cmp(i->k->k.p, k.k->p))
-				break;
+				pr_info("_k %px format %u", _k, _k->format);
+				k = bkey_disassemble(b, _k, &unpacked);
 
-			bch2_bkey_val_to_text(&PBUF(buf), c, k);
-			pr_err("%s", buf);
+				if (btree_node_is_extents(b)
+				    ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
+				    : bkey_cmp(i->k->k.p, k.k->p))
+					break;
+
+				bch2_bkey_val_to_text(&PBUF(buf), c, k);
+				pr_err("%s", buf);
 
-			bch2_btree_node_iter_advance(&node_iter, b);
+				bch2_btree_node_iter_advance(&node_iter, b);
+			}
+		} else {
+			struct bkey_cached *ck = (void *) i->iter->l[0].b;
+
+			bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
+			pr_err("%s", buf);
 		}
 	}
 }
@@ -1808,8 +1814,8 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 			   struct bkey_i *insert,
 			   unsigned flags)
 {
-	struct btree *b = iter->l[0].b;
-	struct btree_node_iter node_iter = iter->l[0].iter;
+	struct btree *b = iter_l(iter)->b;
+	struct btree_node_iter node_iter = iter_l(iter)->iter;
 	struct bkey_packed *_k;
 	int ret;
 