diff options
Diffstat (limited to 'fs/bcachefs/btree_update_interior.c')
-rw-r--r-- | fs/bcachefs/btree_update_interior.c | 50 |
1 file changed, 27 insertions, 23 deletions
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index af31819c88c7..2631b0732d4b 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -160,7 +160,6 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b, { struct bch_fs *c = as->c; struct pending_btree_node_free *d; - unsigned replicas; /* * btree_update lock is only needed here to avoid racing with @@ -179,15 +178,6 @@ found: d->index_update_done = true; /* - * Btree nodes are accounted as freed in bch_alloc_stats when they're - * freed from the index: - */ - replicas = bch2_extent_nr_dirty_ptrs(k); - if (replicas) - stats->replicas[replicas - 1].data[BCH_DATA_BTREE] -= - c->opts.btree_node_size * replicas; - - /* * We're dropping @k from the btree, but it's still live until the * index update is persistent so we need to keep a reference around for * mark and sweep to find - that's primarily what the @@ -208,15 +198,16 @@ found: * bch2_mark_key() compares the current gc pos to the pos we're * moving this reference from, hence one comparison here: */ - if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) { - struct bch_fs_usage tmp = { 0 }; + if (gc_pos_cmp(c->gc_pos, b + ? gc_pos_btree_node(b) + : gc_pos_btree_root(as->btree_id)) >= 0 && + gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) { + struct gc_pos pos = { 0 }; bch2_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&d->key), - false, 0, b - ? gc_pos_btree_node(b) - : gc_pos_btree_root(as->btree_id), - &tmp, 0, 0); + false, 0, pos, + NULL, 0, BCH_BUCKET_MARK_GC); /* * Don't apply tmp - pending deletes aren't tracked in * bch_alloc_stats: @@ -287,19 +278,13 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b, static void bch2_btree_node_free_ondisk(struct bch_fs *c, struct pending_btree_node_free *pending) { - struct bch_fs_usage stats = { 0 }; - BUG_ON(!pending->index_update_done); bch2_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&pending->key), false, 0, gc_phase(GC_PHASE_PENDING_DELETE), - &stats, 0, 0); - /* - * Don't apply stats - pending deletes aren't tracked in - * bch_alloc_stats: - */ + NULL, 0, 0); } static struct btree *__bch2_btree_node_alloc(struct bch_fs *c, @@ -1939,6 +1924,25 @@ static void __bch2_btree_node_update_key(struct bch_fs *c, btree_interior_update_add_node_reference(as, b); + /* + * XXX: the rest of the update path treats this like we're actually + * inserting a new node and deleting the existing node, so the + * reservation needs to include enough space for @b + * + * that is actually sketch as fuck though and I am surprised the code + * seems to work like that, definitely need to go back and rework it + * into something saner. + * + * (I think @b is just getting double counted until the btree update + * finishes and "deletes" @b on disk) + */ + ret = bch2_disk_reservation_add(c, &as->reserve->disk_res, + c->opts.btree_node_size * + bch2_extent_nr_ptrs(extent_i_to_s_c(new_key)), + BCH_DISK_RESERVATION_NOFAIL| + BCH_DISK_RESERVATION_GC_LOCK_HELD); + BUG_ON(ret); + parent = btree_node_parent(iter, b); if (parent) { if (new_hash) { |