author    Kent Overstreet <kent.overstreet@gmail.com>  2022-02-26 21:46:41 -0500
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:26 -0400
commit    82732ef510b8455bbf9e9292b6fd04cb724bdadf (patch)
tree      9cbbade3d4d095cd0ac0573bbfae47cc29dc0b78
parent    39dcace83889f43d5619d07c2ec76c286c88a85b (diff)
bcachefs: Improve btree_node_write_if_need()
btree_node_write_if_need() kicks off a btree node write only if need_write is
set; this makes the locking easier to reason about by moving the check into
the cmpxchg loop in __bch2_btree_node_write().

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
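For context, here is a minimal sketch of the shape __bch2_btree_node_write()
takes after this patch, assembled from the hunks below. The BTREE_WRITE_* and
BTREE_NODE_* names and the individual checks come from the patch itself; the
surrounding function body is heavily abbreviated and purely illustrative, and
the flag updates beyond dirty/need_write are not shown here:

	/*
	 * Abbreviated sketch: the "only write if need_write is set" test now
	 * happens inside the cmpxchg loop, atomically with clearing the
	 * dirty/need_write bits, instead of being checked by the caller.
	 */
	void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
				     unsigned flags)
	{
		unsigned long old, new, v = READ_ONCE(b->flags);

		if (flags & BTREE_WRITE_ALREADY_STARTED)
			goto do_write;

		do {
			old = new = v;

			if (!(old & (1 << BTREE_NODE_dirty)))
				return;

			/* the old btree_node_write_if_need() check */
			if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
			    !(old & (1 << BTREE_NODE_need_write)))
				return;

			if (!btree_node_may_write(b))
				return;

			if (old & (1 << BTREE_NODE_write_in_flight))
				return;

			new &= ~(1 << BTREE_NODE_dirty);
			new &= ~(1 << BTREE_NODE_need_write);
			/* ... remaining flag updates omitted from the hunks ... */
		} while ((v = cmpxchg(&b->flags, old, new)) != old);

	do_write:
		/* ... kick off the actual write ... */
	}

With the check moved here, btree_node_write_if_need() in btree_io.h reduces to
a single bch2_btree_node_write() call passing BTREE_WRITE_ONLY_IF_NEED, as the
header hunk below shows.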
-rw-r--r--  fs/bcachefs/btree_cache.c           |  6
-rw-r--r--  fs/bcachefs/btree_io.c              | 22
-rw-r--r--  fs/bcachefs/btree_io.h              | 13
-rw-r--r--  fs/bcachefs/btree_update_interior.c | 12
4 files changed, 29 insertions(+), 24 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index a6b8ca85fc94..7b264619c276 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -239,9 +239,9 @@ wait_on_io:
* the post write cleanup:
*/
if (bch2_verify_btree_ondisk)
- bch2_btree_node_write(c, b, SIX_LOCK_intent);
+ bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
else
- __bch2_btree_node_write(c, b, false);
+ __bch2_btree_node_write(c, b, 0);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
@@ -1064,7 +1064,7 @@ wait_on_io:
six_lock_write(&b->c.lock, NULL, NULL);
if (btree_node_dirty(b)) {
- __bch2_btree_node_write(c, b, false);
+ __bch2_btree_node_write(c, b, 0);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
goto wait_on_io;
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index f4d6a6c5096d..540bfe07c128 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -471,7 +471,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
};
if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
- bch2_btree_node_write(c, b, SIX_LOCK_write);
+ bch2_btree_node_write(c, b, SIX_LOCK_write, 0);
reinit_iter = true;
}
}
@@ -1620,7 +1620,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
} while ((v = cmpxchg(&b->flags, old, new)) != old);
if (new & (1U << BTREE_NODE_write_in_flight))
- __bch2_btree_node_write(c, b, true);
+ __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED);
}
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
@@ -1741,7 +1741,7 @@ static void btree_write_submit(struct work_struct *work)
bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
}
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
struct btree_write_bio *wbio;
struct bset_tree *t;
@@ -1758,7 +1758,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_sta
void *data;
int ret;
- if (already_started)
+ if (flags & BTREE_WRITE_ALREADY_STARTED)
goto do_write;
/*
@@ -1774,13 +1774,18 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_sta
if (!(old & (1 << BTREE_NODE_dirty)))
return;
+ if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
+ !(old & (1 << BTREE_NODE_need_write)))
+ return;
+
if (!btree_node_may_write(b))
return;
if (old & (1 << BTREE_NODE_never_write))
return;
- BUG_ON(old & (1 << BTREE_NODE_write_in_flight));
+ if (old & (1 << BTREE_NODE_write_in_flight))
+ return;
new &= ~(1 << BTREE_NODE_dirty);
new &= ~(1 << BTREE_NODE_need_write);
@@ -2044,12 +2049,13 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
* Use this one if the node is intent locked:
*/
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- enum six_lock_type lock_type_held)
+ enum six_lock_type lock_type_held,
+ unsigned flags)
{
if (lock_type_held == SIX_LOCK_intent ||
(lock_type_held == SIX_LOCK_read &&
six_lock_tryupgrade(&b->c.lock))) {
- __bch2_btree_node_write(c, b, false);
+ __bch2_btree_node_write(c, b, flags);
/* don't cycle lock unnecessarily: */
if (btree_node_just_written(b) &&
@@ -2061,7 +2067,7 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
if (lock_type_held == SIX_LOCK_read)
six_lock_downgrade(&b->c.lock);
} else {
- __bch2_btree_node_write(c, b, false);
+ __bch2_btree_node_write(c, b, flags);
if (lock_type_held == SIX_LOCK_write &&
btree_node_just_written(b))
bch2_btree_post_write_cleanup(c, b);
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 638a9b30f0cb..3dbb518c4da4 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -143,20 +143,19 @@ int bch2_btree_root_read(struct bch_fs *, enum btree_id,
void bch2_btree_complete_write(struct bch_fs *, struct btree *,
struct btree_write *);
-void __bch2_btree_node_write(struct bch_fs *, struct btree *, bool);
bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
+#define BTREE_WRITE_ONLY_IF_NEED (1U << 0)
+#define BTREE_WRITE_ALREADY_STARTED (1U << 1)
+
+void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
void bch2_btree_node_write(struct bch_fs *, struct btree *,
- enum six_lock_type);
+ enum six_lock_type, unsigned);
static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
enum six_lock_type lock_held)
{
- if (b->written &&
- btree_node_need_write(b) &&
- btree_node_may_write(b) &&
- !btree_node_write_in_flight(b))
- bch2_btree_node_write(c, b, lock_held);
+ bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
}
#define bch2_btree_node_write_cond(_c, _b, cond) \
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index f4ee78e84f71..fe0fc5ff1549 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1393,8 +1393,8 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
six_unlock_write(&n2->c.lock);
six_unlock_write(&n1->c.lock);
- bch2_btree_node_write(c, n1, SIX_LOCK_intent);
- bch2_btree_node_write(c, n2, SIX_LOCK_intent);
+ bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
+ bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
/*
* Note that on recursive parent_keys == keys, so we
@@ -1413,7 +1413,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
- bch2_btree_node_write(c, n3, SIX_LOCK_intent);
+ bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
}
} else {
trace_btree_compact(c, b);
@@ -1421,7 +1421,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
bch2_btree_build_aux_trees(n1);
six_unlock_write(&n1->c.lock);
- bch2_btree_node_write(c, n1, SIX_LOCK_intent);
+ bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
if (parent)
bch2_keylist_add(&as->parent_keys, &n1->key);
@@ -1709,7 +1709,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
bch2_btree_build_aux_trees(n);
six_unlock_write(&n->c.lock);
- bch2_btree_node_write(c, n, SIX_LOCK_intent);
+ bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
bkey_init(&delete.k);
delete.k.p = prev->key.k.p;
@@ -1783,7 +1783,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
trace_btree_gc_rewrite_node(c, b);
- bch2_btree_node_write(c, n, SIX_LOCK_intent);
+ bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
if (parent) {
bch2_keylist_add(&as->parent_keys, &n->key);