author     Kent Overstreet <kent.overstreet@linux.dev>   2022-10-28 17:08:41 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-22 17:09:45 -0400
commit     46fee692eebb850b8478531e185fb5a5f942d3ea (patch)
tree       332347a26a8887b98e5bcb40456aa69825413500 /fs/bcachefs/btree_io.c
parent     8852501fe570c4956c0e29246e1e5636f09b58fb (diff)
bcachefs: Improved btree write statistics
This replaces sysfs btree_avg_write_size with btree_write_stats, which
now breaks out statistics by the source of the btree write.
Btree writes that are too small are a source of inefficiency and of
excessive btree resort overhead; this will let us see what's causing them.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_io.c')
-rw-r--r--   fs/bcachefs/btree_io.c   46
1 file changed, 42 insertions(+), 4 deletions(-)
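
The diff below uses several definitions that live outside btree_io.c: the
BCH_BTREE_WRITE_TYPES() x-macro list, enum btree_write_type,
BTREE_WRITE_TYPE_MASK/NR, the per-node b->write_type field, and the
c->btree_write_stats[] counters. As orientation, here is a minimal sketch of
what the header side of this change plausibly looks like. Only the
initial and init_next_bset entries are confirmed by the diff; the other list
entry, the exact numbering, and the mask formula are assumptions for
illustration, not the commit's actual header.

/*
 * Sketch of assumed header-side definitions (e.g. in btree_io.h).
 * journal_reclaim is a hypothetical entry; the real list may differ.
 */
#define BCH_BTREE_WRITE_TYPES()			\
	x(initial,		0)		\
	x(init_next_bset,	1)		\
	x(journal_reclaim,	2)	/* hypothetical */

enum btree_write_type {
#define x(t, n)	BTREE_WRITE_##t = n,
	BCH_BTREE_WRITE_TYPES()
#undef x
	BTREE_WRITE_TYPE_NR,
};

/*
 * Assumed layout: the low bits of the flags word carry the write type,
 * and flag bits such as BTREE_WRITE_ONLY_IF_NEED and
 * BTREE_WRITE_ALREADY_STARTED live above them. That is what lets the
 * diff OR b->write_type directly into the flags passed to
 * __bch2_btree_node_write() and mask it back out with
 * BTREE_WRITE_TYPE_MASK. roundup_pow_of_two() is the kernel helper
 * from <linux/log2.h>.
 */
#define BTREE_WRITE_TYPE_MASK	(roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)

struct btree_write_stats {
	atomic64_t	nr;
	atomic64_t	bytes;
};

/* In struct bch_fs: */
struct btree_write_stats	btree_write_stats[BTREE_WRITE_TYPE_NR];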
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index a322a8367688..56f9637d2ca6 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -471,7 +471,8 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 	};
 
 	if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
-		bch2_btree_node_write(c, b, SIX_LOCK_write, 0);
+		bch2_btree_node_write(c, b, SIX_LOCK_write,
+				      BTREE_WRITE_init_next_bset);
 		reinit_iter = true;
 	}
 }
@@ -1646,7 +1647,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
 	} while ((v = cmpxchg(&b->flags, old, new)) != old);
 
 	if (new & (1U << BTREE_NODE_write_in_flight))
-		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED);
+		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|b->write_type);
 	else
 		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
 }
@@ -1795,6 +1796,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 	bool used_mempool;
 	unsigned long old, new;
 	bool validate_before_checksum = false;
+	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
 	void *data;
 	int ret;
@@ -1841,6 +1843,12 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 	if (new & (1U << BTREE_NODE_need_write))
 		return;
 do_write:
+	if ((flags & BTREE_WRITE_ONLY_IF_NEED))
+		type = b->write_type;
+	b->write_type = 0;
+
+	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
+
 	atomic_dec(&c->btree_cache.dirty);
 
 	BUG_ON(btree_node_fake(b));
@@ -2015,8 +2023,8 @@ do_write:
 	bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
 		cpu_to_le16(b->written);
 
-	atomic64_inc(&c->btree_writes_nr);
-	atomic64_add(sectors_to_write, &c->btree_writes_sectors);
+	atomic64_inc(&c->btree_write_stats[type].nr);
+	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
 
 	INIT_WORK(&wbio->work, btree_write_submit);
 	queue_work(c->io_complete_wq, &wbio->work);
@@ -2144,3 +2152,33 @@ bool bch2_btree_flush_all_writes(struct bch_fs *c)
 {
 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
 }
+
+const char * const bch2_btree_write_types[] = {
+#define x(t, n)	[n] = #t,
+	BCH_BTREE_WRITE_TYPES()
+	NULL
+};
+
+void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	printbuf_tabstop_push(out, 20);
+	printbuf_tabstop_push(out, 10);
+
+	prt_tab(out);
+	prt_str(out, "nr");
+	prt_tab(out);
+	prt_str(out, "size");
+	prt_newline(out);
+
+	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
+		u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
+		u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
+
+		prt_printf(out, "%s:", bch2_btree_write_types[i]);
+		prt_tab(out);
+		prt_u64(out, nr);
+		prt_tab(out);
+		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
+		prt_newline(out);
+	}
+}
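
The string table added at the end of the diff is the classic x-macro pattern:
one list macro, BCH_BTREE_WRITE_TYPES(), expands into both the enum and a
matching array of names, so the two can never drift apart. Below is a
standalone, compilable illustration of the same pattern outside the kernel;
all names and the sample counter values are invented for the demo, and plain
uint64_t counters stand in for the kernel's atomic64_t stats.

#include <stdio.h>
#include <inttypes.h>

/* One list macro drives both the enum and the name table. */
#define WRITE_TYPES()			\
	x(initial,		0)	\
	x(init_next_bset,	1)

enum write_type {
#define x(t, n)	WRITE_##t = n,
	WRITE_TYPES()
#undef x
	WRITE_TYPE_NR,
};

static const char * const write_type_names[] = {
#define x(t, n)	[n] = #t,	/* stringify each list entry */
	WRITE_TYPES()
#undef x
};

int main(void)
{
	/* Fake counters standing in for c->btree_write_stats[]. */
	uint64_t nr[WRITE_TYPE_NR]    = { 100, 7 };
	uint64_t bytes[WRITE_TYPE_NR] = { 100 << 12, 7 << 9 };

	printf("%-20s %10s %10s\n", "", "nr", "avg size");
	for (unsigned i = 0; i < WRITE_TYPE_NR; i++)
		printf("%-20s %10" PRIu64 " %10" PRIu64 "\n",
		       write_type_names[i], nr[i],
		       nr[i] ? bytes[i] / nr[i] : 0);
	return 0;
}

The report format mirrors bch2_btree_write_stats_to_text(): a header row,
then one row per write type showing the count and the average write size,
with the same division-by-zero guard as the diff's
nr ? div64_u64(bytes, nr) : 0 expression. Since small btree writes are what
the commit message calls out as the inefficiency, the per-type average size
is exactly the number that identifies the offending write source.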