From 46fee692eebb850b8478531e185fb5a5f942d3ea Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Fri, 28 Oct 2022 17:08:41 -0400
Subject: bcachefs: Improved btree write statistics

This replaces sysfs btree_avg_write_size with btree_write_stats, which
now breaks out statistics by the source of the btree write.

Btree writes that are too small are a source of inefficiency and of
excessive btree resort overhead - this will let us see what's causing
them.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/sysfs.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

(limited to 'fs/bcachefs/sysfs.c')

diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 76301209898f..db3d377ba10c 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -183,7 +183,7 @@ read_attribute(io_latency_stats_read);
 read_attribute(io_latency_stats_write);
 read_attribute(congested);
 
-read_attribute(btree_avg_write_size);
+read_attribute(btree_write_stats);
 read_attribute(btree_cache_size);
 read_attribute(compression_stats);
 
@@ -250,14 +250,6 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
 	return ret;
 }
 
-static size_t bch2_btree_avg_write_size(struct bch_fs *c)
-{
-	u64 nr      = atomic64_read(&c->btree_writes_nr);
-	u64 sectors = atomic64_read(&c->btree_writes_sectors);
-
-	return nr ? div64_u64(sectors, nr) : 0;
-}
-
 static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
 {
 	long ret = 0;
@@ -396,7 +388,9 @@ SHOW(bch2_fs)
 	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
 
 	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
-	sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));
+
+	if (attr == &sysfs_btree_write_stats)
+		bch2_btree_write_stats_to_text(out, c);
 
 	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
 
@@ -557,7 +551,7 @@ SYSFS_OPS(bch2_fs);
 
 struct attribute *bch2_fs_files[] = {
 	&sysfs_minor,
 	&sysfs_btree_cache_size,
-	&sysfs_btree_avg_write_size,
+	&sysfs_btree_write_stats,
 
 	&sysfs_promote_whole_extents,
--
cgit v1.2.3
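
Note on the change: the sysfs hook above only calls
bch2_btree_write_stats_to_text(out, c); the counters it prints, and the
breakdown by write source, live outside this diff (the old global
btree_writes_nr/btree_writes_sectors pair is deleted here). Below is a
minimal, self-contained C sketch of the general technique - a per-source
{count, sectors} pair keyed by an enum, with a printer that reports the
write count and average write size for each source, so undersized writes
can be traced back to their cause. The source names, struct fields, and
helpers are illustrative assumptions, not the actual bcachefs
implementation.

/*
 * Hypothetical sketch of per-source btree write statistics.
 * The write sources and field names below are assumptions for
 * illustration; the real counters are defined outside this diff.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum btree_write_source {		/* assumed breakdown by cause */
	BTREE_WRITE_initial,
	BTREE_WRITE_cache_reclaim,
	BTREE_WRITE_journal_reclaim,
	BTREE_WRITE_interior,
	BTREE_WRITE_NR,
};

static const char * const write_source_names[] = {
	"initial",
	"cache reclaim",
	"journal reclaim",
	"interior",
};

struct btree_write_stats {
	atomic_uint_fast64_t	nr;	 /* writes from this source */
	atomic_uint_fast64_t	sectors; /* total sectors written */
};

static struct btree_write_stats stats[BTREE_WRITE_NR];

/* Called on each btree node write completion (sketch). */
static void btree_write_stat_update(enum btree_write_source src,
				    uint64_t sectors)
{
	atomic_fetch_add(&stats[src].nr, 1);
	atomic_fetch_add(&stats[src].sectors, sectors);
}

/*
 * Analogue of bch2_btree_write_stats_to_text(): emit one line per
 * source with the write count and average size.
 */
static void btree_write_stats_to_text(FILE *out)
{
	for (int i = 0; i < BTREE_WRITE_NR; i++) {
		uint64_t nr	 = atomic_load(&stats[i].nr);
		uint64_t sectors = atomic_load(&stats[i].sectors);

		fprintf(out, "%-16s nr %llu avg size %llu sectors\n",
			write_source_names[i],
			(unsigned long long) nr,
			(unsigned long long) (nr ? sectors / nr : 0));
	}
}

int main(void)
{
	btree_write_stat_update(BTREE_WRITE_initial, 128);
	btree_write_stat_update(BTREE_WRITE_journal_reclaim, 8);
	btree_write_stat_update(BTREE_WRITE_journal_reclaim, 16);
	btree_write_stats_to_text(stdout);
	return 0;
}

Keeping a separate {nr, sectors} pair per source preserves the old
average-write-size information (sectors/nr) while adding the per-source
breakdown the commit message calls for.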