author     Kent Overstreet <kent.overstreet@gmail.com>   2020-07-26 00:06:11 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-23 00:08:43 +0300
commit     7807e143849e0f86fce6ce7d4907412915d29918
tree       410c1bdd413ebaf858d72bb6e33ec8ad152f5937 /fs
parent     4580baec7fbee2fdceb9b5b2b337ea3734a6d2b8
bcachefs: Convert various code to printbuf
printbufs know how big the allocated buffer is, so we can get rid of the random PAGE_SIZEs all over the place.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
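The shape of the conversion is the same in every hunk below: a helper that used to take a char *buf (and implicitly PAGE_SIZE) now takes a struct printbuf *out and returns void, formatting with pr_buf(out, ...) instead of scnprintf(), while the sysfs SHOW() caller builds one printbuf over its page and returns out.pos - buf. What follows is a minimal, self-contained userspace sketch of that pattern, not the kernel code itself: struct printbuf, PRINTBUF_INIT(), pr_buf(), timers_print() and timers_to_text() are simplified stand-ins for the definitions in fs/bcachefs/util.h and for the converted helpers.

/*
 * Minimal sketch of the printbuf conversion (simplified stand-ins, not the
 * kernel's implementation): the helper appends into a bounded buffer, and
 * the caller, not the helper, owns the buffer and its size.
 */
#include <stdarg.h>
#include <stdio.h>

struct printbuf {
	char	*pos;	/* next byte to write */
	char	*end;	/* one past the last usable byte */
};

#define PRINTBUF_INIT(_buf, _size)					\
	((struct printbuf) { .pos = (_buf), .end = (_buf) + (_size) })

static void pr_buf(struct printbuf *out, const char *fmt, ...)
{
	size_t remaining = out->end - out->pos;
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(out->pos, remaining, fmt, args);
	va_end(args);

	if (n < 0)
		return;
	/* vsnprintf() reports what it wanted to write; clamp to what fit */
	out->pos += (size_t)n < remaining ? (size_t)n
					  : (remaining ? remaining - 1 : 0);
}

/* Old style: every helper had to be told the buffer size (PAGE_SIZE). */
static int timers_print(char *buf, size_t size)
{
	return snprintf(buf, size, "timers:\t%u\n", 3);
}

/* New style: the helper only appends; size bookkeeping lives in printbuf. */
static void timers_to_text(struct printbuf *out)
{
	pr_buf(out, "timers:\t%u\n", 3);
}

int main(void)
{
	char page[128], old[128];
	struct printbuf out = PRINTBUF_INIT(page, sizeof(page));

	/* Old interface, for contrast: the caller passes the size around. */
	printf("%.*s", timers_print(old, sizeof(old)), old);

	/*
	 * Mirrors the sysfs SHOW() callers after the patch: build one
	 * printbuf, hand it to the *_to_text() helper, and use
	 * out.pos - page as the number of bytes produced.
	 */
	timers_to_text(&out);
	printf("%.*s", (int)(out.pos - page), page);
	return 0;
}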
Diffstat (limited to 'fs')
-rw-r--r--  fs/bcachefs/btree_io.c              |   7
-rw-r--r--  fs/bcachefs/btree_io.h              |   2
-rw-r--r--  fs/bcachefs/btree_update_interior.c |   7
-rw-r--r--  fs/bcachefs/btree_update_interior.h |   2
-rw-r--r--  fs/bcachefs/clock.c                 |   7
-rw-r--r--  fs/bcachefs/clock.h                 |   2
-rw-r--r--  fs/bcachefs/ec.c                    |  29
-rw-r--r--  fs/bcachefs/ec.h                    |   1
-rw-r--r--  fs/bcachefs/journal.c               |  36
-rw-r--r--  fs/bcachefs/journal.h               |   4
-rw-r--r--  fs/bcachefs/rebalance.c             |  19
-rw-r--r--  fs/bcachefs/rebalance.h             |   2
-rw-r--r--  fs/bcachefs/sysfs.c                 | 220
-rw-r--r--  fs/bcachefs/util.c                  |  25
-rw-r--r--  fs/bcachefs/util.h                  |   2
15 files changed, 177 insertions(+), 188 deletions(-)
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index f80b93a54c08..d3ea43dd9fe6 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1835,9 +1835,8 @@ void bch2_btree_verify_flushed(struct bch_fs *c)
rcu_read_unlock();
}
-ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
+void bch2_dirty_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bucket_table *tbl;
struct rhash_head *pos;
struct btree *b;
@@ -1850,7 +1849,7 @@ ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
if (!(flags & (1 << BTREE_NODE_dirty)))
continue;
- pr_buf(&out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
+ pr_buf(out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
b,
(flags & (1 << BTREE_NODE_dirty)) != 0,
(flags & (1 << BTREE_NODE_need_write)) != 0,
@@ -1861,6 +1860,4 @@ ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
b->will_make_reachable & 1);
}
rcu_read_unlock();
-
- return out.pos - buf;
}
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index db013dc28eec..66ebdd39f5b3 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -140,7 +140,7 @@ do { \
void bch2_btree_flush_all_reads(struct bch_fs *);
void bch2_btree_flush_all_writes(struct bch_fs *);
void bch2_btree_verify_flushed(struct bch_fs *);
-ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *, char *);
+void bch2_dirty_btree_nodes_to_text(struct printbuf *, struct bch_fs *);
static inline void compat_bformat(unsigned level, enum btree_id btree_id,
unsigned version, unsigned big_endian,
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index d81aa039d27f..963213e78f31 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1974,22 +1974,19 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
six_unlock_intent(&b->c.lock);
}
-ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf)
+void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct btree_update *as;
mutex_lock(&c->btree_interior_update_lock);
list_for_each_entry(as, &c->btree_interior_update_list, list)
- pr_buf(&out, "%p m %u w %u r %u j %llu\n",
+ pr_buf(out, "%p m %u w %u r %u j %llu\n",
as,
as->mode,
as->nodes_written,
atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK,
as->journal.seq);
mutex_unlock(&c->btree_interior_update_lock);
-
- return out.pos - buf;
}
size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *c)
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 812bafdc2d04..7668225e72c6 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -317,7 +317,7 @@ static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
return u64s <= bch_btree_keys_u64s_remaining(c, b);
}
-ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
+void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 163058173252..869ba1887757 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -152,9 +152,8 @@ void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
timer->fn(timer);
}
-ssize_t bch2_io_timers_show(struct io_clock *clock, char *buf)
+void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
unsigned long now;
unsigned i;
@@ -162,12 +161,10 @@ ssize_t bch2_io_timers_show(struct io_clock *clock, char *buf)
now = atomic_long_read(&clock->now);
for (i = 0; i < clock->timers.used; i++)
- pr_buf(&out, "%ps:\t%li\n",
+ pr_buf(out, "%ps:\t%li\n",
clock->timers.data[i]->fn,
clock->timers.data[i]->expire - now);
spin_unlock(&clock->timer_lock);
-
- return out.pos - buf;
}
void bch2_io_clock_exit(struct io_clock *clock)
diff --git a/fs/bcachefs/clock.h b/fs/bcachefs/clock.h
index da50afe206cc..70a0f7436c84 100644
--- a/fs/bcachefs/clock.h
+++ b/fs/bcachefs/clock.h
@@ -30,7 +30,7 @@ void bch2_io_clock_schedule_timeout(struct io_clock *, unsigned long);
__ret; \
})
-ssize_t bch2_io_timers_show(struct io_clock *, char *);
+void bch2_io_timers_to_text(struct printbuf *, struct io_clock *);
void bch2_io_clock_exit(struct io_clock *);
int bch2_io_clock_init(struct io_clock *);
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 8c04e7ced88b..61bc34225bf1 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -1575,6 +1575,35 @@ void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
spin_unlock(&c->ec_stripes_heap_lock);
}
+void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct ec_stripe_head *h;
+ struct ec_stripe_new *s;
+
+ mutex_lock(&c->ec_stripe_head_lock);
+ list_for_each_entry(h, &c->ec_stripe_head_list, list) {
+ pr_buf(out, "target %u algo %u redundancy %u:\n",
+ h->target, h->algo, h->redundancy);
+
+ if (h->s)
+ pr_buf(out, "\tpending: blocks %u allocated %u\n",
+ h->s->blocks.nr,
+ bitmap_weight(h->s->blocks_allocated,
+ h->s->blocks.nr));
+ }
+ mutex_unlock(&c->ec_stripe_head_lock);
+
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_for_each_entry(s, &c->ec_stripe_new_list, list) {
+ pr_buf(out, "\tin flight: blocks %u allocated %u pin %u\n",
+ s->blocks.nr,
+ bitmap_weight(s->blocks_allocated,
+ s->blocks.nr),
+ atomic_read(&s->pin));
+ }
+ mutex_unlock(&c->ec_stripe_new_lock);
+}
+
void bch2_fs_ec_exit(struct bch_fs *c)
{
struct ec_stripe_head *h;
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index ad9078fdb045..f8fc3d616cd7 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -161,6 +161,7 @@ int bch2_stripes_write(struct bch_fs *, unsigned, bool *);
int bch2_ec_mem_alloc(struct bch_fs *, bool);
void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
+void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);
void bch2_fs_ec_exit(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 127787cd3e03..8b0746e092de 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1137,9 +1137,8 @@ out:
/* debug: */
-ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
+void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
union journal_res_state s;
struct bch_dev *ca;
@@ -1149,7 +1148,7 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
spin_lock(&j->lock);
s = READ_ONCE(j->reservations);
- pr_buf(&out,
+ pr_buf(out,
"active journal entries:\t%llu\n"
"seq:\t\t\t%llu\n"
"last_seq:\t\t%llu\n"
@@ -1167,31 +1166,31 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
switch (s.cur_entry_offset) {
case JOURNAL_ENTRY_ERROR_VAL:
- pr_buf(&out, "error\n");
+ pr_buf(out, "error\n");
break;
case JOURNAL_ENTRY_CLOSED_VAL:
- pr_buf(&out, "closed\n");
+ pr_buf(out, "closed\n");
break;
default:
- pr_buf(&out, "%u/%u\n",
+ pr_buf(out, "%u/%u\n",
s.cur_entry_offset,
j->cur_entry_u64s);
break;
}
- pr_buf(&out,
+ pr_buf(out,
"current entry refs:\t%u\n"
"prev entry unwritten:\t",
journal_state_count(s, s.idx));
if (s.prev_buf_unwritten)
- pr_buf(&out, "yes, ref %u sectors %u\n",
+ pr_buf(out, "yes, ref %u sectors %u\n",
journal_state_count(s, !s.idx),
journal_prev_buf(j)->sectors);
else
- pr_buf(&out, "no\n");
+ pr_buf(out, "no\n");
- pr_buf(&out,
+ pr_buf(out,
"need write:\t\t%i\n"
"replay done:\t\t%i\n",
test_bit(JOURNAL_NEED_WRITE, &j->flags),
@@ -1204,7 +1203,7 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
if (!ja->nr)
continue;
- pr_buf(&out,
+ pr_buf(out,
"dev %u:\n"
"\tnr\t\t%u\n"
"\tavailable\t%u:%u\n"
@@ -1223,34 +1222,29 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
spin_unlock(&j->lock);
rcu_read_unlock();
-
- return out.pos - buf;
}
-ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
+void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *pin;
u64 i;
spin_lock(&j->lock);
fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
- pr_buf(&out, "%llu: count %u\n",
+ pr_buf(out, "%llu: count %u\n",
i, atomic_read(&pin_list->count));
list_for_each_entry(pin, &pin_list->list, list)
- pr_buf(&out, "\t%px %ps\n",
+ pr_buf(out, "\t%px %ps\n",
pin, pin->flush);
if (!list_empty(&pin_list->flushed))
- pr_buf(&out, "flushed:\n");
+ pr_buf(out, "flushed:\n");
list_for_each_entry(pin, &pin_list->flushed, list)
- pr_buf(&out, "\t%px %ps\n",
+ pr_buf(out, "\t%px %ps\n",
pin, pin->flush);
}
spin_unlock(&j->lock);
-
- return out.pos - buf;
}
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index f14dfa59e702..26654b9cf0ea 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -501,8 +501,8 @@ static inline void bch2_journal_set_replay_done(struct journal *j)
void bch2_journal_unblock(struct journal *);
void bch2_journal_block(struct journal *);
-ssize_t bch2_journal_print_debug(struct journal *, char *);
-ssize_t bch2_journal_print_pins(struct journal *, char *);
+void bch2_journal_debug_to_text(struct printbuf *, struct journal *);
+void bch2_journal_pins_to_text(struct printbuf *, struct journal *);
int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
unsigned nr);
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index eb3f7d02c791..a0bbddeac623 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -249,45 +249,42 @@ static int bch2_rebalance_thread(void *arg)
return 0;
}
-ssize_t bch2_rebalance_work_show(struct bch_fs *c, char *buf)
+void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs_rebalance *r = &c->rebalance;
struct rebalance_work w = rebalance_work(c);
char h1[21], h2[21];
bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
- pr_buf(&out, "fullest_dev (%i):\t%s/%s\n",
+ pr_buf(out, "fullest_dev (%i):\t%s/%s\n",
w.dev_most_full_idx, h1, h2);
bch2_hprint(&PBUF(h1), w.total_work << 9);
bch2_hprint(&PBUF(h2), c->capacity << 9);
- pr_buf(&out, "total work:\t\t%s/%s\n", h1, h2);
+ pr_buf(out, "total work:\t\t%s/%s\n", h1, h2);
- pr_buf(&out, "rate:\t\t\t%u\n", r->pd.rate.rate);
+ pr_buf(out, "rate:\t\t\t%u\n", r->pd.rate.rate);
switch (r->state) {
case REBALANCE_WAITING:
- pr_buf(&out, "waiting\n");
+ pr_buf(out, "waiting\n");
break;
case REBALANCE_THROTTLED:
bch2_hprint(&PBUF(h1),
(r->throttled_until_iotime -
atomic_long_read(&c->io_clock[WRITE].now)) << 9);
- pr_buf(&out, "throttled for %lu sec or %s io\n",
+ pr_buf(out, "throttled for %lu sec or %s io\n",
(r->throttled_until_cputime - jiffies) / HZ,
h1);
break;
case REBALANCE_RUNNING:
- pr_buf(&out, "running\n");
- pr_buf(&out, "pos %llu:%llu\n",
+ pr_buf(out, "running\n");
+ pr_buf(out, "pos %llu:%llu\n",
r->move_stats.pos.inode,
r->move_stats.pos.offset);
break;
}
-
- return out.pos - buf;
}
void bch2_rebalance_stop(struct bch_fs *c)
diff --git a/fs/bcachefs/rebalance.h b/fs/bcachefs/rebalance.h
index 99e2a1fb6084..7ade0bb81cce 100644
--- a/fs/bcachefs/rebalance.h
+++ b/fs/bcachefs/rebalance.h
@@ -19,7 +19,7 @@ void bch2_rebalance_add_key(struct bch_fs *, struct bkey_s_c,
struct bch_io_opts *);
void bch2_rebalance_add_work(struct bch_fs *, u64);
-ssize_t bch2_rebalance_work_show(struct bch_fs *, char *);
+void bch2_rebalance_work_to_text(struct printbuf *, struct bch_fs *);
void bch2_rebalance_stop(struct bch_fs *);
int bch2_rebalance_start(struct bch_fs *);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 911c305d372c..deaafeecba64 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -75,7 +75,6 @@ do { \
#define sysfs_hprint(file, val) \
do { \
if (attr == &sysfs_ ## file) { \
- struct printbuf out = _PBUF(buf, PAGE_SIZE); \
bch2_hprint(&out, val); \
pr_buf(&out, "\n"); \
return out.pos - buf; \
@@ -239,24 +238,22 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
return ret;
}
-static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
+static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);
if (!fs_usage)
return -ENOMEM;
- bch2_fs_usage_to_text(&out, c, fs_usage);
+ bch2_fs_usage_to_text(out, c, fs_usage);
percpu_up_read(&c->mark_lock);
kfree(fs_usage);
-
- return out.pos - buf;
+ return 0;
}
-static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
+static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter *iter;
@@ -299,59 +296,26 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
if (ret)
return ret;
- return scnprintf(buf, PAGE_SIZE,
- "uncompressed data:\n"
- " nr extents: %llu\n"
- " size (bytes): %llu\n"
- "compressed data:\n"
- " nr extents: %llu\n"
- " compressed size (bytes): %llu\n"
- " uncompressed size (bytes): %llu\n",
- nr_uncompressed_extents,
- uncompressed_sectors << 9,
- nr_compressed_extents,
- compressed_sectors_compressed << 9,
- compressed_sectors_uncompressed << 9);
-}
-
-static ssize_t bch2_new_stripes(struct bch_fs *c, char *buf)
-{
- char *out = buf, *end = buf + PAGE_SIZE;
- struct ec_stripe_head *h;
- struct ec_stripe_new *s;
-
- mutex_lock(&c->ec_stripe_head_lock);
- list_for_each_entry(h, &c->ec_stripe_head_list, list) {
- out += scnprintf(out, end - out,
- "target %u algo %u redundancy %u:\n",
- h->target, h->algo, h->redundancy);
-
- if (h->s)
- out += scnprintf(out, end - out,
- "\tpending: blocks %u allocated %u\n",
- h->s->blocks.nr,
- bitmap_weight(h->s->blocks_allocated,
- h->s->blocks.nr));
- }
- mutex_unlock(&c->ec_stripe_head_lock);
-
- mutex_lock(&c->ec_stripe_new_lock);
- list_for_each_entry(h, &c->ec_stripe_new_list, list) {
- out += scnprintf(out, end - out,
- "\tin flight: blocks %u allocated %u pin %u\n",
- s->blocks.nr,
- bitmap_weight(s->blocks_allocated,
- s->blocks.nr),
- atomic_read(&s->pin));
- }
- mutex_unlock(&c->ec_stripe_new_lock);
-
- return out - buf;
+ pr_buf(out,
+ "uncompressed data:\n"
+ " nr extents: %llu\n"
+ " size (bytes): %llu\n"
+ "compressed data:\n"
+ " nr extents: %llu\n"
+ " compressed size (bytes): %llu\n"
+ " uncompressed size (bytes): %llu\n",
+ nr_uncompressed_extents,
+ uncompressed_sectors << 9,
+ nr_compressed_extents,
+ compressed_sectors_compressed << 9,
+ compressed_sectors_uncompressed << 9);
+ return 0;
}
SHOW(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
sysfs_print(minor, c->minor);
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
@@ -381,8 +345,10 @@ SHOW(bch2_fs)
sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */
sysfs_pd_controller_show(copy_gc, &c->copygc_pd);
- if (attr == &sysfs_rebalance_work)
- return bch2_rebalance_work_show(c, buf);
+ if (attr == &sysfs_rebalance_work) {
+ bch2_rebalance_work_to_text(&out, c);
+ return out.pos - buf;
+ }
sysfs_print(promote_whole_extents, c->promote_whole_extents);
@@ -392,51 +358,61 @@ SHOW(bch2_fs)
/* Debugging: */
if (attr == &sysfs_alloc_debug)
- return show_fs_alloc_debug(c, buf);
+ return fs_alloc_debug_to_text(&out, c) ?: out.pos - buf;
- if (attr == &sysfs_journal_debug)
- return bch2_journal_print_debug(&c->journal, buf);
+ if (attr == &sysfs_journal_debug) {
+ bch2_journal_debug_to_text(&out, &c->journal);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_journal_pins)
- return bch2_journal_print_pins(&c->journal, buf);
+ if (attr == &sysfs_journal_pins) {
+ bch2_journal_pins_to_text(&out, &c->journal);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_btree_updates)
- return bch2_btree_updates_print(c, buf);
+ if (attr == &sysfs_btree_updates) {
+ bch2_btree_updates_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_dirty_btree_nodes)
- return bch2_dirty_btree_nodes_print(c, buf);
+ if (attr == &sysfs_dirty_btree_nodes) {
+ bch2_dirty_btree_nodes_to_text(&out, c);
+ return out.pos - buf;
+ }
if (attr == &sysfs_btree_key_cache) {
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
-
bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
return out.pos - buf;
}
if (attr == &sysfs_btree_transactions) {
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
-
bch2_btree_trans_to_text(&out, c);
return out.pos - buf;
}
if (attr == &sysfs_stripes_heap) {
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
-
bch2_stripes_heap_to_text(&out, c);
return out.pos - buf;
}
- if (attr == &sysfs_compression_stats)
- return bch2_compression_stats(c, buf);
+ if (attr == &sysfs_compression_stats) {
+ bch2_compression_stats_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_new_stripes)
- return bch2_new_stripes(c, buf);
+ if (attr == &sysfs_new_stripes) {
+ bch2_new_stripes_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_io_timers_read)
- return bch2_io_timers_show(&c->io_clock[READ], buf);
- if (attr == &sysfs_io_timers_write)
- return bch2_io_timers_show(&c->io_clock[WRITE], buf);
+ if (attr == &sysfs_io_timers_read) {
+ bch2_io_timers_to_text(&out, &c->io_clock[READ]);
+ return out.pos - buf;
+ }
+ if (attr == &sysfs_io_timers_write) {
+ bch2_io_timers_to_text(&out, &c->io_clock[WRITE]);
+ return out.pos - buf;
+ }
#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
BCH_DEBUG_PARAMS()
@@ -705,11 +681,13 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj)
SHOW(bch2_fs_time_stats)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
-#define x(name) \
- if (attr == &sysfs_time_stat_##name) \
- return bch2_time_stats_print(&c->times[BCH_TIME_##name],\
- buf, PAGE_SIZE);
+#define x(name) \
+ if (attr == &sysfs_time_stat_##name) { \
+ bch2_time_stats_to_text(&out, &c->times[BCH_TIME_##name]);\
+ return out.pos - buf; \
+ }
BCH_TIME_STATS()
#undef x
@@ -762,13 +740,13 @@ static int unsigned_cmp(const void *_l, const void *_r)
return cmp_int(*l, *r);
}
-static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
- char *buf, bucket_map_fn *fn, void *private)
+static int quantiles_to_text(struct printbuf *out,
+ struct bch_fs *c, struct bch_dev *ca,
+ bucket_map_fn *fn, void *private)
{
size_t i, n;
/* Compute 31 quantiles */
unsigned q[31], *p;
- ssize_t ret = 0;
down_read(&ca->bucket_lock);
n = ca->mi.nbuckets;
@@ -795,35 +773,30 @@ static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
vfree(p);
for (i = 0; i < ARRAY_SIZE(q); i++)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "%u ", q[i]);
- buf[ret - 1] = '\n';
-
- return ret;
+ pr_buf(out, "%u ", q[i]);
+ pr_buf(out, "\n");
+ return 0;
}
-static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
+static void reserve_stats_to_text(struct printbuf *out, struct bch_dev *ca)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
enum alloc_reserve i;
spin_lock(&ca->fs->freelist_lock);
- pr_buf(&out, "free_inc:\t%zu\t%zu\n",
+ pr_buf(out, "free_inc:\t%zu\t%zu\n",
fifo_used(&ca->free_inc),
ca->free_inc.size);
for (i = 0; i < RESERVE_NR; i++)
- pr_buf(&out, "free[%u]:\t%zu\t%zu\n", i,
+ pr_buf(out, "free[%u]:\t%zu\t%zu\n", i,
fifo_used(&ca->free[i]),
ca->free[i].size);
spin_unlock(&ca->fs->freelist_lock);
-
- return out.pos - buf;
}
-static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
+static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct bch_dev_usage stats = bch2_dev_usage_read(ca);
@@ -834,7 +807,7 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].type]++;
- return scnprintf(buf, PAGE_SIZE,
+ pr_buf(out,
"free_inc: %zu/%zu\n"
"free[RESERVE_BTREE]: %zu/%zu\n"
"free[RESERVE_MOVINGGC]: %zu/%zu\n"
@@ -898,21 +871,18 @@ static const char * const bch2_rw[] = {
NULL
};
-static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
+static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
int rw, i;
for (rw = 0; rw < 2; rw++) {
- pr_buf(&out, "%s:\n", bch2_rw[rw]);
+ pr_buf(out, "%s:\n", bch2_rw[rw]);
for (i = 1; i < BCH_DATA_NR; i++)
- pr_buf(&out, "%-12s:%12llu\n",
+ pr_buf(out, "%-12s:%12llu\n",
bch2_data_types[i],
percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
}
-
- return out.pos - buf;
}
SHOW(bch2_dev)
@@ -964,34 +934,44 @@ SHOW(bch2_dev)
return out.pos - buf;
}
- if (attr == &sysfs_iodone)
- return show_dev_iodone(ca, buf);
+ if (attr == &sysfs_iodone) {
+ dev_iodone_to_text(&out, ca);
+ return out.pos - buf;
+ }
sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));
- if (attr == &sysfs_io_latency_stats_read)
- return bch2_time_stats_print(&ca->io_latency[READ], buf, PAGE_SIZE);
- if (attr == &sysfs_io_latency_stats_write)
- return bch2_time_stats_print(&ca->io_latency[WRITE], buf, PAGE_SIZE);
+ if (attr == &sysfs_io_latency_stats_read) {
+ bch2_time_stats_to_text(&out, &ca->io_latency[READ]);
+ return out.pos - buf;
+ }
+ if (attr == &sysfs_io_latency_stats_write) {
+ bch2_time_stats_to_text(&out, &ca->io_latency[WRITE]);
+ return out.pos - buf;
+ }
sysfs_printf(congested, "%u%%",
clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
* 100 / CONGESTED_MAX);
if (attr == &sysfs_bucket_quantiles_last_read)
- return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 0);
+ return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 0) ?: out.pos - buf;
if (attr == &sysfs_bucket_quantiles_last_write)
- return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 1);
+ return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 1) ?: out.pos - buf;
if (attr == &sysfs_bucket_quantiles_fragmentation)
- return show_quantiles(c, ca, buf, bucket_sectors_used_fn, NULL);
+ return quantiles_to_text(&out, c, ca, bucket_sectors_used_fn, NULL) ?: out.pos - buf;
if (attr == &sysfs_bucket_quantiles_oldest_gen)
- return show_quantiles(c, ca, buf, bucket_oldest_gen_fn, NULL);
+ return quantiles_to_text(&out, c, ca, bucket_oldest_gen_fn, NULL) ?: out.pos - buf;
- if (attr == &sysfs_reserve_stats)
- return show_reserve_stats(ca, buf);
- if (attr == &sysfs_alloc_debug)
- return show_dev_alloc_debug(ca, buf);
+ if (attr == &sysfs_reserve_stats) {
+ reserve_stats_to_text(&out, ca);
+ return out.pos - buf;
+ }
+ if (attr == &sysfs_alloc_debug) {
+ dev_alloc_debug_to_text(&out, ca);
+ return out.pos - buf;
+ }
return 0;
}
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index a05ebe475c5a..6e665f7f25a3 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -320,43 +320,40 @@ static void pr_time_units(struct printbuf *out, u64 ns)
pr_buf(out, "%llu %s", div_u64(ns, u->nsecs), u->name);
}
-size_t bch2_time_stats_print(struct bch2_time_stats *stats, char *buf, size_t len)
+void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats)
{
- struct printbuf out = _PBUF(buf, len);
const struct time_unit *u;
u64 freq = READ_ONCE(stats->average_frequency);
u64 q, last_q = 0;
int i;
- pr_buf(&out, "count:\t\t%llu\n",
+ pr_buf(out, "count:\t\t%llu\n",
stats->count);
- pr_buf(&out, "rate:\t\t%llu/sec\n",
+ pr_buf(out, "rate:\t\t%llu/sec\n",
freq ? div64_u64(NSEC_PER_SEC, freq) : 0);
- pr_buf(&out, "frequency:\t");
- pr_time_units(&out, freq);
+ pr_buf(out, "frequency:\t");
+ pr_time_units(out, freq);
- pr_buf(&out, "\navg duration:\t");
- pr_time_units(&out, stats->average_duration);
+ pr_buf(out, "\navg duration:\t");
+ pr_time_units(out, stats->average_duration);
- pr_buf(&out, "\nmax duration:\t");
- pr_time_units(&out, stats->max_duration);
+ pr_buf(out, "\nmax duration:\t");
+ pr_time_units(out, stats->max_duration);
i = eytzinger0_first(NR_QUANTILES);
u = pick_time_units(stats->quantiles.entries[i].m);
- pr_buf(&out, "\nquantiles (%s):\t", u->name);
+ pr_buf(out, "\nquantiles (%s):\t", u->name);
eytzinger0_for_each(i, NR_QUANTILES) {
bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;
q = max(stats->quantiles.entries[i].m, last_q);
- pr_buf(&out, "%llu%s",
+ pr_buf(out, "%llu%s",
div_u64(q, u->nsecs),
is_last ? "\n" : " ");
last_q = q;
}
-
- return out.pos - buf;
}
void bch2_time_stats_exit(struct bch2_time_stats *stats)
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 1780a6831136..7b7c638d8904 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -390,7 +390,7 @@ static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 sta
__bch2_time_stats_update(stats, start, local_clock());
}
-size_t bch2_time_stats_print(struct bch2_time_stats *, char *, size_t);
+void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);
void bch2_time_stats_exit(struct bch2_time_stats *);
void bch2_time_stats_init(struct bch2_time_stats *);