summary | refs | log | tree | commit | diff
path: root/fs/bcachefs
diff options
context:
space:
mode:
author    Kent Overstreet <kent.overstreet@gmail.com> 2020-11-14 02:36:33 +0300
committer Kent Overstreet <kent.overstreet@linux.dev> 2023-10-23 00:08:49 +0300
commit    f299d57350b2450c522dc7780400ce811f4847ec (patch)
tree      4775bbb7ebd4eff2c44d8144ec85e121bf9e51f0 /fs/bcachefs
parent    7bfbbd88024d70947761e482c856522b43a98d87 (diff)
download  linux-f299d57350b2450c522dc7780400ce811f4847ec.tar.xz
bcachefs: Refactor filesystem usage accounting
Various filesystem usage counters are kept in percpu counters, with one set
per in-flight journal buffer. Right now all the code that deals with it
assumes that there are only two buffers/sets of counters, but the number of
journal bufs is getting increased to 4 in the next patch — so refactor that
code to not assume a constant.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs')
-rw-r--r--  fs/bcachefs/bcachefs.h       |  2
-rw-r--r--  fs/bcachefs/btree_gc.c       |  1
-rw-r--r--  fs/bcachefs/buckets.c        | 28
-rw-r--r--  fs/bcachefs/journal_types.h  |  4
-rw-r--r--  fs/bcachefs/replicas.c       | 30
-rw-r--r--  fs/bcachefs/super-io.c       |  2
-rw-r--r--  fs/bcachefs/super.c          |  4
7 files changed, 42 insertions(+), 29 deletions(-)
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 4fe3f9257752..6db04dc9d2d3 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -676,7 +676,7 @@ struct bch_fs {
seqcount_t usage_lock;
struct bch_fs_usage *usage_base;
- struct bch_fs_usage __percpu *usage[2];
+ struct bch_fs_usage __percpu *usage[JOURNAL_BUF_NR];
struct bch_fs_usage __percpu *usage_gc;
u64 __percpu *online_reserved;
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index df018a2e463e..5f5686466d7d 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -603,7 +603,6 @@ static int bch2_gc_done(struct bch_fs *c,
struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
struct stripe *dst, *src;
- unsigned i;
c->ec_stripes_heap.used = 0;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 7cc31b0e02e4..4762c5465ef0 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -207,13 +207,13 @@ static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
{
return this_cpu_ptr(gc
? c->usage_gc
- : c->usage[journal_seq & 1]);
+ : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
ssize_t offset = v - (u64 *) c->usage_base;
- unsigned seq;
+ unsigned i, seq;
u64 ret;
BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
@@ -221,9 +221,10 @@ u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
do {
seq = read_seqcount_begin(&c->usage_lock);
- ret = *v +
- percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
- percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
+ ret = *v;
+
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
} while (read_seqcount_retry(&c->usage_lock, seq));
return ret;
@@ -232,15 +233,20 @@ u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
struct bch_fs_usage_online *ret;
- unsigned seq, i, u64s;
+ unsigned seq, i, v, u64s = fs_usage_u64s(c);
+retry:
+ ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
+ if (unlikely(!ret))
+ return NULL;
percpu_down_read(&c->mark_lock);
- ret = kmalloc(sizeof(struct bch_fs_usage_online) +
- sizeof(u64) + c->replicas.nr, GFP_NOFS);
- if (unlikely(!ret)) {
+ v = fs_usage_u64s(c);
+ if (unlikely(u64s != v)) {
+ u64s = v;
percpu_up_read(&c->mark_lock);
- return NULL;
+ kfree(ret);
+ goto retry;
}
ret->online_reserved = percpu_u64_get(c->online_reserved);
@@ -248,7 +254,7 @@ struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
u64s = fs_usage_u64s(c);
do {
seq = read_seqcount_begin(&c->usage_lock);
- memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
+ memcpy(ret, c->usage_base, u64s * sizeof(u64));
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
} while (read_seqcount_retry(&c->usage_lock, seq));
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
index 6312a7f06d87..7e328ccc0a8f 100644
--- a/fs/bcachefs/journal_types.h
+++ b/fs/bcachefs/journal_types.h
@@ -11,6 +11,10 @@
struct journal_res;
+#define JOURNAL_BUF_BITS 1
+#define JOURNAL_BUF_NR (1U << JOURNAL_BUF_BITS)
+#define JOURNAL_BUF_MASK (JOURNAL_BUF_NR - 1)
+
/*
* We put two of these in struct journal; we used them for writes to the
* journal that are being staged or in flight.
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index f46aa1d70e35..85c97f67936a 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -275,7 +275,7 @@ static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
static int replicas_table_update(struct bch_fs *c,
struct bch_replicas_cpu *new_r)
{
- struct bch_fs_usage __percpu *new_usage[2];
+ struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
struct bch_fs_usage_online *new_scratch = NULL;
struct bch_fs_usage __percpu *new_gc = NULL;
struct bch_fs_usage *new_base = NULL;
@@ -283,7 +283,14 @@ static int replicas_table_update(struct bch_fs *c,
sizeof(u64) * new_r->nr;
unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
sizeof(u64) * new_r->nr;
- int ret = -ENOMEM;
+ int ret = 0;
+
+ memset(new_usage, 0, sizeof(new_usage));
+
+ for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+ if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
+ sizeof(u64), GFP_NOIO)))
+ goto err;
memset(new_usage, 0, sizeof(new_usage));
@@ -295,10 +302,8 @@ static int replicas_table_update(struct bch_fs *c,
if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
!(new_scratch = kmalloc(scratch_bytes, GFP_NOIO)) ||
(c->usage_gc &&
- !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO)))) {
- bch_err(c, "error updating replicas table: memory allocation failure");
+ !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
goto err;
- }
for (i = 0; i < ARRAY_SIZE(new_usage); i++)
if (c->usage[i])
@@ -317,14 +322,17 @@ static int replicas_table_update(struct bch_fs *c,
swap(c->usage_scratch, new_scratch);
swap(c->usage_gc, new_gc);
swap(c->replicas, *new_r);
- ret = 0;
-err:
+out:
free_percpu(new_gc);
kfree(new_scratch);
free_percpu(new_usage[1]);
free_percpu(new_usage[0]);
kfree(new_base);
return ret;
+err:
+ bch_err(c, "error updating replicas table: memory allocation failure");
+ ret = -ENOMEM;
+ goto out;
}
static unsigned reserve_journal_replicas(struct bch_fs *c,
@@ -499,9 +507,7 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
struct bch_replicas_cpu n;
if (!__replicas_has_entry(&c->replicas_gc, e) &&
- (c->usage_base->replicas[i] ||
- percpu_u64_get(&c->usage[0]->replicas[i]) ||
- percpu_u64_get(&c->usage[1]->replicas[i]))) {
+ bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
n = cpu_replicas_add_entry(&c->replicas_gc, e);
if (!n.entries) {
ret = -ENOSPC;
@@ -606,9 +612,7 @@ retry:
cpu_replicas_entry(&c->replicas, i);
if (e->data_type == BCH_DATA_journal ||
- c->usage_base->replicas[i] ||
- percpu_u64_get(&c->usage[0]->replicas[i]) ||
- percpu_u64_get(&c->usage[1]->replicas[i]))
+ bch2_fs_usage_read_one(c, &c->usage_base->replicas[i]))
memcpy(cpu_replicas_entry(&new, new.nr++),
e, new.entry_size);
}
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 5406315340e1..e25ff75b97f3 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -998,7 +998,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
bch2_fs_usage_acc_to_base(c, i);
} else {
- bch2_fs_usage_acc_to_base(c, journal_seq & 1);
+ bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
}
{
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 98a875e08e9a..f46b4b05b4aa 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -483,8 +483,8 @@ static void __bch2_fs_free(struct bch_fs *c)
percpu_free_rwsem(&c->mark_lock);
free_percpu(c->online_reserved);
kfree(c->usage_scratch);
- free_percpu(c->usage[1]);
- free_percpu(c->usage[0]);
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ free_percpu(c->usage[i]);
kfree(c->usage_base);
if (c->btree_iters_bufs)