author    Kent Overstreet <kent.overstreet@gmail.com>  2022-02-11 03:26:55 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:29 +0300
commit    5735608c14e791c10ebcb6a20fab1c8fa4cf3123 (patch)
tree      034f074d10d4d68c2bb84719fe0fa0b5507b3ee0 /fs/bcachefs/buckets.c
parent    5f43f99c6ef74f592c380b39069ee68dcfe3ee58 (diff)
download  linux-5735608c14e791c10ebcb6a20fab1c8fa4cf3123.tar.xz
bcachefs: Kill main in-memory bucket array
All code using the in-memory bucket array, excluding GC, has now been converted to use the alloc btree directly - so we can finally delete it.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
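For readers skimming the first hunk below: after this patch the non-GC copy of the bucket state lives only in the bch_alloc_v4 key, and the in-memory struct bucket survives purely as GC's shadow copy. The following userspace sketch models that split; the field names mirror the diff, while mark_alloc(), its simplified signature and the boolean flags are illustrative stand-ins, not the kernel API.

/*
 * Toy model, not kernel code: before this patch the bucket state existed both
 * in the alloc btree key and in a per-device in-memory array that every
 * trigger had to keep in sync.  Afterwards, non-GC paths read bch_alloc_v4
 * alone and only GC keeps a shadow "struct bucket".
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bch_alloc_v4 {			/* state carried by the alloc btree key */
	uint8_t		gen;
	uint8_t		data_type;
	uint32_t	dirty_sectors;
	uint32_t	cached_sectors;
	uint32_t	stripe;
};

struct bucket {				/* GC-only shadow copy */
	uint8_t		gen;
	uint8_t		data_type;
	uint32_t	dirty_sectors;
	uint32_t	cached_sectors;
	bool		stripe;
};

/* Mirrors the new shape of bch2_mark_alloc(): the in-memory bucket is only
 * touched under GC, and the invalidate accounting reads old_a directly. */
static void mark_alloc(struct bch_alloc_v4 old_a, struct bch_alloc_v4 new_a,
		       struct bucket *g, bool gc, bool bucket_invalidate)
{
	if (gc) {
		g->gen		  = new_a.gen;
		g->data_type	  = new_a.data_type;
		g->dirty_sectors  = new_a.dirty_sectors;
		g->cached_sectors = new_a.cached_sectors;
		g->stripe	  = new_a.stripe != 0;
	}

	if (bucket_invalidate && old_a.cached_sectors)
		printf("invalidated %u cached sectors\n",
		       (unsigned) old_a.cached_sectors);
}

int main(void)
{
	struct bucket g = {0};
	struct bch_alloc_v4 old_a = { .cached_sectors = 128 };
	struct bch_alloc_v4 new_a = { .gen = 1 };

	mark_alloc(old_a, new_a, &g, true, true);	/* GC path: shadow bucket updated */
	return 0;
}

The point of the change is visible in the sketch: with the non-GC copy gone, there is no second structure to keep in sync, so the cmpxchg dance and the io_time/gen_valid bookkeeping only remain on the GC branch.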
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r--  fs/bcachefs/buckets.c  |  80
1 file changed, 25 insertions(+), 55 deletions(-)
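The second half of the diff reworks bch2_dev_buckets_resize(): only the bucket_gens array is (re)allocated now, min(old, new) entries are copied across, the new pointer is published, and the old array is freed via RCU. Below is a minimal userspace sketch of that copy-and-swap pattern; the bucket_gens layout matches the diff, but bucket_gens_resize() and the use of calloc()/free() in place of kvpmalloc()/rcu_assign_pointer()/call_rcu() are simplifications.

/*
 * Toy sketch of the resize pattern bch2_dev_buckets_resize() is left with:
 * allocate a new bucket_gens array, copy min(old, new) entries, publish the
 * new pointer and free the old array.  The kernel publishes with
 * rcu_assign_pointer() and frees with call_rcu(); calloc()/free() and a plain
 * return value stand in for that here.
 */
#include <stdlib.h>
#include <string.h>

struct bucket_gens {
	size_t		first_bucket;
	size_t		nbuckets;
	unsigned char	b[];		/* one generation byte per bucket */
};

static struct bucket_gens *bucket_gens_resize(struct bucket_gens *old_gens,
					      size_t first_bucket, size_t nbuckets)
{
	struct bucket_gens *new_gens = calloc(1, sizeof(*new_gens) + nbuckets);

	if (!new_gens)
		return old_gens;	/* keep the old array on allocation failure */

	new_gens->first_bucket	= first_bucket;
	new_gens->nbuckets	= nbuckets;

	if (old_gens) {
		size_t n = nbuckets < old_gens->nbuckets
			 ? nbuckets : old_gens->nbuckets;

		memcpy(new_gens->b, old_gens->b, n);	/* as in the diff: memcpy(..., n) */
		free(old_gens);		/* kernel: call_rcu() after readers drain */
	}
	return new_gens;
}

int main(void)
{
	struct bucket_gens *g = bucket_gens_resize(NULL, 0, 8);

	g = bucket_gens_resize(g, 0, 16);	/* grow: the first 8 gens are preserved */
	free(g);
	return 0;
}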
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 60ad873da54f..572d56676c69 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -512,8 +512,6 @@ int bch2_mark_alloc(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct bch_alloc_v4 old_a, new_a;
struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
- struct bucket *g;
- struct bucket_mark old_m, m;
int ret = 0;
if (bch2_trans_inconsistent_on(new.k->p.offset < ca->mi.first_bucket ||
@@ -587,21 +585,22 @@ int bch2_mark_alloc(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
- g = __bucket(ca, new.k->p.offset, gc);
-
- old_m = bucket_cmpxchg(g, m, ({
- m.gen = new_a.gen;
- m.data_type = new_a.data_type;
- m.dirty_sectors = new_a.dirty_sectors;
- m.cached_sectors = new_a.cached_sectors;
- m.stripe = new_a.stripe != 0;
- }));
-
- g->io_time[READ] = new_a.io_time[READ];
- g->io_time[WRITE] = new_a.io_time[WRITE];
- g->gen_valid = 1;
- g->stripe = new_a.stripe;
- g->stripe_redundancy = new_a.stripe_redundancy;
+ if (gc) {
+ struct bucket_mark old_m, m;
+ struct bucket *g = gc_bucket(ca, new.k->p.offset);
+
+ old_m = bucket_cmpxchg(g, m, ({
+ m.gen = new_a.gen;
+ m.data_type = new_a.data_type;
+ m.dirty_sectors = new_a.dirty_sectors;
+ m.cached_sectors = new_a.cached_sectors;
+ m.stripe = new_a.stripe != 0;
+ }));
+
+ g->gen_valid = 1;
+ g->stripe = new_a.stripe;
+ g->stripe_redundancy = new_a.stripe_redundancy;
+ }
percpu_up_read(&c->mark_lock);
/*
@@ -610,9 +609,9 @@ int bch2_mark_alloc(struct btree_trans *trans,
*/
if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
- old_m.cached_sectors) {
+ old_a.cached_sectors) {
ret = update_cached_sectors(c, new, ca->dev_idx,
- -old_m.cached_sectors,
+ -old_a.cached_sectors,
journal_seq, gc);
if (ret) {
bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
@@ -620,7 +619,7 @@ int bch2_mark_alloc(struct btree_trans *trans,
}
trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
- old_m.cached_sectors);
+ old_a.cached_sectors);
}
return 0;
@@ -2039,16 +2038,6 @@ recalculate:
/* Startup/shutdown: */
-static void buckets_free_rcu(struct rcu_head *rcu)
-{
- struct bucket_array *buckets =
- container_of(rcu, struct bucket_array, rcu);
-
- kvpfree(buckets,
- sizeof(*buckets) +
- buckets->nbuckets * sizeof(struct bucket));
-}
-
static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
struct bucket_gens *buckets =
@@ -2059,16 +2048,12 @@ static void bucket_gens_free_rcu(struct rcu_head *rcu)
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
- struct bucket_array *buckets = NULL, *old_buckets = NULL;
struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
unsigned long *buckets_nouse = NULL;
- bool resize = ca->buckets[0] != NULL;
+ bool resize = ca->bucket_gens != NULL;
int ret = -ENOMEM;
- if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
- nbuckets * sizeof(struct bucket),
- GFP_KERNEL|__GFP_ZERO)) ||
- !(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
+ if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
GFP_KERNEL|__GFP_ZERO)) ||
(c->opts.buckets_nouse &&
!(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
@@ -2076,8 +2061,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
GFP_KERNEL|__GFP_ZERO))))
goto err;
- buckets->first_bucket = ca->mi.first_bucket;
- buckets->nbuckets = nbuckets;
bucket_gens->first_bucket = ca->mi.first_bucket;
bucket_gens->nbuckets = nbuckets;
@@ -2089,15 +2072,11 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
percpu_down_write(&c->mark_lock);
}
- old_buckets = bucket_array(ca);
old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
if (resize) {
- size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
+ size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
- memcpy(buckets->b,
- old_buckets->b,
- n * sizeof(struct bucket));
memcpy(bucket_gens->b,
old_bucket_gens->b,
n);
@@ -2107,31 +2086,25 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
BITS_TO_LONGS(n) * sizeof(unsigned long));
}
- rcu_assign_pointer(ca->buckets[0], buckets);
rcu_assign_pointer(ca->bucket_gens, bucket_gens);
- buckets = old_buckets;
bucket_gens = old_bucket_gens;
swap(ca->buckets_nouse, buckets_nouse);
+ nbuckets = ca->mi.nbuckets;
+
if (resize) {
percpu_up_write(&c->mark_lock);
+ up_write(&ca->bucket_lock);
up_write(&c->gc_lock);
}
- nbuckets = ca->mi.nbuckets;
-
- if (resize)
- up_write(&ca->bucket_lock);
-
ret = 0;
err:
kvpfree(buckets_nouse,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
if (bucket_gens)
call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
- if (buckets)
- call_rcu(&buckets->rcu, buckets_free_rcu);
return ret;
}
@@ -2144,9 +2117,6 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
sizeof(struct bucket_gens) + ca->mi.nbuckets);
- kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
- sizeof(struct bucket_array) +
- ca->mi.nbuckets * sizeof(struct bucket));
for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
free_percpu(ca->usage[i]);