path: root/fs/bcachefs/movinggc.c
author		Kent Overstreet <kent.overstreet@linux.dev>	2023-01-31 04:58:43 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:09:49 +0300
commit		19a614d2e4beed7faf52ab95cb48ce38a3c38c04 (patch)
tree		2b2a71e8adaae57def4d6cd93f3628cd8fbc8573 /fs/bcachefs/movinggc.c
parent		adf6360b5d6071ea268fa6f5f03befba4909ffaa (diff)
download	linux-19a614d2e4beed7faf52ab95cb48ce38a3c38c04.tar.xz
bcachefs: Better inlining for bch2_alloc_to_v4_mut
This separates out the slowpath into a separate function, and inlines bch2_alloc_to_v4_mut into bch2_trans_start_alloc_update(), the main place it's called.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
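[Editor's note: a minimal, self-contained C sketch of the fast-path/slow-path split the commit message describes. All names here (struct item, normalize, __normalize_slowpath) are hypothetical stand-ins, not the bcachefs API; only the shape of the pattern comes from the commit.]

	#include <stdio.h>

	/* Hypothetical record type used only for this illustration. */
	struct item {
		int version;
		int value;
	};

	/* Out-of-line slowpath: conversion work for old-format items.
	 * It is rarely taken, so moving it into its own function keeps
	 * the wrapper below small enough to inline at every call site. */
	static void __normalize_slowpath(struct item *it)
	{
		it->value *= 2;		/* stand-in for real conversion work */
		it->version = 4;
	}

	/* Inlinable fast path: the common case costs one comparison. */
	static inline void normalize(struct item *it)
	{
		if (it->version != 4)
			__normalize_slowpath(it);
	}

	int main(void)
	{
		struct item it = { .version = 2, .value = 21 };

		normalize(&it);
		printf("version=%d value=%d\n", it.version, it.value);
		return 0;
	}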
Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r--	fs/bcachefs/movinggc.c	17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 9c55a88a2b08..a04e2330d0e6 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -117,7 +117,6 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	struct bch_alloc_v4 a;
 	int ret;
 
 	bch2_trans_init(&trans, c, 0, 0);
@@ -126,21 +125,23 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 			   BTREE_ITER_PREFETCH, k, ret) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
 		struct copygc_heap_entry e;
+		struct bch_alloc_v4 a_convert;
+		const struct bch_alloc_v4 *a;
 
-		bch2_alloc_to_v4(k, &a);
+		a = bch2_alloc_to_v4(k, &a_convert);
 
-		if (a.data_type != BCH_DATA_user ||
-		    a.dirty_sectors >= ca->mi.bucket_size ||
+		if (a->data_type != BCH_DATA_user ||
+		    a->dirty_sectors >= ca->mi.bucket_size ||
 		    bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
 			continue;
 
 		e = (struct copygc_heap_entry) {
 			.dev		= iter.pos.inode,
-			.gen		= a.gen,
-			.replicas	= 1 + a.stripe_redundancy,
-			.fragmentation	= div_u64((u64) a.dirty_sectors * (1ULL << 31),
+			.gen		= a->gen,
+			.replicas	= 1 + a->stripe_redundancy,
+			.fragmentation	= div_u64((u64) a->dirty_sectors * (1ULL << 31),
 						  ca->mi.bucket_size),
-			.sectors	= a.dirty_sectors,
+			.sectors	= a->dirty_sectors,
 			.offset		= bucket_to_sector(ca, iter.pos.offset),
 		};
 		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
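[Editor's note: the hunk above also switches callers to a pointer-returning conversion helper: bch2_alloc_to_v4() now hands back a const pointer, filling the caller's on-stack buffer only when the key actually needs converting. A minimal sketch of that calling convention, assuming the real helper behaves analogously; struct key, struct alloc_v4, and to_v4() are hypothetical types for illustration.]

	#include <stdio.h>

	/* Hypothetical stand-ins for bkey_s_c / bch_alloc_v4. */
	struct alloc_v4 {
		unsigned gen;
		unsigned dirty_sectors;
	};

	struct key {
		int is_v4;
		struct alloc_v4 v4;	/* valid when is_v4 is set */
		unsigned old_sectors;	/* old-format payload otherwise */
	};

	/* Returns a pointer to v4-format data: either directly into the
	 * key (no copy on the fast path) or into the caller-supplied
	 * conversion buffer. */
	static const struct alloc_v4 *to_v4(const struct key *k,
					    struct alloc_v4 *convert)
	{
		if (k->is_v4)
			return &k->v4;

		*convert = (struct alloc_v4) {
			.gen		= 0,
			.dirty_sectors	= k->old_sectors,
		};
		return convert;
	}

	int main(void)
	{
		struct key k = { .is_v4 = 0, .old_sectors = 128 };
		struct alloc_v4 a_convert;
		const struct alloc_v4 *a = to_v4(&k, &a_convert);

		printf("dirty_sectors = %u\n", a->dirty_sectors);
		return 0;
	}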