author    Kent Overstreet <kent.overstreet@linux.dev>    2023-01-31 04:58:43 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-10-23 00:09:49 +0300
commit    19a614d2e4beed7faf52ab95cb48ce38a3c38c04 (patch)
tree      2b2a71e8adaae57def4d6cd93f3628cd8fbc8573 /fs/bcachefs/buckets.c
parent    adf6360b5d6071ea268fa6f5f03befba4909ffaa (diff)
download  linux-19a614d2e4beed7faf52ab95cb48ce38a3c38c04.tar.xz
bcachefs: Better inlining for bch2_alloc_to_v4_mut
This separates out the slowpath into a separate function, and inlines
bch2_alloc_to_v4_mut() into bch2_trans_start_alloc_update(), the main place
it's called.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
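The shape the commit message describes is the classic fastpath/slowpath split: keep the common case small enough to inline at every call site, and push the rare conversion case out of line. A minimal sketch of that shape, using hypothetical names (example_to_v4, example_to_v4_slowpath) rather than the real bcachefs functions:

/*
 * Sketch only: illustrates the inline-fastpath / out-of-line-slowpath
 * pattern; names and key layout are invented for this example.
 */
#include <string.h>

#define KEY_TYPE_alloc_v4 1

struct bch_alloc_v4 { unsigned long journal_seq; };

struct bkey {
	unsigned char type;		/* KEY_TYPE_alloc_v4: no conversion needed */
	struct bch_alloc_v4 v;		/* payload, valid when type is v4 */
};

/* Cold slowpath, kept out of line: upconvert an older key format into
 * the caller-supplied buffer. */
static const struct bch_alloc_v4 *
example_to_v4_slowpath(const struct bkey *k, struct bch_alloc_v4 *convert)
{
	memset(convert, 0, sizeof(*convert));	/* stand-in for real conversion */
	return convert;
}

/* Hot fastpath, small enough to inline at the call site: an already-v4
 * key is used in place, with no copy at all. */
static inline const struct bch_alloc_v4 *
example_to_v4(const struct bkey *k, struct bch_alloc_v4 *convert)
{
	return k->type == KEY_TYPE_alloc_v4
		? &k->v
		: example_to_v4_slowpath(k, convert);
}

This matches the diff below: callers keep a stack "_convert" buffer but only touch it on the slowpath, reading through a const pointer either way.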
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r--  fs/bcachefs/buckets.c | 64
1 file changed, 34 insertions(+), 30 deletions(-)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 6c9dcfd54be6..153987376b89 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -490,8 +490,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
{
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
+ u64 bucket_journal_seq;
struct bch_fs *c = trans->c;
- struct bch_alloc_v4 old_a, new_a;
+ struct bch_alloc_v4 old_a_convert, new_a_convert;
+ const struct bch_alloc_v4 *old_a, *new_a;
struct bch_dev *ca;
int ret = 0;
@@ -508,36 +510,38 @@ int bch2_mark_alloc(struct btree_trans *trans,
ca = bch_dev_bkey_exists(c, new.k->p.inode);
- bch2_alloc_to_v4(old, &old_a);
- bch2_alloc_to_v4(new, &new_a);
+ old_a = bch2_alloc_to_v4(old, &old_a_convert);
+ new_a = bch2_alloc_to_v4(new, &new_a_convert);
+
+ bucket_journal_seq = new_a->journal_seq;
if ((flags & BTREE_TRIGGER_INSERT) &&
- data_type_is_empty(old_a.data_type) !=
- data_type_is_empty(new_a.data_type) &&
+ data_type_is_empty(old_a->data_type) !=
+ data_type_is_empty(new_a->data_type) &&
new.k->type == KEY_TYPE_alloc_v4) {
struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
- BUG_ON(!journal_seq);
+ EBUG_ON(!journal_seq);
/*
* If the btree updates referring to a bucket weren't flushed
* before the bucket became empty again, then the we don't have
* to wait on a journal flush before we can reuse the bucket:
*/
- new_a.journal_seq = data_type_is_empty(new_a.data_type) &&
+ v->journal_seq = bucket_journal_seq =
+ data_type_is_empty(new_a->data_type) &&
(journal_seq == v->journal_seq ||
bch2_journal_noflush_seq(&c->journal, v->journal_seq))
? 0 : journal_seq;
- v->journal_seq = new_a.journal_seq;
}
- if (!data_type_is_empty(old_a.data_type) &&
- data_type_is_empty(new_a.data_type) &&
- new_a.journal_seq) {
+ if (!data_type_is_empty(old_a->data_type) &&
+ data_type_is_empty(new_a->data_type) &&
+ bucket_journal_seq) {
ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk,
new.k->p.inode, new.k->p.offset,
- new_a.journal_seq);
+ bucket_journal_seq);
if (ret) {
bch2_fs_fatal_error(c,
"error setting bucket_needs_journal_commit: %i", ret);
@@ -546,10 +550,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
}
percpu_down_read(&c->mark_lock);
- if (!gc && new_a.gen != old_a.gen)
- *bucket_gen(ca, new.k->p.offset) = new_a.gen;
+ if (!gc && new_a->gen != old_a->gen)
+ *bucket_gen(ca, new.k->p.offset) = new_a->gen;
- bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+ bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
if (gc) {
struct bucket *g = gc_bucket(ca, new.k->p.offset);
@@ -557,12 +561,12 @@ int bch2_mark_alloc(struct btree_trans *trans,
bucket_lock(g);
g->gen_valid = 1;
- g->gen = new_a.gen;
- g->data_type = new_a.data_type;
- g->stripe = new_a.stripe;
- g->stripe_redundancy = new_a.stripe_redundancy;
- g->dirty_sectors = new_a.dirty_sectors;
- g->cached_sectors = new_a.cached_sectors;
+ g->gen = new_a->gen;
+ g->data_type = new_a->data_type;
+ g->stripe = new_a->stripe;
+ g->stripe_redundancy = new_a->stripe_redundancy;
+ g->dirty_sectors = new_a->dirty_sectors;
+ g->cached_sectors = new_a->cached_sectors;
bucket_unlock(g);
}
@@ -574,9 +578,9 @@ int bch2_mark_alloc(struct btree_trans *trans,
*/
if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
- old_a.cached_sectors) {
+ old_a->cached_sectors) {
ret = update_cached_sectors(c, new, ca->dev_idx,
- -((s64) old_a.cached_sectors),
+ -((s64) old_a->cached_sectors),
journal_seq, gc);
if (ret) {
bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
@@ -585,20 +589,20 @@ int bch2_mark_alloc(struct btree_trans *trans,
}
}
- if (new_a.data_type == BCH_DATA_free &&
- (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+ if (new_a->data_type == BCH_DATA_free &&
+ (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
closure_wake_up(&c->freelist_wait);
- if (new_a.data_type == BCH_DATA_need_discard &&
- (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+ if (new_a->data_type == BCH_DATA_need_discard &&
+ (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
bch2_do_discards(c);
- if (old_a.data_type != BCH_DATA_cached &&
- new_a.data_type == BCH_DATA_cached &&
+ if (old_a->data_type != BCH_DATA_cached &&
+ new_a->data_type == BCH_DATA_cached &&
should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
bch2_do_invalidates(c);
- if (new_a.data_type == BCH_DATA_need_gc_gens)
+ if (new_a->data_type == BCH_DATA_need_gc_gens)
bch2_do_gc_gens(c);
return 0;
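Read against the diff, the caller-side pattern is worth spelling out: old_a/new_a become const pointers that alias either the key itself or a stack conversion buffer, and the journal sequence is snapshotted once into bucket_journal_seq so later tests see a consistent value even after v->journal_seq is rewritten in the BTREE_TRIGGER_INSERT branch. A self-contained, hedged sketch (the to_v4() helper and key layout are hypothetical, not the bcachefs API):

#include <stdio.h>
#include <string.h>

struct bch_alloc_v4 { unsigned long journal_seq; };

struct bkey {
	int is_v4;
	struct bch_alloc_v4 v;
};

static const struct bch_alloc_v4 *
to_v4(const struct bkey *k, struct bch_alloc_v4 *convert)
{
	if (k->is_v4)
		return &k->v;			/* use the key in place */
	memset(convert, 0, sizeof(*convert));	/* upconvert older formats */
	return convert;
}

int main(void)
{
	struct bkey old_k = { .is_v4 = 1, .v = { .journal_seq = 7 } };
	struct bkey new_k = { .is_v4 = 1, .v = { .journal_seq = 9 } };

	/* As in the diff: stack buffers exist for the conversion case, but
	 * all reads go through const pointers, so the common case copies
	 * nothing. */
	struct bch_alloc_v4 old_a_convert, new_a_convert;
	const struct bch_alloc_v4 *old_a = to_v4(&old_k, &old_a_convert);
	const struct bch_alloc_v4 *new_a = to_v4(&new_k, &new_a_convert);

	/* Snapshot the sequence once, mirroring bucket_journal_seq: later
	 * checks stay consistent even if the key's own field is rewritten. */
	unsigned long bucket_journal_seq = new_a->journal_seq;

	printf("old %lu -> new %lu\n", old_a->journal_seq, bucket_journal_seq);
	return 0;
}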