From dafff7e57508e87b473d91bac3e38181e2b829f2 Mon Sep 17 00:00:00 2001
From: Kent Overstreet <kent.overstreet@linux.dev>
Date: Thu, 23 Nov 2023 18:05:18 -0500
Subject: bcachefs: New bucket sector count helpers

This introduces bch2_bucket_sectors() and bch2_bucket_sectors_dirty(),
prep work for separately accounting stripe sectors.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
---
 fs/bcachefs/alloc_background.c | 22 ++++++++----------
 fs/bcachefs/alloc_background.h | 23 +++++++++++++++++--
 fs/bcachefs/buckets.c          | 52 +++++++++++++++++-------------------------
 fs/bcachefs/move.c             |  2 +-
 fs/bcachefs/movinggc.c         |  2 +-
 5 files changed, 53 insertions(+), 48 deletions(-)

(limited to 'fs')

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index dcfe26fdb500..3b1ddb8397b0 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -261,10 +261,8 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
 	case BCH_DATA_free:
 	case BCH_DATA_need_gc_gens:
 	case BCH_DATA_need_discard:
-		bkey_fsck_err_on(a.v->dirty_sectors ||
-				 a.v->cached_sectors ||
-				 a.v->stripe, c, err,
-				 alloc_key_empty_but_have_data,
+		bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
+				 c, err, alloc_key_empty_but_have_data,
 				 "empty data type free but have data");
 		break;
 	case BCH_DATA_sb:
@@ -272,22 +270,21 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
 	case BCH_DATA_btree:
 	case BCH_DATA_user:
 	case BCH_DATA_parity:
-		bkey_fsck_err_on(!a.v->dirty_sectors, c, err,
-				 alloc_key_dirty_sectors_0,
+		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
+				 c, err, alloc_key_dirty_sectors_0,
 				 "data_type %s but dirty_sectors==0",
 				 bch2_data_types[a.v->data_type]);
 		break;
 	case BCH_DATA_cached:
 		bkey_fsck_err_on(!a.v->cached_sectors ||
-				 a.v->dirty_sectors ||
-				 a.v->stripe, c, err,
-				 alloc_key_cached_inconsistency,
+				 bch2_bucket_sectors_dirty(*a.v) ||
+				 a.v->stripe,
+				 c, err, alloc_key_cached_inconsistency,
 				 "data type inconsistency");
 
 		bkey_fsck_err_on(!a.v->io_time[READ] &&
 				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
-				 c, err,
-				 alloc_key_cached_but_read_time_zero,
+				 c, err, alloc_key_cached_but_read_time_zero,
 				 "cached bucket with read_time == 0");
 		break;
 	case BCH_DATA_stripe:
@@ -790,8 +787,7 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 
 	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
 
-	if (new_a->dirty_sectors > old_a->dirty_sectors ||
-	    new_a->cached_sectors > old_a->cached_sectors) {
+	if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
 		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
 		new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
 		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 73faf99a222a..96671f166dd8 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -71,6 +71,24 @@ static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
 	return data_type == BCH_DATA_stripe ? BCH_DATA_user : data_type;
 }
 
+static inline unsigned bch2_bucket_sectors(struct bch_alloc_v4 a)
+{
+	return a.dirty_sectors + a.cached_sectors;
+}
+
+static inline unsigned bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
+{
+	return a.dirty_sectors;
+}
+
+static inline unsigned bch2_bucket_sectors_fragmented(struct bch_dev *ca,
+						      struct bch_alloc_v4 a)
+{
+	int d = bch2_bucket_sectors_dirty(a);
+
+	return d ? max(0, ca->mi.bucket_size - d) : 0;
+}
+
 static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
 {
 	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
@@ -90,10 +108,11 @@ static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
 					      struct bch_dev *ca)
 {
 	if (!data_type_movable(a.data_type) ||
-	    a.dirty_sectors >= ca->mi.bucket_size)
+	    !bch2_bucket_sectors_fragmented(ca, a))
 		return 0;
 
-	return div_u64((u64) a.dirty_sectors * (1ULL << 31), ca->mi.bucket_size);
+	u64 d = bch2_bucket_sectors_dirty(a);
+	return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
 }
 
 static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index ee3a2f5271c3..bb2ded894b7e 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -277,14 +277,6 @@ void bch2_dev_usage_init(struct bch_dev *ca)
 	ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
 }
 
-static inline int bucket_sectors_fragmented(struct bch_dev *ca,
-					    struct bch_alloc_v4 a)
-{
-	return a.dirty_sectors
-		? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
-		: 0;
-}
-
 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 				  struct bch_alloc_v4 old,
 				  struct bch_alloc_v4 new,
@@ -306,41 +298,40 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	u->d[old.data_type].buckets--;
 	u->d[new.data_type].buckets++;
 
-	u->buckets_ec -= (int) !!old.stripe;
-	u->buckets_ec += (int) !!new.stripe;
+	u->buckets_ec -= !!old.stripe;
+	u->buckets_ec += !!new.stripe;
 
-	u->d[old.data_type].sectors -= old.dirty_sectors;
-	u->d[new.data_type].sectors += new.dirty_sectors;
+	u->d[old.data_type].sectors -= bch2_bucket_sectors_dirty(old);
+	u->d[new.data_type].sectors += bch2_bucket_sectors_dirty(new);
 
 	u->d[BCH_DATA_cached].sectors += new.cached_sectors;
 	u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
 
-	u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
-	u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
+	u->d[old.data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, old);
+	u->d[new.data_type].fragmented += bch2_bucket_sectors_fragmented(ca, new);
 
 	preempt_enable();
 }
 
+static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
+{
+	return (struct bch_alloc_v4) {
+		.gen		= b.gen,
+		.data_type	= b.data_type,
+		.dirty_sectors	= b.dirty_sectors,
+		.cached_sectors	= b.cached_sectors,
+		.stripe		= b.stripe,
+	};
+}
+
 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
 				    struct bucket old, struct bucket new,
 				    u64 journal_seq, bool gc)
 {
-	struct bch_alloc_v4 old_a = {
-		.gen		= old.gen,
-		.data_type	= old.data_type,
-		.dirty_sectors	= old.dirty_sectors,
-		.cached_sectors	= old.cached_sectors,
-		.stripe		= old.stripe,
-	};
-	struct bch_alloc_v4 new_a = {
-		.gen		= new.gen,
-		.data_type	= new.data_type,
-		.dirty_sectors	= new.dirty_sectors,
-		.cached_sectors	= new.cached_sectors,
-		.stripe		= new.stripe,
-	};
-
-	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+	bch2_dev_usage_update(c, ca,
+			      bucket_m_to_alloc(old),
+			      bucket_m_to_alloc(new),
+			      journal_seq, gc);
 }
 
 static inline int __update_replicas(struct bch_fs *c,
@@ -640,7 +631,6 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 		goto err;
 	}
 
-	g->data_type = data_type;
 	g->dirty_sectors += sectors;
 	new = *g;
 
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index d99742fbdb19..615f18a0311a 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -677,7 +677,7 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 	}
 
 	a = bch2_alloc_to_v4(k, &a_convert);
-	dirty_sectors = a->dirty_sectors;
+	dirty_sectors = bch2_bucket_sectors_dirty(*a);
 	bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
 	fragmentation = a->fragmentation_lru;
 
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index a84e79f79e5e..b8d0542222c3 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -91,7 +91,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
 
 	a = bch2_alloc_to_v4(k, &_a);
 	b->k.gen	= a->gen;
-	b->sectors	= a->dirty_sectors;
+	b->sectors	= bch2_bucket_sectors_dirty(*a);
 
 	ret = data_type_movable(a->data_type) &&
 		a->fragmentation_lru &&
-- 
cgit v1.2.3
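
For readers skimming the diff, here is a small standalone sketch (not part of the commit) of what the new helpers compute. The structs below are simplified stand-ins for the kernel's struct bch_alloc_v4 and the per-device member info; the field names follow the patch, everything else is assumed for illustration.

/*
 * Standalone sketch, compiles as an ordinary userspace program.
 */
#include <stdio.h>

struct alloc_v4 {
	unsigned dirty_sectors;
	unsigned cached_sectors;
};

struct dev_member {
	unsigned bucket_size;
};

/* total referenced sectors, dirty + cached, as in bch2_bucket_sectors() */
static unsigned bucket_sectors(struct alloc_v4 a)
{
	return a.dirty_sectors + a.cached_sectors;
}

/* dirty sectors only, as in bch2_bucket_sectors_dirty() */
static unsigned bucket_sectors_dirty(struct alloc_v4 a)
{
	return a.dirty_sectors;
}

/*
 * Unused space in a bucket that already holds dirty data, as in
 * bch2_bucket_sectors_fragmented(); an empty bucket counts as not fragmented.
 */
static unsigned bucket_sectors_fragmented(struct dev_member mi, struct alloc_v4 a)
{
	int d = bucket_sectors_dirty(a);
	int frag = (int) mi.bucket_size - d;

	return d && frag > 0 ? frag : 0;
}

int main(void)
{
	struct dev_member mi = { .bucket_size = 512 };
	struct alloc_v4 a = { .dirty_sectors = 300, .cached_sectors = 100 };

	printf("total %u dirty %u fragmented %u\n",
	       bucket_sectors(a),
	       bucket_sectors_dirty(a),
	       bucket_sectors_fragmented(mi, a));	/* total 400 dirty 300 fragmented 212 */
	return 0;
}

These are the three quantities the patch centralizes behind helpers: total referenced sectors, dirty sectors, and the unused remainder of a bucket that already holds dirty data, which the device-usage and LRU-fragmentation code paths above now obtain from one place.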