diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2022-04-01 08:29:59 +0300 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-23 00:09:30 +0300 |
commit | 822835ffeae411bbc8af104da9331fdf63a7bc12 (patch) | |
tree | 57d2b7fd487fc05d58464ea179d435ff4d149bc9 /fs/bcachefs/alloc_foreground.c | |
parent | 8058ea64c31c8700eaab48c38a143d1c3817f1de (diff) | |
download | linux-822835ffeae411bbc8af104da9331fdf63a7bc12.tar.xz |
bcachefs: Fold bucket_state in to BCH_DATA_TYPES()
Previously, we were missing accounting for buckets in need_gc_gens and
need_discard states. This matters because buckets in those states need
other btree operations done before they can be used, so they can't be
counted when checking current number of free buckets against the
allocation watermark.
Also, we weren't directly counting free buckets at all. Now, data type 0
== BCH_DATA_free, and free buckets are counted; this means we can get
rid of the separate (poorly defined) count of unavailable buckets.
This is a new on disk format version, with upgrade and fsck required for
the accounting changes.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Diffstat (limited to 'fs/bcachefs/alloc_foreground.c')
-rw-r--r-- | fs/bcachefs/alloc_foreground.c | 45 |
1 files changed, 29 insertions, 16 deletions
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index 01abcf43341f..14162dd4d696 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -331,7 +331,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc } - if (a.data_type != BUCKET_free) { + if (a.data_type != BCH_DATA_free) { pr_buf(&buf, "non free bucket in freespace btree\n" " freespace key "); bch2_bkey_val_to_text(&buf, c, freespace_k); @@ -417,7 +417,7 @@ again: bch2_alloc_to_v4(k, &a); - if (bucket_state(a) != BUCKET_free) + if (a.data_type != BCH_DATA_free) continue; (*buckets_seen)++; @@ -517,27 +517,31 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct open_bucket *ob = NULL; - u64 avail = dev_buckets_available(ca, reserve); + struct bch_dev_usage usage; + u64 avail; u64 buckets_seen = 0; u64 skipped_open = 0; u64 skipped_need_journal_commit = 0; u64 skipped_nouse = 0; - - if (may_alloc_partial) { - ob = try_alloc_partial_bucket(c, ca, reserve); - if (ob) - return ob; - } + bool waiting = false; again: + usage = bch2_dev_usage_read(ca); + avail = __dev_buckets_available(ca, usage,reserve); + + if (usage.d[BCH_DATA_need_discard].buckets > avail) + bch2_do_discards(c); + + if (usage.d[BCH_DATA_need_gc_gens].buckets > avail) + bch2_do_gc_gens(c); + + if (should_invalidate_buckets(ca, usage)) + bch2_do_invalidates(c); + if (!avail) { - if (cl) { + if (cl && !waiting) { closure_wait(&c->freelist_wait, cl); - /* recheck after putting ourself on waitlist */ - avail = dev_buckets_available(ca, reserve); - if (avail) { - closure_wake_up(&c->freelist_wait); - goto again; - } + waiting = true; + goto again; } if (!c->blocked_allocate) @@ -547,6 +551,15 @@ again: goto err; } + if (waiting) + closure_wake_up(&c->freelist_wait); + + if (may_alloc_partial) { + ob = try_alloc_partial_bucket(c, ca, reserve); + if (ob) + return ob; + } + ob = 
likely(ca->mi.freespace_initialized) ? bch2_bucket_alloc_freelist(trans, ca, reserve, &buckets_seen, |