author    Kent Overstreet <kent.overstreet@gmail.com>	2021-03-25 06:37:33 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:08:57 +0300
commit    331194a230f5fb266a64880e905c0364aa834964 (patch)
tree      6a838a4d76a001463671e54da58b9a1f1660f066 /fs/bcachefs/btree_key_cache.h
parent    2649b514b6cad329da0a4c8cafbd48c32bbc1b9d (diff)
bcachefs: btree key cache locking improvements
The btree key cache mutex was becoming a significant bottleneck - it was
mainly used to protect the lists of dirty, clean and freed cached keys.

This patch eliminates the dirty and clean lists - instead, when we need to
scan for keys to drop from the cache we iterate over the rhashtable, and
thus we're able to remove most uses of that lock.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
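For reference, a minimal sketch of the rhashtable scan the message describes,
using the kernel's rhashtable_walk_* iteration API. The bc->table field and
struct bkey_cached follow bcachefs naming, but scan_key_cache(), can_drop()
and drop_key() are hypothetical stand-ins, not the patch's actual reclaim
code:

#include <linux/err.h>
#include <linux/rhashtable.h>

/* Walk every cached key via the rhashtable - no list mutex held. */
static void scan_key_cache(struct btree_key_cache *bc)
{
	struct rhashtable_iter iter;
	struct bkey_cached *ck;

	rhashtable_walk_enter(&bc->table, &iter);
	rhashtable_walk_start(&iter);

	while ((ck = rhashtable_walk_next(&iter))) {
		if (IS_ERR(ck)) {
			/* -EAGAIN: table resized mid-walk; keep iterating */
			if (PTR_ERR(ck) == -EAGAIN)
				continue;
			break;
		}
		if (can_drop(ck))		/* hypothetical predicate */
			drop_key(bc, ck);	/* hypothetical helper */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

Dropping the dirty/clean lists trades targeted list scans for a full-table
walk, but the walk needs no global lock and tolerates concurrent inserts and
resizes, which is the point of the patch.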
Diffstat (limited to 'fs/bcachefs/btree_key_cache.h')
-rw-r--r--	fs/bcachefs/btree_key_cache.h	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
index 2f8b5521718a..02715cd258ab 100644
--- a/fs/bcachefs/btree_key_cache.h
+++ b/fs/bcachefs/btree_key_cache.h
@@ -3,8 +3,8 @@
 
 static inline size_t bch2_nr_btree_keys_need_flush(struct bch_fs *c)
 {
-	size_t nr_dirty = READ_ONCE(c->btree_key_cache.nr_dirty);
-	size_t nr_keys = READ_ONCE(c->btree_key_cache.nr_keys);
+	size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
+	size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
 	size_t max_dirty = 1024 + nr_keys / 2;
 
 	return max_t(ssize_t, 0, nr_dirty - max_dirty);
@@ -12,8 +12,8 @@ static inline size_t bch2_nr_btree_keys_need_flush(struct bch_fs *c)
 
 static inline bool bch2_btree_key_cache_must_wait(struct bch_fs *c)
 {
-	size_t nr_dirty = READ_ONCE(c->btree_key_cache.nr_dirty);
-	size_t nr_keys = READ_ONCE(c->btree_key_cache.nr_keys);
+	size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
+	size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
 	size_t max_dirty = 4096 + (nr_keys * 3) / 4;
 
 	return nr_dirty > max_dirty &&
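The type change in these hunks is the visible edge of the locking change:
with the list mutex gone, nr_keys and nr_dirty can no longer be updated
under that lock, so they become lockless atomic counters read with
atomic_long_read(). A minimal sketch of the update side, assuming the
fields are now atomic_long_t (the struct layout is trimmed and the helper
names are illustrative, not the patch's actual code):

#include <linux/atomic.h>
#include <linux/rhashtable.h>

/* Assumed field types after this patch (illustrative, trimmed): */
struct btree_key_cache {
	struct rhashtable	table;
	atomic_long_t		nr_keys;	/* total cached keys */
	atomic_long_t		nr_dirty;	/* keys awaiting flush */
};

/* Counter updates need no mutex; readers never see torn values. */
static inline void key_cache_mark_dirty(struct btree_key_cache *bc)
{
	atomic_long_inc(&bc->nr_dirty);
}

static inline void key_cache_key_flushed(struct btree_key_cache *bc)
{
	atomic_long_dec(&bc->nr_dirty);
}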