author    Kent Overstreet <kent.overstreet@gmail.com>  2022-04-04 03:36:32 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:30 +0300
commit    7c7e071d90ac278e462640570d739dd165d3acd0 (patch)
tree      cf32fcc59e4b382afe0776565de25bffebea0021 /fs/bcachefs/btree_cache.c
parent    4254f5bf6e3d62ab7108a556d5afc54188e17041 (diff)
download  linux-7c7e071d90ac278e462640570d739dd165d3acd0.tar.xz
bcachefs: Don't normalize to pages in btree cache shrinker
This behavior dates from the early, early days of bcache, and upon further delving appears to not make any sense. The shrinker only works in terms of 'objects' of unknown size; normalizing to pages only had the effect of changing the batch size, which we could do directly - if we wanted; we probably don't.

Normalizing to pages meant our batch size was very small, which seems to have been keeping us from doing as much shrinking as we should be under heavy memory pressure; this patch appears to alleviate some OOMs we've been seeing.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
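[Editor's note: a minimal sketch of the shrinker contract the patch relies on. It is not part of the patch; demo_count(), demo_scan(), demo_nr_cached and demo_evict() are hypothetical stand-ins. The point is that count_objects() and scan_objects() speak the same opaque object unit, so counting btree nodes directly needs no page conversion.]

#include <linux/shrinker.h>

/*
 * Hypothetical shrinker illustrating the object-unit contract:
 * count_objects() reports how many objects are freeable, and
 * scan_objects() frees up to sc->nr_to_scan of them, returning the
 * number actually freed. The core never asks what an "object" is,
 * so counting btree nodes (rather than pages) is perfectly valid.
 */
static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	return demo_nr_cached;		/* freeable nodes (assumed counter) */
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	/* demo_evict() stands in for the cache's real eviction path */
	unsigned long freed = demo_evict(sc->nr_to_scan);

	return freed;			/* same unit count_objects() used */
}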
Diffstat (limited to 'fs/bcachefs/btree_cache.c')
 -rw-r--r--  fs/bcachefs/btree_cache.c | 13
 1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 92a8cc704cab..0e3db9ee65d2 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -280,7 +280,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b, *t;
 	unsigned long nr = sc->nr_to_scan;
-	unsigned long can_free;
+	unsigned long can_free = 0;
 	unsigned long touched = 0;
 	unsigned long freed = 0;
 	unsigned i, flags;
@@ -304,7 +304,6 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	 * succeed, so that inserting keys into the btree can always succeed and
 	 * IO can always make forward progress:
 	 */
-	nr /= btree_pages(c);
 	can_free = btree_cache_can_free(bc);
 	nr = min_t(unsigned long, nr, can_free);

@@ -374,13 +373,10 @@ touched:
 	mutex_unlock(&bc->lock);
 out:
-	ret = (unsigned long) freed * btree_pages(c);
+	ret = freed;
 	memalloc_nofs_restore(flags);
 out_norestore:
-	trace_btree_cache_scan(sc->nr_to_scan,
-			       sc->nr_to_scan / btree_pages(c),
-			       btree_cache_can_free(bc),
-			       ret);
+	trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
 	return ret;
 }

@@ -394,7 +390,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
 	if (bch2_btree_shrinker_disabled)
 		return 0;
-	return btree_cache_can_free(bc) * btree_pages(c);
+	return btree_cache_can_free(bc);
 }

 void bch2_fs_btree_cache_exit(struct bch_fs *c)
@@ -481,7 +477,6 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 	bc->shrink.count_objects = bch2_btree_cache_count;
 	bc->shrink.scan_objects = bch2_btree_cache_scan;
 	bc->shrink.seeks = 4;
-	bc->shrink.batch = btree_pages(c) * 2;
 	ret = register_shrinker(&bc->shrink, "%s/btree_cache", c->name);
 out:
 	pr_verbose_init(c->opts, "ret %i", ret);
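
[Editor's note: a follow-on sketch, again hypothetical and not from the patch, completing the demo shrinker above. With .batch left at zero, the shrinker core substitutes its own default batch (SHRINK_BATCH, 128 objects in mainline), which is the larger batch the commit message alludes to once the tiny page-derived value is dropped.]

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
	/* .batch == 0: core uses SHRINK_BATCH (128 objects) per scan call */
};

static int __init demo_init(void)
{
	/* same multi-arg register_shrinker() signature the patch uses */
	return register_shrinker(&demo_shrinker, "demo");
}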