author	Kent Overstreet <kent.overstreet@linux.dev>	2023-05-28 10:44:38 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:10:03 +0300
commit	d95dd378c207ddec7551cce2e047e6067c3c27ab (patch)
tree	559134f6a725b13cff6de65a4ddc859213247c98 /fs/bcachefs/btree_key_cache.c
parent	3ebfc8fe95c5ec560d2d5c7e7bef62ebaa33a9c4 (diff)
bcachefs: allocate_dropping_locks()
Add two new helpers for allocating memory with btree locks held: the idea is to first try the allocation with GFP_NOWAIT|__GFP_NOWARN, then, if that fails, unlock, retry with GFP_KERNEL, and then call trans_relock().

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
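For reference, a minimal sketch of what such a helper can look like, assuming it follows the behaviour described above; the actual definitions added by this series live alongside the other btree transaction helpers and may differ in detail. The key point visible in the diff below is that the wrapped allocation expression spells its gfp argument as _gfp, the name the macro rebinds between the GFP_NOWAIT and GFP_KERNEL attempts.

/*
 * Sketch only, not the exact committed code: a drop_locks_do()-style
 * wrapper runs an expression with the transaction's btree locks
 * dropped and relocks afterwards; allocate_dropping_locks() uses it
 * to retry a failed GFP_NOWAIT allocation with GFP_KERNEL.
 */
#define drop_locks_do(_trans, _do)				\
({								\
	bch2_trans_unlock(_trans);				\
	_do ?: bch2_trans_relock(_trans);			\
})

#define allocate_dropping_locks(_trans, _ret, _do)		\
({								\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;			\
	typeof(_do) _p = _do;					\
								\
	_ret = 0;						\
	if (unlikely(!_p)) {					\
		_gfp = GFP_KERNEL;				\
		/* reevaluate _do unlocked, then relock */	\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));	\
	}							\
	_p;							\
})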
Diffstat (limited to 'fs/bcachefs/btree_key_cache.c')
-rw-r--r--	fs/bcachefs/btree_key_cache.c	13
1 file changed, 3 insertions, 10 deletions
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 37977b774d61..37beb75e2571 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -264,15 +264,8 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
 		return ck;
 	}
 
-	ck = kmem_cache_zalloc(bch2_key_cache, GFP_NOWAIT|__GFP_NOWARN);
-	if (likely(ck))
-		goto init;
-
-	bch2_trans_unlock(trans);
-
-	ck = kmem_cache_zalloc(bch2_key_cache, GFP_KERNEL);
-
-	ret = bch2_trans_relock(trans);
+	ck = allocate_dropping_locks(trans, ret,
+			kmem_cache_zalloc(bch2_key_cache, _gfp));
 	if (ret) {
 		kmem_cache_free(bch2_key_cache, ck);
 		return ERR_PTR(ret);
@@ -280,7 +273,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
 
 	if (!ck)
 		return NULL;
-init:
+
 	INIT_LIST_HEAD(&ck->list);
 	bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
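The same conversion applies to any allocation made while btree locks are held. A hypothetical call site (struct foo and the surrounding function are illustrative, not part of this commit) follows the same two checks seen above: ret reports a failed relock, a NULL pointer reports that even the GFP_KERNEL attempt failed.

	struct foo *f;
	int ret;

	/* _gfp is supplied by the macro: GFP_NOWAIT first, then GFP_KERNEL */
	f = allocate_dropping_locks(trans, ret,
			kzalloc(sizeof(*f), _gfp));
	if (ret)			/* relocking the transaction failed */
		return ERR_PTR(ret);
	if (!f)				/* allocation failed outright */
		return ERR_PTR(-ENOMEM);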