summaryrefslogtreecommitdiff
path: root/fs/bcachefs/btree_cache.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-05-28 09:35:34 +0300
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-23 00:10:03 +0300
commite1d29c5fa1205c75dd96303f76d7aa4991555aaa (patch)
tree646b7b130460597769928e29509f16c3d8816e6d /fs/bcachefs/btree_cache.c
parent70d41c9e276c5e7b130d328a6ece92c9130a6572 (diff)
downloadlinux-e1d29c5fa1205c75dd96303f76d7aa4991555aaa.tar.xz
bcachefs: Ensure bch2_btree_node_get() calls relock() after unlock()
Fix a bug where bch2_btree_node_get() might call bch2_trans_unlock() (in fill) without calling bch2_trans_relock(); this is a bug when it's done in the core btree code. Also, tweak bch2_btree_node_mem_alloc() to drop btree locks before doing a blocking memory allocation. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_cache.c')
-rw-r--r--fs/bcachefs/btree_cache.c31
1 file changed, 19 insertions, 12 deletions
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 58ef9e7b4bdf..681a47b70a65 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -587,9 +587,10 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
goto got_node;
}
- b = __btree_node_mem_alloc(c, __GFP_NOWARN);
+ b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
if (!b) {
mutex_unlock(&bc->lock);
+ bch2_trans_unlock(trans);
b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
goto err;
@@ -618,8 +619,11 @@ got_node:
mutex_unlock(&bc->lock);
- if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
- goto err;
+ if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+ bch2_trans_unlock(trans);
+ if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
+ goto err;
+ }
mutex_lock(&bc->lock);
bc->used++;
@@ -812,6 +816,7 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
+ bool need_relock = false;
int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -825,6 +830,7 @@ retry:
*/
b = bch2_btree_node_fill(trans, path, k, path->btree_id,
level, lock_type, true);
+ need_relock = true;
/* We raced and found the btree node in the cache */
if (!b)
@@ -863,6 +869,7 @@ retry:
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(trans);
+ need_relock = true;
bch2_btree_node_wait_on_read(b);
@@ -870,19 +877,19 @@ retry:
* should_be_locked is not set on this path yet, so we need to
* relock it specifically:
*/
- if (trans) {
- int ret = bch2_trans_relock(trans) ?:
- bch2_btree_path_relock_intent(trans, path);
- if (ret) {
- BUG_ON(!trans->restarted);
- return ERR_PTR(ret);
- }
- }
-
if (!six_relock_type(&b->c.lock, lock_type, seq))
goto retry;
}
+ if (unlikely(need_relock)) {
+ int ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return ERR_PTR(ret);
+ }
+ }
+
prefetch(b->aux_data);
for_each_bset(b, t) {