path: root/fs/bcachefs/btree_iter.c
author    Kent Overstreet <kent.overstreet@linux.dev>  2023-08-14 01:15:53 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:10:11 +0300
commit    ff5b741c25fb9546d876ca4c0c1d8720f6a2471c (patch)
tree      0e2d24c85bb82e2b0c7bffdb5bffe2b841b78267 /fs/bcachefs/btree_iter.c
parent    e9679b4a0618b0b55d22ec555bc9c6b2dab39809 (diff)
download  linux-ff5b741c25fb9546d876ca4c0c1d8720f6a2471c.tar.xz
bcachefs: Zero btree_paths on allocation
This fixes a bug in the cycle detector, bch2_check_for_deadlock(): we have to make sure the node pointers in the btree paths array are set to something not-garbage before another thread may see them.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
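Below is a minimal, hypothetical C sketch of the pattern the commit enforces: a buffer handed out by an allocator may still hold stale data from a previous user, so it must be zeroed before the pointer to it is published where a concurrent reader could follow it. The names here (struct path, struct trans, trans_alloc_paths()) are illustrative stand-ins rather than the actual bcachefs structures, and malloc() stands in for mempool_alloc().

/*
 * Minimal sketch (hypothetical names, not the bcachefs structures):
 * zero a freshly allocated paths array before publishing it, because a
 * concurrent reader may walk it as soon as the pointer becomes visible.
 */
#include <stdlib.h>
#include <string.h>

struct path {
        void *node;             /* garbage until zeroed or initialized */
};

struct trans {
        struct path *paths;     /* other threads may walk this once set */
        unsigned nr;
};

static int trans_alloc_paths(struct trans *t, unsigned nr)
{
        struct path *p = malloc(nr * sizeof(*p));   /* stand-in for mempool_alloc() */

        if (!p)
                return -1;

        /* Zero before publishing: a reader must never see a garbage ->node. */
        memset(p, 0, nr * sizeof(*p));

        t->nr = nr;
        t->paths = p;   /* only now is it safe for another thread to look */
        return 0;
}

In the actual patch the same idea is the memset(p, 0, paths_bytes) added inside the !p branch of bch2_trans_alloc_paths(), shown in the diff below.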
Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r--  fs/bcachefs/btree_iter.c  |  12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index ad95849845a5..d22412dc5b46 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -2898,12 +2898,14 @@ static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
 #ifdef __KERNEL__
         p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
 #endif
-        if (!p)
+        if (!p) {
                 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
-        /*
-         * paths need to be zeroed, bch2_check_for_deadlock looks at paths in
-         * other threads
-         */
+                /*
+                 * paths need to be zeroed, bch2_check_for_deadlock looks at
+                 * paths in other threads
+                 */
+                memset(p, 0, paths_bytes);
+        }

         trans->paths    = p; p += paths_bytes;
         trans->updates  = p; p += updates_bytes;
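For context, here is a hypothetical reader-side sketch; this is not bch2_check_for_deadlock() itself, and struct path / struct trans are the same illustrative stand-ins used in the sketch after the commit message. It shows why zeroing matters for a checker that scans another transaction's paths: a NULL node marks an unused slot and can be skipped, whereas allocator garbage left in that field could be mistaken for a real node.

#include <stdbool.h>

/* Illustrative stand-ins, matching the earlier sketch. */
struct path  { void *node; };
struct trans { struct path *paths; unsigned nr; };

/*
 * Does any path in 'other' currently point at the node we are waiting for?
 * Zeroed (NULL) entries are unused slots and are skipped; garbage pointers
 * here would produce bogus edges in the lock graph.
 */
static bool path_points_at(const struct trans *other, const void *want_node)
{
        unsigned i;

        for (i = 0; i < other->nr; i++) {
                const struct path *p = &other->paths[i];

                if (!p->node)
                        continue;       /* zeroed => slot not in use */
                if (p->node == want_node)
                        return true;    /* candidate edge in the lock graph */
        }
        return false;
}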