author    Kent Overstreet <kent.overstreet@linux.dev>  2023-03-06 16:58:02 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:41 +0300
commit    2ec254c098da677295c2487ae36e75a26d557222 (patch)
tree      d6c3ffceb92f16978c9e526803a2d8058fa8630b /fs/bcachefs/btree_locking.c
parent    0d7009d7ca99ad9261a7cffcecd515108377a6ac (diff)
bcachefs: Ensure bch2_btree_node_lock_write_nofail() never fails
In order for bch2_btree_node_lock_write_nofail() to never produce a deadlock,
we must ensure we're never holding read locks when using it. Fortunately, it's
only used from code paths where any read locks may be safely dropped.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
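To see why holding read locks makes the write lock unsafe, here is a deliberately simplified, single-threaded model of the commit's reasoning. It is not bcachefs's actual btree node locking; every identifier in it (toy_lock, toy_trylock_write, toy_unlock_read) is invented for this sketch. It shows why a write lock requested while we still hold our own read lock on the same node can never be granted (a blocking acquisition would be a self-deadlock), and why dropping our read locks first lets a "nofail" acquisition always succeed.

	/* Toy model only, NOT bcachefs code: a reader/writer lock where the
	 * write side is granted only once no read holds remain. */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_lock {
		int	readers;	/* outstanding read holds */
		bool	write_locked;
	};

	static bool toy_trylock_write(struct toy_lock *l)
	{
		if (l->readers || l->write_locked)
			return false;	/* writer must wait for readers to drain */
		l->write_locked = true;
		return true;
	}

	static void toy_unlock_read(struct toy_lock *l)
	{
		assert(l->readers > 0);
		l->readers--;
	}

	int main(void)
	{
		/* The "transaction" already holds one read lock on the node: */
		struct toy_lock node = { .readers = 1, .write_locked = false };

		/*
		 * Requesting the write lock now can never succeed: the writer
		 * waits for the reader count to reach zero, but the remaining
		 * reader is us. A blocking version of this call would deadlock
		 * against ourselves.
		 */
		assert(!toy_trylock_write(&node));

		/*
		 * What bch2_btree_node_lock_write_nofail() does, in miniature:
		 * drop every read lock we hold first, so the write lock can no
		 * longer be blocked by our own holds.
		 */
		toy_unlock_read(&node);
		assert(toy_trylock_write(&node));
		printf("write lock granted after dropping our read locks\n");
		return 0;
	}

The patch below has the same shape: trans_for_each_path() and btree_node_unlock() drop every read lock the transaction holds, then __btree_node_lock_write() takes the write lock and BUG_ON() asserts that it cannot have failed.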
Diffstat (limited to 'fs/bcachefs/btree_locking.c')
-rw-r--r--  fs/bcachefs/btree_locking.c  |  34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 11f83a936dc7..6793d7dd18d7 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -320,6 +320,40 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p
 	return ret;
 }
 
+void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
+				       struct btree_path *path,
+				       struct btree_bkey_cached_common *b)
+{
+	struct btree_path *linked;
+	unsigned i;
+	int ret;
+
+	/*
+	 * XXX BIG FAT NOTICE
+	 *
+	 * Drop all read locks before taking a write lock:
+	 *
+	 * This is a hack, because bch2_btree_node_lock_write_nofail() is a
+	 * hack - but by dropping read locks first, this should never fail, and
+	 * we only use this in code paths where whatever read locks we've
+	 * already taken are no longer needed:
+	 */
+
+	trans_for_each_path(trans, linked) {
+		if (!linked->nodes_locked)
+			continue;
+
+		for (i = 0; i < BTREE_MAX_DEPTH; i++)
+			if (btree_node_read_locked(linked, i)) {
+				btree_node_unlock(trans, linked, i);
+				btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
+			}
+	}
+
+	ret = __btree_node_lock_write(trans, path, b, true);
+	BUG_ON(ret);
+}
+
 /* relock */
 
 static inline bool btree_path_get_locks(struct btree_trans *trans,