author    Kent Overstreet <kent.overstreet@linux.dev>  2022-08-26 21:55:00 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:40 +0300
commit    546180874ade7225676bc0cd5ea4e2388e2374bc
tree      7e20a1ce5a97ca2b956f38b99ef01862bf55fead /fs/bcachefs/btree_locking.h
parent    534a591e4cf98d036e478b93de4a95ff126fb018
bcachefs: Mark write locks before taking lock
six locks are unfair: while a thread is blocked trying to take a write
lock, new read locks will fail.  The new deadlock cycle detector makes
use of our existing lock tracing, so for it to work correctly we need
to tell it we're holding a write lock before we take the lock.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
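To illustrate the ordering this patch establishes, here is a minimal,
self-contained toy model in C.  Every name in it is invented for
illustration and is not bcachefs code: the held[] table stands in for
bcachefs's lock tracing, and the point is only that a detector scanning
another thread's marks can see the write lock even while that thread is
still blocked acquiring it.

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Toy model only: held[] stands in for the lock tracing that
	 * the deadlock cycle detector walks.
	 */

	#define MAX_LEVELS 4

	enum lock_type { LOCK_NONE, LOCK_INTENT, LOCK_WRITE };

	struct path_marks {
		enum lock_type held[MAX_LEVELS];
	};

	/* Stand-in for the detector inspecting another thread's marks: */
	static bool detector_sees_write(const struct path_marks *m,
					unsigned level)
	{
		return m->held[level] == LOCK_WRITE;
	}

	/* Stub primitives: pretend the trylock fails, forcing the slow path. */
	static bool trylock_write(void)
	{
		return false;
	}

	static void lock_write_slowpath(const struct path_marks *m,
					unsigned level)
	{
		/* While a real thread would block here, the detector may run: */
		printf("blocked; detector sees write mark at level %u: %s\n",
		       level, detector_sees_write(m, level) ? "yes" : "no");
	}

	static void node_lock_write(struct path_marks *m, unsigned level)
	{
		/*
		 * Mark *before* taking the lock: if we block in the slow
		 * path, other threads' cycle detection must already see
		 * this thread as a write-lock holder, or a deadlock cycle
		 * passing through it goes undetected.
		 */
		m->held[level] = LOCK_WRITE;

		if (!trylock_write())
			lock_write_slowpath(m, level);
	}

	int main(void)
	{
		struct path_marks m = { { LOCK_NONE } };

		node_lock_write(&m, 0);
		return 0;
	}

Had the mark been set only after the trylock/slow path (as in the two
deleted lines of the diff below), the blocked thread would be invisible
to the detector during exactly the window in which it can participate in
a deadlock.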
Diffstat (limited to 'fs/bcachefs/btree_locking.h')
 fs/bcachefs/btree_locking.h | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 1e4c81d8084b..ab3161c1b1f4 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -271,10 +271,15 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
 	EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
 	EBUG_ON(!btree_node_intent_locked(path, b->c.level));
 
+	/*
+	 * six locks are unfair, and read locks block while a thread wants a
+	 * write lock: thus, we need to tell the cycle detector we have a write
+	 * lock _before_ taking the lock:
+	 */
+	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+
 	if (unlikely(!six_trylock_write(&b->c.lock)))
 		__bch2_btree_node_lock_write(trans, b);
-
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
 }
 
 /* relock: */