summary refs log tree commit diff
diff options
context:
space:
mode:
author	Kent Overstreet <kent.overstreet@linux.dev>	2022-08-26 21:55:00 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:09:40 +0300
commit	546180874ade7225676bc0cd5ea4e2388e2374bc (patch)
tree	7e20a1ce5a97ca2b956f38b99ef01862bf55fead
parent	534a591e4cf98d036e478b93de4a95ff126fb018 (diff)
download	linux-546180874ade7225676bc0cd5ea4e2388e2374bc.tar.xz
bcachefs: Mark write locks before taking lock
six locks are unfair: while a thread is blocked trying to take a write lock, new read locks will fail. The new deadlock cycle detector makes use of our existing lock tracing, so we need to tell it we're holding a write lock before we take the lock for it to work correctly. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--	fs/bcachefs/btree_locking.h	| 9
-rw-r--r--	fs/bcachefs/btree_update_leaf.c	| 11
2 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 1e4c81d8084b..ab3161c1b1f4 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -271,10 +271,15 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
EBUG_ON(!btree_node_intent_locked(path, b->c.level));
+ /*
+ * six locks are unfair, and read locks block while a thread wants a
+ * write lock: thus, we need to tell the cycle detector we have a write
+ * lock _before_ taking the lock:
+ */
+ mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+
if (unlikely(!six_trylock_write(&b->c.lock)))
__bch2_btree_node_lock_write(trans, b);
-
- mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
}
/* relock: */
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 732d09d45041..a8306b16956d 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -817,6 +817,13 @@ static inline int trans_lock_write(struct btree_trans *trans)
if (same_leaf_as_prev(trans, i))
continue;
+ /*
+ * six locks are unfair, and read locks block while a thread
+ * wants a write lock: thus, we need to tell the cycle detector
+ * we have a write lock _before_ taking the lock:
+ */
+ mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
+
if (!six_trylock_write(&insert_l(i)->b->c.lock)) {
if (have_conflicting_read_lock(trans, i->path))
goto fail;
@@ -828,13 +835,13 @@ static inline int trans_lock_write(struct btree_trans *trans)
BUG_ON(ret);
}
- mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
-
bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
}
return 0;
fail:
+ mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_intent);
+
while (--i >= trans->updates) {
if (same_leaf_as_prev(trans, i))
continue;