author		Kent Overstreet <kent.overstreet@linux.dev>	2022-08-23 04:05:31 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:09:39 +0300
commit		131dcd5af7e2f1b13c2c0baf3095d7e449eb9859 (patch)
tree		6a69d5c203e63d9b1a637f38519c5b4486a881fb /fs/bcachefs/btree_locking.h
parent		c240c3a94427346f27a7ff48f02cbe03f2c2ebd6 (diff)
bcachefs: Track held write locks
The upcoming lock cycle detection code will need to know precisely which
locks every btree_trans is holding, including write locks - this patch
updates btree_node_locked_type to include write locks.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
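For context, btree_node_locked_type() unpacks a two-bit field per btree level
from path->nodes_locked, storing the lock type offset by one so that zero means
unlocked. Below is a minimal standalone sketch of that encoding - hypothetical
names, plain userspace C, not the kernel code itself - assuming the SIX lock
values read = 0, intent = 1, write = 2, and showing the write state this patch
makes representable:

/*
 * Sketch of the nodes_locked encoding (hypothetical names): two bits
 * per level, stored value = lock type + 1, so 0 means unlocked.
 */
#include <assert.h>
#include <stdint.h>

enum lock_type { T_UNLOCKED = -1, T_READ, T_INTENT, T_WRITE };

static inline enum lock_type type_at(uint32_t nodes_locked, unsigned level)
{
	return (enum lock_type)(T_UNLOCKED + ((nodes_locked >> (level << 1)) & 3));
}

static inline uint32_t mark(uint32_t nodes_locked, unsigned level, enum lock_type t)
{
	/* Clear this level's two bits, then store the new type + 1. */
	nodes_locked &= ~(3U << (level << 1));
	return nodes_locked | ((uint32_t)(t - T_UNLOCKED) << (level << 1));
}

int main(void)
{
	uint32_t nl = 0;

	nl = mark(nl, 0, T_INTENT);
	nl = mark(nl, 1, T_WRITE);	/* representable only after this patch */

	assert(type_at(nl, 0) == T_INTENT);
	assert(type_at(nl, 1) == T_WRITE);
	assert(type_at(nl, 2) == T_UNLOCKED);
	return 0;
}

Because each level already occupied two bits and only three of the four values
were used, the write state fits without widening nodes_locked.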
Diffstat (limited to 'fs/bcachefs/btree_locking.h')
-rw-r--r--	fs/bcachefs/btree_locking.h | 26 ++++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 3a9a4a0d61c4..a221c4fd1bf9 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -32,6 +32,7 @@ enum btree_node_locked_type {
 	BTREE_NODE_UNLOCKED		= -1,
 	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
 	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
+	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
 };
 
 static inline int btree_node_locked_type(struct btree_path *path,
@@ -40,16 +41,19 @@ static inline int btree_node_locked_type(struct btree_path *path,
 	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
 }
 
-static inline bool btree_node_intent_locked(struct btree_path *path,
-					    unsigned level)
+static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
+{
+	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
+}
+
+static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
 {
-	return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
+	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
 }
 
-static inline bool btree_node_read_locked(struct btree_path *path,
-					  unsigned level)
+static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
 {
-	return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
+	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
 }
 
 static inline bool btree_node_locked(struct btree_path *path, unsigned level)
@@ -72,6 +76,7 @@ static inline void mark_btree_node_locked_noreset(struct btree_path *path,
 static inline void mark_btree_node_unlocked(struct btree_path *path,
 					    unsigned level)
 {
+	EBUG_ON(btree_node_write_locked(path, level));
 	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
 }
 
@@ -179,6 +184,9 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
 	EBUG_ON(path->l[b->c.level].b != b);
 	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
+
+	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
 
 	trans_for_each_path_with_node(trans, b, linked)
 		linked->l[b->c.level].lock_seq += 2;
@@ -288,6 +296,8 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
 	if (unlikely(!six_trylock_write(&b->c.lock)))
 		__bch2_btree_node_lock_write(trans, b);
+
+	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
 }
 
 /* relock: */
@@ -311,8 +321,8 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 				   struct btree_path *path, unsigned level)
 {
 	EBUG_ON(btree_node_locked(path, level) &&
-		btree_node_locked_type(path, level) !=
-		__btree_lock_want(path, level));
+		!btree_node_write_locked(path, level) &&
+		btree_node_locked_type(path, level) != __btree_lock_want(path, level));
 
 	return likely(btree_node_locked(path, level)) ||
 		__bch2_btree_node_relock(trans, path, level);
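Taken together, the hunks above keep nodes_locked accurate across a write
lock's lifetime: bch2_btree_node_lock_write() records the upgrade from intent
to write, bch2_btree_node_unlock_write_inlined() records the downgrade back to
intent, and the new EBUG_ON() catches any attempt to mark a node unlocked
while it is still write locked. A hedged, self-contained sketch of those
transitions (hypothetical names; mark()/type_at() mirror the encoding sketch
earlier, not the kernel API):

#include <assert.h>
#include <stdint.h>

enum lock_type { T_UNLOCKED = -1, T_READ, T_INTENT, T_WRITE };

static uint32_t mark(uint32_t nl, unsigned level, enum lock_type t)
{
	nl &= ~(3U << (level << 1));
	return nl | ((uint32_t)(t + 1) << (level << 1));
}

static enum lock_type type_at(uint32_t nl, unsigned level)
{
	return (enum lock_type)(((nl >> (level << 1)) & 3) - 1);
}

static uint32_t mark_unlocked(uint32_t nl, unsigned level)
{
	/* Rough analogue of the new EBUG_ON in mark_btree_node_unlocked(). */
	assert(type_at(nl, level) != T_WRITE);
	return mark(nl, level, T_UNLOCKED);
}

int main(void)
{
	uint32_t nl = mark(0, 0, T_INTENT);	/* intent taken during traversal */

	nl = mark(nl, 0, T_WRITE);		/* bch2_btree_node_lock_write() */
	assert(type_at(nl, 0) == T_WRITE);	/* now visible to cycle detection */

	nl = mark(nl, 0, T_INTENT);		/* ..._unlock_write_inlined() */
	nl = mark_unlocked(nl, 0);		/* legal only after the downgrade */
	assert(type_at(nl, 0) == T_UNLOCKED);
	return 0;
}

The relock assertion is relaxed in the same spirit: a write-locked node
legitimately holds a stronger lock than __btree_lock_want() asks for, so it is
exempted from the exact-type check rather than treated as a bug.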