author	Kent Overstreet <kent.overstreet@gmail.com>	2022-08-22 00:20:42 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:09:39 +0300
commit	2e27f6567b2662a2f7440a651e007ebc77cdcc7a (patch)
tree	b9da2166440f9db2ec005c908e0eb0d7e8c7e582 /fs
parent	d4263e563879f6dda86052881fbbc9e21e6e07f5 (diff)
download	linux-2e27f6567b2662a2f7440a651e007ebc77cdcc7a.tar.xz
bcachefs: Kill nodes_intent_locked
Previously, we used two different bit arrays for tracking held btree node locks. This patch switches to an array of two-bit integers, which will let us track, in a future patch, when we hold a write lock.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
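To make the new encoding concrete, here is a minimal, self-contained sketch of how a packed two-bit-per-level lock word behaves. The enum values and the bit manipulation mirror the code touched by this patch, but the helper names (mark_node_locked, node_locked_type), the standalone nodes_locked variable, and the test harness are illustrative stand-ins, not the in-tree mark_btree_node_locked_noreset() / btree_node_locked_type() themselves.

	#include <assert.h>
	#include <stdio.h>

	/* Unlocked is -1 so that "stored value - 1" maps
	 * 0 -> unlocked, 1 -> read, 2 -> intent. */
	enum node_locked_type {
		NODE_UNLOCKED      = -1,
		NODE_READ_LOCKED   = 0,	/* SIX_LOCK_read   */
		NODE_INTENT_LOCKED = 1,	/* SIX_LOCK_intent */
	};

	/* Two bits per btree level packed into one word, as in the new
	 * path->nodes_locked field (a u8 in the patch; wider here for clarity). */
	static unsigned nodes_locked;

	static void mark_node_locked(unsigned level, enum node_locked_type type)
	{
		nodes_locked &= ~(3U << (level << 1));		/* clear this level's two bits       */
		nodes_locked |= (type + 1) << (level << 1);	/* store 0/1/2 = unlocked/read/intent */
	}

	static int node_locked_type(unsigned level)
	{
		return NODE_UNLOCKED + ((nodes_locked >> (level << 1)) & 3);
	}

	int main(void)
	{
		mark_node_locked(0, NODE_READ_LOCKED);
		mark_node_locked(2, NODE_INTENT_LOCKED);

		assert(node_locked_type(0) == NODE_READ_LOCKED);
		assert(node_locked_type(1) == NODE_UNLOCKED);
		assert(node_locked_type(2) == NODE_INTENT_LOCKED);

		printf("nodes_locked = 0x%x\n", nodes_locked);	/* 0x21: level 2 intent, level 0 read */
		return 0;
	}

Packing both lock states into one field is what leaves room for a third per-level state, which the commit message says a future patch will use to track held write locks.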
Diffstat (limited to 'fs')
-rw-r--r--	fs/bcachefs/btree_iter.c	2
-rw-r--r--	fs/bcachefs/btree_locking.h	26
-rw-r--r--	fs/bcachefs/btree_types.h	5
3 files changed, 9 insertions, 24 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 147250ce3af8..1dc243f63b2d 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1519,7 +1519,6 @@ static struct btree_path *btree_path_alloc(struct btree_trans *trans,
path->ref = 0;
path->intent_ref = 0;
path->nodes_locked = 0;
- path->nodes_intent_locked = 0;
btree_path_list_add(trans, pos, path);
trans->paths_sorted = false;
@@ -1574,7 +1573,6 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
path->level = level;
path->locks_want = locks_want;
path->nodes_locked = 0;
- path->nodes_intent_locked = 0;
for (i = 0; i < ARRAY_SIZE(path->l); i++)
path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef CONFIG_BCACHEFS_DEBUG
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index f00abaaa0ab5..2253a15d61c9 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -37,14 +37,7 @@ enum btree_node_locked_type {
static inline int btree_node_locked_type(struct btree_path *path,
unsigned level)
{
- /*
- * We're relying on the fact that if nodes_intent_locked is set
- * nodes_locked must be set as well, so that we can compute without
- * branches:
- */
- return BTREE_NODE_UNLOCKED +
- ((path->nodes_locked >> level) & 1) +
- ((path->nodes_intent_locked >> level) & 1);
+ return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}
static inline bool btree_node_intent_locked(struct btree_path *path,
@@ -65,20 +58,15 @@ static inline bool btree_node_locked(struct btree_path *path, unsigned level)
}
static inline void mark_btree_node_locked_noreset(struct btree_path *path,
- unsigned level,
- enum btree_node_locked_type type)
+ unsigned level,
+ enum btree_node_locked_type type)
{
/* relying on this to avoid a branch */
BUILD_BUG_ON(SIX_LOCK_read != 0);
BUILD_BUG_ON(SIX_LOCK_intent != 1);
- path->nodes_locked &= ~(1 << level);
- path->nodes_intent_locked &= ~(1 << level);
-
- if (type != BTREE_NODE_UNLOCKED) {
- path->nodes_locked |= 1 << level;
- path->nodes_intent_locked |= type << level;
- }
+ path->nodes_locked &= ~(3U << (level << 1));
+ path->nodes_locked |= (type + 1) << (level << 1);
}
static inline void mark_btree_node_unlocked(struct btree_path *path,
@@ -162,12 +150,12 @@ static inline void btree_node_unlock(struct btree_trans *trans,
static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
- return __ffs(path->nodes_locked);
+ return __ffs(path->nodes_locked) >> 1;
}
static inline int btree_path_highest_level_locked(struct btree_path *path)
{
- return __fls(path->nodes_locked);
+ return __fls(path->nodes_locked) >> 1;
}
static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 73aaa1196faf..ce148c21fd3b 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -232,9 +232,8 @@ struct btree_path {
*/
bool should_be_locked:1;
unsigned level:3,
- locks_want:4,
- nodes_locked:4,
- nodes_intent_locked:4;
+ locks_want:4;
+ u8 nodes_locked;
struct btree_path_level {
struct btree *b;