path: root/fs/bcachefs/btree_iter.h
author      Kent Overstreet <kent.overstreet@gmail.com>    2020-06-06 19:28:01 +0300
committer   Kent Overstreet <kent.overstreet@linux.dev>    2023-10-23 00:08:21 +0300
commit      c43a6ef9a0747ef1094ff14e173513070ed91600 (patch)
tree        d207064b67f5222cd7ce6a87557ef96101d13007 /fs/bcachefs/btree_iter.h
parent      5e82a9a1f4f82e273530b90d107638a5969d1de0 (diff)
download    linux-c43a6ef9a0747ef1094ff14e173513070ed91600.tar.xz
bcachefs: btree_bkey_cached_common
This is prep work for the btree key cache: btree iterators will point to either struct btree, or a new struct bkey_cached.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
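The b->c.level and b->c.lock.state.seq accessors in the diff below imply that the lock and level fields move out of struct btree into a shared header embedded in both lockable types. A minimal sketch of that layout, assuming only the fields the accessors here touch (the real definition lives in btree_types.h and carries more state):

	struct btree_bkey_cached_common {
		struct six_lock	lock;	/* was btree::lock */
		u8		level;	/* was btree::level */
	};

	/* Both objects embed the common header as their first member,
	 * so locking code can operate on either through &b->c: */
	struct btree {
		struct btree_bkey_cached_common c;
		/* ... btree-only fields ... */
	};

	struct bkey_cached {
		struct btree_bkey_cached_common c;
		/* ... key-cache-only fields ... */
	};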
Diffstat (limited to 'fs/bcachefs/btree_iter.h')
-rw-r--r--    fs/bcachefs/btree_iter.h    27
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index dc15d1b831a8..171e729ed3ea 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -17,10 +17,23 @@ static inline struct btree *btree_iter_node(struct btree_iter *iter,
return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
}
+static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
+ const struct btree *b, unsigned level)
+{
+ /*
+ * We don't compare the low bits of the lock sequence numbers because
+ * @iter might have taken a write lock on @b, and we don't want to skip
+ * the linked iterator if the sequence numbers were equal before taking
+ * that write lock. The lock sequence number is incremented by taking
+ * and releasing write locks and is even when unlocked:
+ */
+ return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+}
+
static inline struct btree *btree_node_parent(struct btree_iter *iter,
struct btree *b)
{
- return btree_iter_node(iter, b->level + 1);
+ return btree_iter_node(iter, b->c.level + 1);
}
static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
@@ -55,16 +68,8 @@ __trans_next_iter(struct btree_trans *trans, unsigned idx)
static inline bool __iter_has_node(const struct btree_iter *iter,
const struct btree *b)
{
- /*
- * We don't compare the low bits of the lock sequence numbers because
- * @iter might have taken a write lock on @b, and we don't want to skip
- * the linked iterator if the sequence numbers were equal before taking
- * that write lock. The lock sequence number is incremented by taking
- * and releasing write locks and is even when unlocked:
- */
-
- return iter->l[b->level].b == b &&
- iter->l[b->level].lock_seq >> 1 == b->lock.state.seq >> 1;
+ return iter->l[b->c.level].b == b &&
+ btree_node_lock_seq_matches(iter, b, b->c.level);
}
static inline struct btree_iter *
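The comment in btree_node_lock_seq_matches() above is easiest to check with concrete numbers: the lock sequence number is even while the node is write-unlocked and is bumped once on write-lock acquire and once on release. So if the iterator recorded the seq while the node was unlocked and then took the write lock itself, the two values differ only in the low bit and must still compare equal. A standalone sketch with made-up values (illustrative only, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t iter_seq = 4;		/* even: unlocked when recorded */

		/* The iterator itself takes the write lock: low bit flips. */
		uint32_t node_seq = iter_seq + 1;
		assert(iter_seq >> 1 == node_seq >> 1);	/* still a match */

		/* A full write cycle (acquire + release) elsewhere means the
		 * node may have been modified; the match correctly fails. */
		node_seq = iter_seq + 2;
		assert(iter_seq >> 1 != node_seq >> 1);
		return 0;
	}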