author     Kent Overstreet <kent.overstreet@gmail.com>   2022-03-11 20:31:52 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-23 00:09:27 +0300
commit     85d8cf161f98993f544c0b2c614873caf7b9c14f (patch)
tree       bc6998212112729d53a479dfb3778d5316adf858 /fs/bcachefs/btree_iter.h
parent     d4d24a6509548a6457f185fddd927df7d148464a (diff)
download   linux-85d8cf161f98993f544c0b2c614873caf7b9c14f.tar.xz
bcachefs: bch2_btree_iter_peek_upto()
In BTREE_ITER_FILTER_SNAPSHOTS mode, we skip over keys in unrelated
snapshots. When we hit the end of an inode, if the next inode(s) are in a
different subvolume, we could potentially have to skip past many keys
before finding a key we can return to the caller, so that it can terminate
the iteration.

This adds a peek_upto() variant to solve this problem, to be used when we
know the range we're searching within.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
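A minimal sketch of the caller pattern this enables (hypothetical example, not
part of the patch: trans, inum, start and end are assumed to come from the
surrounding code, and in FILTER_SNAPSHOTS mode the positions would normally
carry a snapshot ID via SPOS() rather than plain POS()):

	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	/*
	 * Bound the iteration to a single inode: peek_upto() stops at the
	 * end position instead of scanning into the next inode, which could
	 * otherwise mean skipping past many keys in unrelated snapshots.
	 */
	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum, start), 0);

	while (1) {
		k = bch2_btree_iter_peek_upto(&iter, POS(inum, end));
		ret = bkey_err(k);
		if (ret || !k.k)
			break;
		/* process k here */
		bch2_btree_iter_advance(&iter);
	}

	bch2_trans_iter_exit(trans, &iter);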
Diffstat (limited to 'fs/bcachefs/btree_iter.h')
-rw-r--r--  fs/bcachefs/btree_iter.h | 30
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 1e3172a2885a..27b3b82f7df3 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -245,9 +245,14 @@ int __must_check bch2_btree_iter_traverse(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+{
+	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
+}
+
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
@@ -342,13 +347,26 @@ static inline int bkey_err(struct bkey_s_c k)
}
static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
-						     unsigned flags)
+							unsigned flags)
{
	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_peek_slot(iter)
		: bch2_btree_iter_peek(iter);
}
+static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
+							      struct bpos end,
+							      unsigned flags)
+{
+	if (!(flags & BTREE_ITER_SLOTS))
+		return bch2_btree_iter_peek_upto(iter, end);
+
+	if (bkey_cmp(iter->pos, end) > 0)
+		return bkey_s_c_null;
+
+	return bch2_btree_iter_peek_slot(iter);
+}
+
static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	return hweight64(trans->paths_allocated) > BTREE_ITER_MAX / 2
@@ -385,6 +403,14 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
+#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
+			   _start, _end, _flags, _k, _ret)		\
+	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
+				  (_start), (_flags));			\
+	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
+	     !((_ret) = bkey_err(_k)) && (_k).k;			\
+	     bch2_btree_iter_advance(&(_iter)))
+
#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
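As a usage note, a hypothetical caller of the new for_each_btree_key_upto_norestart
macro might look like the sketch below (trans, inum, start and end are placeholders;
the _norestart variants leave transaction-restart handling to the caller, and the end
bound appears to be inclusive, matching the bkey_cmp(iter->pos, end) > 0 check above):

	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_xattrs,
					  POS(inum, start), POS(inum, end),
					  0, k, ret) {
		/* process k; only keys up to the end position are returned */
	}
	bch2_trans_iter_exit(trans, &iter);
	/* ret holds any error from the walk, including restart errors */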