author    Kent Overstreet <kent.overstreet@linux.dev>  2023-01-09 09:11:18 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:49 +0300
commit    ee2c6ea7760eceee3051ef2f2046d16dc5ab06ec (patch)
tree      e442537fa030ead75a5bfdf689fc8929eb09bc22 /fs/bcachefs/btree_iter.c
parent    6c36318cc702f05d302fb98a99636e320392bdf1 (diff)
download  linux-ee2c6ea7760eceee3051ef2f2046d16dc5ab06ec.tar.xz
bcachefs: btree_iter->ip_allocated
In debug mode, we now track where btree iterators and paths are
initialized/allocated - helpful in tracking down btree path overflows.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
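For readers unfamiliar with the mechanism: the patch records call sites using the
kernel's instruction-pointer macros, where _THIS_IP_ evaluates to the address of
the current instruction and _RET_IP_ to the caller's return address, and a raw
address can later be printed as symbol+offset via the %pS printk format. Below is
a minimal, hypothetical sketch of the pattern; the demo_* names are invented for
illustration, and only ip_allocated, _THIS_IP_, _RET_IP_, and
CONFIG_BCACHEFS_DEBUG come from the patch itself.

        /*
         * Hypothetical sketch of the call-site tracking pattern; the
         * demo_* names are invented and are not part of bcachefs.
         */
        #include <linux/instruction_pointer.h>  /* _THIS_IP_, _RET_IP_ */
        #include <linux/printk.h>

        struct demo_path {
        #ifdef CONFIG_BCACHEFS_DEBUG
                unsigned long   ip_allocated;   /* call site that created this path */
        #endif
        };

        static void demo_path_alloc(struct demo_path *path, unsigned long ip)
        {
        #ifdef CONFIG_BCACHEFS_DEBUG
                /* Record the call site handed down from the outermost caller. */
                path->ip_allocated = ip;
                /* %pS resolves the raw address to symbol+offset in dmesg. */
                pr_debug("path allocated at %pS\n", (void *)ip);
        #endif
        }

        static void demo_entry_point(struct demo_path *path)
        {
                /* _THIS_IP_ pins the recorded site to this exact line. */
                demo_path_alloc(path, _THIS_IP_);
        }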
Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r--  fs/bcachefs/btree_iter.c  |  69
1 file changed, 47 insertions(+), 22 deletions(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 0a0d3aa05395..5034f8ebfb04 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -24,6 +24,15 @@ static inline void btree_path_list_remove(struct btree_trans *, struct btree_pat
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
struct btree_path *);
+static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ return iter->ip_allocated;
+#else
+ return 0;
+#endif
+}
+
static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
/*
@@ -1221,7 +1230,8 @@ static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btr
__flatten
struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
- struct btree_path *path, bool intent)
+ struct btree_path *path, bool intent,
+ unsigned long ip)
{
__btree_path_put(path, intent);
path = btree_path_clone(trans, path, intent);
@@ -1231,15 +1241,15 @@ struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
- struct btree_path *path, struct bpos new_pos,
- bool intent, int cmp)
+ struct btree_path *path, struct bpos new_pos,
+ bool intent, unsigned long ip, int cmp)
{
unsigned level = path->level;
EBUG_ON(trans->restarted);
EBUG_ON(!path->ref);
- path = bch2_btree_path_make_mut(trans, path, intent);
+ path = bch2_btree_path_make_mut(trans, path, intent, ip);
path->pos = new_pos;
trans->paths_sorted = false;
@@ -1524,7 +1534,7 @@ static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
struct btree_path *bch2_path_get(struct btree_trans *trans,
enum btree_id btree_id, struct bpos pos,
unsigned locks_want, unsigned level,
- unsigned flags)
+ unsigned flags, unsigned long ip)
{
struct btree_path *path, *path_pos = NULL;
bool cached = flags & BTREE_ITER_CACHED;
@@ -1552,7 +1562,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
path_pos->btree_id == btree_id &&
path_pos->level == level) {
__btree_path_get(path_pos, intent);
- path = bch2_btree_path_set_pos(trans, path_pos, pos, intent);
+ path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
} else {
path = btree_path_alloc(trans, path_pos);
path_pos = NULL;
@@ -1569,7 +1579,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
for (i = 0; i < ARRAY_SIZE(path->l); i++)
path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef CONFIG_BCACHEFS_DEBUG
- path->ip_allocated = _RET_IP_;
+ path->ip_allocated = ip;
#endif
trans->paths_sorted = false;
}
@@ -1651,7 +1661,8 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
btree_iter_search_key(iter),
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
if (ret)
@@ -1686,7 +1697,8 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
iter->k.p = iter->pos = b->key.k.p;
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
out:
bch2_btree_iter_verify_entry_exit(iter);
@@ -1740,7 +1752,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
*/
path = iter->path =
bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_level_down(trans, path, iter->min_depth);
@@ -1755,7 +1768,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
iter->k.p = iter->pos = b->key.k.p;
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
BUG_ON(iter->path->uptodate);
out:
@@ -1907,10 +1921,12 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
iter->flags & BTREE_ITER_INTENT, 0,
iter->flags|BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL);
+ BTREE_ITER_CACHED_NOFILL,
+ _THIS_IP_);
iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
iter->flags|BTREE_ITER_CACHED) ?:
@@ -1942,7 +1958,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
struct btree_path_level *l;
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
@@ -2092,7 +2109,8 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->update_path = bch2_btree_path_set_pos(trans,
iter->update_path, pos,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ _THIS_IP_);
ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
if (unlikely(ret)) {
k = bkey_s_c_err(ret);
@@ -2124,7 +2142,8 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->pos = iter_pos;
iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
out_no_locked:
@@ -2170,7 +2189,8 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
@@ -2283,7 +2303,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
@@ -2413,7 +2434,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
search_key = btree_iter_search_key(iter);
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
@@ -2678,7 +2700,8 @@ static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
unsigned flags)
{
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags));
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
@@ -2687,7 +2710,8 @@ void bch2_trans_iter_init_outlined(struct btree_trans *trans,
unsigned flags)
{
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags));
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
}
void bch2_trans_node_iter_init(struct btree_trans *trans,
@@ -2703,7 +2727,8 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
flags |= BTREE_ITER_ALL_SNAPSHOTS;
bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
- __bch2_btree_iter_flags(trans, btree_id, flags));
+ __bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
iter->min_depth = depth;
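
A note on the design visible in the hunks above: before this patch, bch2_path_get()
recorded _RET_IP_ directly, which could only ever name its immediate caller; since
it is usually reached through wrappers such as bch2_trans_iter_init_outlined() or
bch2_btree_path_set_pos(), the recorded site was often just the wrapper. The patch
instead threads an ip argument down from the outermost entry points (which pass
_RET_IP_ or _THIS_IP_), and btree_iter_ip_allocated() replays the iterator's
original allocation site when a path is repositioned later. A hypothetical sketch
of the difference, with invented demo_* names:

        /* Hypothetical illustration; demo_* names are not from the patch. */
        #include <linux/instruction_pointer.h>

        static unsigned long demo_recorded_site;

        static void demo_inner(unsigned long ip)
        {
                /*
                 * Grabbing _RET_IP_ here would always record demo_wrapper()
                 * below, no matter who the real caller was. The threaded `ip`
                 * survives any number of intermediate wrappers.
                 */
                demo_recorded_site = ip;
        }

        static void demo_wrapper(void)
        {
                /* Outermost entry point: capture the true call site once. */
                demo_inner(_RET_IP_);
        }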