author:    Kent Overstreet <kent.overstreet@linux.dev>  2022-11-24 11:12:22 +0300
committer: Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:47 +0300
commit:    e88a75ebe86c1df42f0ca9ab6e8fa50db26e7cef (patch)
tree:      3b8608b0ae6e06d405bf6ef63e098416c68830db /fs/bcachefs/btree_iter.c
parent:    e15382125948523cd5c887c5fe4fa4303e9a9dc1 (diff)
download:  linux-e88a75ebe86c1df42f0ca9ab6e8fa50db26e7cef.tar.xz
bcachefs: New bpos_cmp(), bkey_cmp() replacements
This patch introduces
 - bpos_eq()
 - bpos_lt()
 - bpos_le()
 - bpos_gt()
 - bpos_ge()
and equivalent replacements for bkey_cmp(). Looking at the generated
assembly these could probably be improved further, but we already see a
significant code size improvement with this patch.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
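For context, a minimal user-space sketch of what such comparison helpers can look like. This is an illustration, not the kernel code verbatim: the struct below is simplified from the kernel's struct bpos, and the authoritative definitions live in the bcachefs headers.

	#include <stdbool.h>
	#include <stdint.h>

	/* Simplified stand-in for the kernel's struct bpos. */
	struct bpos {
		uint64_t inode;
		uint64_t offset;
		uint32_t snapshot;
	};

	/*
	 * Equality can be a branch-free XOR/OR of the fields: much shorter
	 * than computing a full three-way comparison and testing it against 0.
	 */
	static inline bool bpos_eq(struct bpos l, struct bpos r)
	{
		return !((l.inode    ^ r.inode) |
			 (l.offset   ^ r.offset) |
			 (l.snapshot ^ r.snapshot));
	}

	/* Ordered comparison: decide on the first field that differs. */
	static inline bool bpos_lt(struct bpos l, struct bpos r)
	{
		return l.inode != r.inode   ? l.inode  < r.inode  :
		       l.offset != r.offset ? l.offset < r.offset :
		       l.snapshot < r.snapshot;
	}

	static inline bool bpos_le(struct bpos l, struct bpos r) { return !bpos_lt(r, l); }
	static inline bool bpos_gt(struct bpos l, struct bpos r) { return  bpos_lt(r, l); }
	static inline bool bpos_ge(struct bpos l, struct bpos r) { return !bpos_lt(l, r); }

	/* The bkey_*() variants compare positions ignoring the snapshot field. */
	static inline bool bkey_eq(struct bpos l, struct bpos r)
	{
		return !((l.inode  ^ r.inode) |
			 (l.offset ^ r.offset));
	}

The point of dedicated predicates is that each caller states exactly which relation it needs, so the compiler never has to materialize a -1/0/1 result and then compare it against zero; that is where the code size improvement mentioned above comes from.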
Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r--  fs/bcachefs/btree_iter.c | 86
1 file changed, 43 insertions(+), 43 deletions(-)
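The conversions in the hunks below follow a consistent mapping: bpos_cmp(a, b) < 0 becomes bpos_lt(a, b), <= 0 becomes bpos_le(), > 0 becomes bpos_gt(), >= 0 becomes bpos_ge(), == 0 (or !bpos_cmp()) becomes bpos_eq(), and a bare bpos_cmp() used as a truth value becomes !bpos_eq(). The same mapping applies to bkey_cmp() and the bkey_*() helpers.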
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 238ba10d34e4..8a18b55cab26 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -93,7 +93,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
struct bpos pos = iter->pos;
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
- bkey_cmp(pos, POS_MAX))
+ !bkey_eq(pos, POS_MAX))
pos = bkey_successor(iter, pos);
return pos;
}
@@ -101,13 +101,13 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
static inline bool btree_path_pos_before_node(struct btree_path *path,
struct btree *b)
{
- return bpos_cmp(path->pos, b->data->min_key) < 0;
+ return bpos_lt(path->pos, b->data->min_key);
}
static inline bool btree_path_pos_after_node(struct btree_path *path,
struct btree *b)
{
- return bpos_cmp(b->key.k.p, path->pos) < 0;
+ return bpos_gt(path->pos, b->key.k.p);
}
static inline bool btree_path_pos_in_node(struct btree_path *path,
@@ -133,7 +133,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
ck = (void *) path->l[0].b;
BUG_ON(ck->key.btree_id != path->btree_id ||
- bkey_cmp(ck->key.pos, path->pos));
+ !bkey_eq(ck->key.pos, path->pos));
if (!locked)
btree_node_unlock(trans, path, 0);
@@ -278,8 +278,8 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
iter->pos.snapshot != iter->snapshot);
- BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
- bkey_cmp(iter->pos, iter->k.p) > 0);
+ BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
+ bkey_gt(iter->pos, iter->k.p));
}
static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
@@ -313,7 +313,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
if (ret)
goto out;
- if (!bkey_cmp(prev.k->p, k.k->p) &&
+ if (bkey_eq(prev.k->p, k.k->p) &&
bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
prev.k->p.snapshot) > 0) {
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
@@ -355,11 +355,11 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
continue;
if (!key_cache) {
- if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
- bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
+ if (bkey_ge(pos, path->l[0].b->data->min_key) &&
+ bkey_le(pos, path->l[0].b->key.k.p))
return;
} else {
- if (!bkey_cmp(pos, path->pos))
+ if (bkey_eq(pos, path->pos))
return;
}
}
@@ -1571,16 +1571,16 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *
_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
- EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
+ EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
- if (!k.k || bpos_cmp(path->pos, k.k->p))
+ if (!k.k || !bpos_eq(path->pos, k.k->p))
goto hole;
} else {
struct bkey_cached *ck = (void *) path->l[0].b;
EBUG_ON(ck &&
(path->btree_id != ck->key.btree_id ||
- bkey_cmp(path->pos, ck->key.pos)));
+ !bkey_eq(path->pos, ck->key.pos)));
EBUG_ON(!ck || !ck->valid);
*u = ck->k->k;
@@ -1638,7 +1638,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
if (!b)
goto out;
- BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
+ BUG_ON(bpos_lt(b->key.k.p, iter->pos));
bkey_init(&iter->k);
iter->k.p = iter->pos = b->key.k.p;
@@ -1689,7 +1689,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
b = btree_path_node(path, path->level + 1);
- if (!bpos_cmp(iter->pos, b->key.k.p)) {
+ if (bpos_eq(iter->pos, b->key.k.p)) {
__btree_path_set_level_up(trans, path, path->level++);
} else {
/*
@@ -1732,9 +1732,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
struct bpos pos = iter->k.p;
- bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
- ? bpos_cmp(pos, SPOS_MAX)
- : bkey_cmp(pos, SPOS_MAX)) != 0;
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, SPOS_MAX)
+ : bkey_eq(pos, SPOS_MAX));
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_successor(iter, pos);
@@ -1752,9 +1752,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
- bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
- ? bpos_cmp(pos, POS_MIN)
- : bkey_cmp(pos, POS_MIN)) != 0;
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, POS_MIN)
+ : bkey_eq(pos, POS_MIN));
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_predecessor(iter, pos);
@@ -1773,11 +1773,11 @@ struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
continue;
if (i->btree_id > iter->btree_id)
break;
- if (bpos_cmp(i->k->k.p, iter->path->pos) < 0)
+ if (bpos_lt(i->k->k.p, iter->path->pos))
continue;
if (i->key_cache_already_flushed)
continue;
- if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
+ if (!ret || bpos_lt(i->k->k.p, ret->k.p))
ret = i->k;
}
@@ -1797,7 +1797,7 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
{
struct bkey_i *k;
- if (bpos_cmp(iter->path->pos, iter->journal_pos) < 0)
+ if (bpos_lt(iter->path->pos, iter->journal_pos))
iter->journal_idx = 0;
k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
@@ -1936,8 +1936,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
next_update = btree_trans_peek_updates(iter);
if (next_update &&
- bpos_cmp(next_update->k.p,
- k.k ? k.k->p : l->b->key.k.p) <= 0) {
+ bpos_le(next_update->k.p,
+ k.k ? k.k->p : l->b->key.k.p)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
}
@@ -1950,7 +1950,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
* whiteout, with a real key at the same position, since
* in the btree deleted keys sort before non deleted.
*/
- search_key = bpos_cmp(search_key, k.k->p)
+ search_key = !bpos_eq(search_key, k.k->p)
? k.k->p
: bpos_successor(k.k->p);
continue;
@@ -1958,7 +1958,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (likely(k.k)) {
break;
- } else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
+ } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
/* Advance to next leaf node: */
search_key = bpos_successor(l->b->key.k.p);
} else {
@@ -2008,19 +2008,19 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
*/
if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
iter_pos = k.k->p;
- else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ else if (bkey_gt(bkey_start_pos(k.k), iter->pos))
iter_pos = bkey_start_pos(k.k);
else
iter_pos = iter->pos;
- if (bkey_cmp(iter_pos, end) > 0) {
+ if (bkey_gt(iter_pos, end)) {
bch2_btree_iter_set_pos(iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
if (iter->update_path &&
- bkey_cmp(iter->update_path->pos, k.k->p)) {
+ !bkey_eq(iter->update_path->pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->update_path = NULL;
@@ -2143,7 +2143,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
/* Check if we should go up to the parent node: */
if (!k.k ||
(iter->advanced &&
- !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
+ bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
iter->pos = path_l(iter->path)->b->key.k.p;
btree_path_set_level_up(trans, iter->path);
iter->advanced = false;
@@ -2159,7 +2159,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
if (iter->path->level != iter->min_depth &&
(iter->advanced ||
!k.k ||
- bpos_cmp(iter->pos, k.k->p))) {
+ !bpos_eq(iter->pos, k.k->p))) {
btree_path_set_level_down(trans, iter->path, iter->min_depth);
iter->pos = bpos_successor(iter->pos);
iter->advanced = false;
@@ -2170,7 +2170,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
if (iter->path->level == iter->min_depth &&
iter->advanced &&
k.k &&
- !bpos_cmp(iter->pos, k.k->p)) {
+ bpos_eq(iter->pos, k.k->p)) {
iter->pos = bpos_successor(iter->pos);
iter->advanced = false;
continue;
@@ -2178,7 +2178,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
if (iter->advanced &&
iter->path->level == iter->min_depth &&
- bpos_cmp(k.k->p, iter->pos))
+ !bpos_eq(k.k->p, iter->pos))
iter->advanced = false;
BUG_ON(iter->advanced);
@@ -2248,8 +2248,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
&iter->path->l[0], &iter->k);
if (!k.k ||
((iter->flags & BTREE_ITER_IS_EXTENTS)
- ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
- : bpos_cmp(k.k->p, search_key) > 0))
+ ? bpos_ge(bkey_start_pos(k.k), search_key)
+ : bpos_gt(k.k->p, search_key)))
k = btree_path_level_prev(trans, iter->path,
&iter->path->l[0], &iter->k);
@@ -2263,7 +2263,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
* longer at the same _key_ (not pos), return
* that candidate
*/
- if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
+ if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
bch2_path_put_nokeep(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->path = saved_path;
@@ -2298,7 +2298,7 @@ got_key:
}
break;
- } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
+ } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
} else {
@@ -2309,10 +2309,10 @@ got_key:
}
}
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+ EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
/* Extents can straddle iter->pos: */
- if (bkey_cmp(k.k->p, iter->pos) < 0)
+ if (bkey_lt(k.k->p, iter->pos))
iter->pos = k.k->p;
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
@@ -2377,7 +2377,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
struct bkey_i *next_update;
if ((next_update = btree_trans_peek_updates(iter)) &&
- !bpos_cmp(next_update->k.p, iter->pos)) {
+ bpos_eq(next_update->k.p, iter->pos)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
goto out;
@@ -2433,7 +2433,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
next = k.k ? bkey_start_pos(k.k) : POS_MAX;
- if (bkey_cmp(iter->pos, next) < 0) {
+ if (bkey_lt(iter->pos, next)) {
bkey_init(&iter->k);
iter->k.p = iter->pos;