author     Kent Overstreet <kent.overstreet@gmail.com>  2016-07-22 06:05:06 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:08:09 +0300
commit     271a3d3a4b30dcd9fd274a923fb382f5f113d279 (patch)
tree       b16887dfafd9b011a14ae842f1029ce1211c9aa5 /fs/bcachefs/bset.c
parent     0fdf18047fd38e7b5cc6adba3a81704c88333e1c (diff)
download   linux-271a3d3a4b30dcd9fd274a923fb382f5f113d279.tar.xz
bcachefs: lift ordering restriction on 0 size extents
This lifts the restriction that 0 size extents must not overlap with
other extents, which means we can now sort extents and non-extents the
same way, and will let us simplify a bunch of other stuff as well.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
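For context, the key change is visible in the removed keys_out_of_order()
helper in the diff below: the old validity check had to know whether a node
held extents, while the new check is a single __btree_node_iter_cmp()
position comparison. A minimal sketch of the before/after rule, with
hypothetical stand-in types (struct skey and both functions are illustrative
only, not the real bcachefs helpers):

/*
 * An extent's position is its end, so its start is pos - size;
 * non-extents always have size 0.
 */
struct skey {
	unsigned long long	pos;		/* end position for extents */
	unsigned int		size;		/* 0 for non-extents */
	unsigned char		deleted;
};

/*
 * Old rule (cf. the removed keys_out_of_order()): extent nodes and
 * non-extent nodes needed different checks, because a 0 size (deleted)
 * extent was not allowed to overlap a live key at the same position.
 */
static int old_out_of_order(struct skey prev, struct skey next, int is_extents)
{
	return prev.pos > next.pos - next.size ||
	       ((is_extents ? !next.deleted : !prev.deleted) &&
		prev.pos == next.pos);
}

/* New rule: a single position comparison works for both node types. */
static int new_out_of_order(struct skey prev, struct skey next)
{
	return prev.pos > next.pos;
}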
Diffstat (limited to 'fs/bcachefs/bset.c')
-rw-r--r--  fs/bcachefs/bset.c | 193
1 file changed, 93 insertions(+), 100 deletions(-)
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index cf83911b3f5d..27fa3e230e6e 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -18,6 +18,9 @@
#include <linux/random.h>
#include <linux/prefetch.h>
+static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
+ struct btree *);
+
struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
unsigned offset = __btree_node_key_to_offset(b, k);
@@ -63,8 +66,8 @@ void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
_n = bkey_next(_k);
bch2_bkey_to_text(buf, sizeof(buf), &k);
- printk(KERN_ERR "block %u key %zi/%u: %s\n", set,
- _k->_data - i->_data, i->u64s, buf);
+ printk(KERN_ERR "block %u key %5u: %s\n", set,
+ __btree_node_key_to_offset(b, _k), buf);
if (_n == vstruct_last(i))
continue;
@@ -120,20 +123,6 @@ void bch2_dump_btree_node_iter(struct btree *b,
#ifdef CONFIG_BCACHEFS_DEBUG
-static bool keys_out_of_order(struct btree *b,
- const struct bkey_packed *prev,
- const struct bkey_packed *next,
- bool is_extents)
-{
- struct bkey nextu = bkey_unpack_key(b, next);
-
- return bkey_cmp_left_packed_byval(b, prev, bkey_start_pos(&nextu)) > 0 ||
- ((is_extents
- ? !bkey_deleted(next)
- : !bkey_deleted(prev)) &&
- !bkey_cmp_packed(b, prev, next));
-}
-
void __bch2_verify_btree_nr_keys(struct btree *b)
{
struct bset_tree *t;
@@ -150,16 +139,21 @@ void __bch2_verify_btree_nr_keys(struct btree *b)
BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}
-static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
- struct btree *b,
- struct bkey_packed *k)
+static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
+ struct btree *b)
{
- const struct bkey_packed *n = bch2_btree_node_iter_peek_all(iter, b);
+ struct btree_node_iter iter = *_iter;
+ const struct bkey_packed *k, *n;
+
+ k = bch2_btree_node_iter_peek_all(&iter, b);
+ __bch2_btree_node_iter_advance(&iter, b);
+ n = bch2_btree_node_iter_peek_all(&iter, b);
bkey_unpack_key(b, k);
if (n &&
- keys_out_of_order(b, k, n, iter->is_extents)) {
+ __btree_node_iter_cmp(b, k, n) > 0) {
+ struct btree_node_iter_set *set;
struct bkey ku = bkey_unpack_key(b, k);
struct bkey nu = bkey_unpack_key(b, n);
char buf1[80], buf2[80];
@@ -167,12 +161,22 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
bch2_dump_btree_node(b);
bch2_bkey_to_text(buf1, sizeof(buf1), &ku);
bch2_bkey_to_text(buf2, sizeof(buf2), &nu);
- panic("out of order/overlapping:\n%s\n%s\n", buf1, buf2);
+ printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
+ buf1, buf2);
+ printk(KERN_ERR "iter was:");
+
+ btree_node_iter_for_each(_iter, set) {
+ struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
+ struct bset_tree *t = bch2_bkey_to_bset(b, k);
+ printk(" [%zi %zi]", t - b->set,
+ k->_data - bset(b, t)->_data);
+ }
+ panic("\n");
}
}
void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
- struct btree *b)
+ struct btree *b)
{
struct btree_node_iter_set *set, *s2;
struct bset_tree *t;
@@ -196,72 +200,72 @@ found:
/* Verify iterator is sorted: */
btree_node_iter_for_each(iter, set)
BUG_ON(set != iter->data &&
- btree_node_iter_cmp(iter, b, set[-1], set[0]) > 0);
+ btree_node_iter_cmp(b, set[-1], set[0]) > 0);
}
-void bch2_verify_key_order(struct btree *b,
- struct btree_node_iter *iter,
- struct bkey_packed *where)
+void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
+ struct bkey_packed *insert, unsigned clobber_u64s)
{
struct bset_tree *t = bch2_bkey_to_bset(b, where);
- struct bkey_packed *k, *prev;
- struct bkey uk, uw = bkey_unpack_key(b, where);
-
- k = bch2_bkey_prev_all(b, t, where);
- if (k &&
- keys_out_of_order(b, k, where, iter->is_extents)) {
- char buf1[100], buf2[100];
+ struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
+ struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
+#if 0
+ BUG_ON(prev &&
+ __btree_node_iter_cmp(b, prev, insert) > 0);
+#else
+ if (prev &&
+ __btree_node_iter_cmp(b, prev, insert) > 0) {
+ struct bkey k1 = bkey_unpack_key(b, prev);
+ struct bkey k2 = bkey_unpack_key(b, insert);
+ char buf1[100];
+ char buf2[100];
bch2_dump_btree_node(b);
- uk = bkey_unpack_key(b, k);
- bch2_bkey_to_text(buf1, sizeof(buf1), &uk);
- bch2_bkey_to_text(buf2, sizeof(buf2), &uw);
- panic("out of order with prev:\n%s\n%s\n",
- buf1, buf2);
+ bch2_bkey_to_text(buf1, sizeof(buf1), &k1);
+ bch2_bkey_to_text(buf2, sizeof(buf2), &k2);
+
+ panic("prev > insert:\n"
+ "prev key %5u %s\n"
+ "insert key %5u %s\n",
+ __btree_node_key_to_offset(b, prev), buf1,
+ __btree_node_key_to_offset(b, insert), buf2);
}
+#endif
+#if 0
+ BUG_ON(next != btree_bkey_last(b, t) &&
+ __btree_node_iter_cmp(b, insert, next) > 0);
+#else
+ if (next != btree_bkey_last(b, t) &&
+ __btree_node_iter_cmp(b, insert, next) > 0) {
+ struct bkey k1 = bkey_unpack_key(b, insert);
+ struct bkey k2 = bkey_unpack_key(b, next);
+ char buf1[100];
+ char buf2[100];
- k = bkey_next(where);
- BUG_ON(k != btree_bkey_last(b, t) &&
- keys_out_of_order(b, where, k, iter->is_extents));
-
- for_each_bset(b, t) {
- if (where >= btree_bkey_first(b, t) ||
- where < btree_bkey_last(b, t))
- continue;
-
- k = bch2_btree_node_iter_bset_pos(iter, b, t);
-
- if (k == btree_bkey_last(b, t))
- k = bch2_bkey_prev_all(b, t, k);
-
- while (bkey_cmp_left_packed_byval(b, k, bkey_start_pos(&uw)) > 0 &&
- (prev = bch2_bkey_prev_all(b, t, k)))
- k = prev;
-
- for (;
- k != btree_bkey_last(b, t);
- k = bkey_next(k)) {
- uk = bkey_unpack_key(b, k);
-
- if (iter->is_extents) {
- BUG_ON(!(bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0 ||
- bkey_cmp(uk.p, bkey_start_pos(&uw)) <= 0));
- } else {
- BUG_ON(!bkey_cmp(uw.p, uk.p) &&
- !bkey_deleted(&uk));
- }
-
- if (bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0)
- break;
- }
+ bch2_dump_btree_node(b);
+ bch2_bkey_to_text(buf1, sizeof(buf1), &k1);
+ bch2_bkey_to_text(buf2, sizeof(buf2), &k2);
+
+ panic("insert > next:\n"
+ "insert key %5u %s\n"
+ "next key %5u %s\n",
+ __btree_node_key_to_offset(b, insert), buf1,
+ __btree_node_key_to_offset(b, next), buf2);
}
+#endif
+}
+
+void bch2_verify_key_order(struct btree *b,
+ struct btree_node_iter *_iter,
+ struct bkey_packed *where)
+{
+ bch2_verify_insert_pos(b, where, where, where->u64s);
}
#else
static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
- struct btree *b,
- struct bkey_packed *k) {}
+ struct btree *b) {}
#endif
@@ -1229,6 +1233,7 @@ void bch2_bset_insert(struct btree *b,
struct bkey_packed packed, *src = bkey_to_packed(insert);
bch2_bset_verify_rw_aux_tree(b, t);
+ bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
if (bch2_bkey_pack_key(&packed, &insert->k, f))
src = &packed;
@@ -1255,7 +1260,6 @@ void bch2_bset_insert(struct btree *b,
bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
- bch2_verify_key_order(b, iter, where);
bch2_verify_btree_nr_keys(b);
}
@@ -1461,7 +1465,7 @@ void bch2_btree_node_iter_push(struct btree_node_iter *iter,
noinline __flatten __attribute__((cold))
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
struct btree *b, struct bpos search,
- bool strictly_greater, bool is_extents)
+ bool strictly_greater)
{
struct bset_tree *t;
@@ -1518,7 +1522,7 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
*/
void bch2_btree_node_iter_init(struct btree_node_iter *iter,
struct btree *b, struct bpos search,
- bool strictly_greater, bool is_extents)
+ bool strictly_greater)
{
struct bset_tree *t;
struct bkey_packed p, *packed_search = NULL;
@@ -1526,7 +1530,7 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
EBUG_ON(bkey_cmp(search, b->data->min_key) < 0);
bset_aux_tree_verify(b);
- __bch2_btree_node_iter_init(iter, is_extents);
+ memset(iter, 0, sizeof(*iter));
switch (bch2_bkey_pack_pos_lossy(&p, search, b)) {
case BKEY_PACK_POS_EXACT:
@@ -1537,7 +1541,7 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
break;
case BKEY_PACK_POS_FAIL:
btree_node_iter_init_pack_failed(iter, b, search,
- strictly_greater, is_extents);
+ strictly_greater);
return;
}
@@ -1552,12 +1556,11 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
}
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
- struct btree *b,
- bool is_extents)
+ struct btree *b)
{
struct bset_tree *t;
- __bch2_btree_node_iter_init(iter, is_extents);
+ memset(iter, 0, sizeof(*iter));
for_each_bset(b, t)
__bch2_btree_node_iter_push(iter, b,
@@ -1585,7 +1588,7 @@ static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
{
bool ret;
- if ((ret = (btree_node_iter_cmp(iter, b,
+ if ((ret = (btree_node_iter_cmp(b,
iter->data[first],
iter->data[first + 1]) > 0)))
swap(iter->data[first], iter->data[first + 1]);
@@ -1640,23 +1643,14 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
btree_node_iter_sort_two(iter, b, 1);
}
-/**
- * bch_btree_node_iter_advance - advance @iter by one key
- *
- * Doesn't do debugchecks - for cases where (insert_fixup_extent()) a bset might
- * momentarily have out of order extents.
- */
void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
- struct bkey_packed *k = bch2_btree_node_iter_peek_all(iter, b);
-
- __bch2_btree_node_iter_advance(iter, b);
- bch2_btree_node_iter_next_check(iter, b, k);
-#else
- __bch2_btree_node_iter_advance(iter, b);
+ bch2_btree_node_iter_verify(iter, b);
+ bch2_btree_node_iter_next_check(iter, b);
#endif
+ __bch2_btree_node_iter_advance(iter, b);
}
static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
@@ -1689,8 +1683,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *ite
bch2_btree_node_iter_bset_pos(iter, b, t),
min_key_type);
if (k &&
- (!prev || __btree_node_iter_cmp(iter->is_extents, b,
- k, prev) > 0)) {
+ (!prev || __btree_node_iter_cmp(b, k, prev) > 0)) {
prev = k;
end = t->end_offset;
}
@@ -1723,11 +1716,11 @@ out:
struct btree_node_iter iter2 = *iter;
if (prev)
- bch2_btree_node_iter_advance(&iter2, b);
+ __bch2_btree_node_iter_advance(&iter2, b);
while ((k = bch2_btree_node_iter_peek_all(&iter2, b)) != orig_pos) {
BUG_ON(k->type >= min_key_type);
- bch2_btree_node_iter_advance(&iter2, b);
+ __bch2_btree_node_iter_advance(&iter2, b);
}
}