Diffstat (limited to 'drivers/md/persistent-data/dm-btree.c')
-rw-r--r--  drivers/md/persistent-data/dm-btree.c  130
1 file changed, 71 insertions(+), 59 deletions(-)
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 5ce64e93aae7..0c7a2e8d1846 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -13,9 +14,11 @@
#define DM_MSG_PREFIX "btree"
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Array manipulation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void memcpy_disk(void *dest, const void *src, size_t len)
__dm_written_to_disk(src)
{
@@ -23,8 +26,8 @@ static void memcpy_disk(void *dest, const void *src, size_t len)
__dm_unbless_for_disk(src);
}
-static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
- unsigned index, void *elt)
+static void array_insert(void *base, size_t elt_size, unsigned int nr_elts,
+ unsigned int index, void *elt)
__dm_written_to_disk(elt)
{
if (index < nr_elts)
@@ -80,7 +83,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
vt->inc(vt->context, value_ptr(n, 0), nr_entries);
}
-static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
+static int insert_at(size_t value_size, struct btree_node *node, unsigned int index,
uint64_t key, void *value)
__dm_written_to_disk(value)
{
@@ -162,9 +165,9 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
struct frame {
struct dm_block *b;
struct btree_node *n;
- unsigned level;
- unsigned nr_children;
- unsigned current_child;
+ unsigned int level;
+ unsigned int nr_children;
+ unsigned int current_child;
};
struct del_stack {
@@ -193,7 +196,7 @@ static int unprocessed_frames(struct del_stack *s)
static void prefetch_children(struct del_stack *s, struct frame *f)
{
- unsigned i;
+ unsigned int i;
struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
for (i = 0; i < f->nr_children; i++)
@@ -205,7 +208,7 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
return f->level < (info->levels - 1);
}
-static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
+static int push_frame(struct del_stack *s, dm_block_t b, unsigned int level)
{
int r;
uint32_t ref_count;
@@ -371,7 +374,7 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value_le)
{
- unsigned level, last_level = info->levels - 1;
+ unsigned int level, last_level = info->levels - 1;
int r = -ENODATA;
uint64_t rkey;
__le64 internal_value_le;
@@ -467,7 +470,7 @@ out:
int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t *rkey, void *value_le)
{
- unsigned level;
+ unsigned int level;
int r = -ENODATA;
__le64 internal_value_le;
struct ro_spine spine;
@@ -493,7 +496,6 @@ out:
exit_ro_spine(&spine);
return r;
}
-
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
/*----------------------------------------------------------------*/
@@ -502,11 +504,12 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
* Copies entries from one region of a btree node to another. The regions
* must not overlap.
*/
-static void copy_entries(struct btree_node *dest, unsigned dest_offset,
- struct btree_node *src, unsigned src_offset,
- unsigned count)
+static void copy_entries(struct btree_node *dest, unsigned int dest_offset,
+ struct btree_node *src, unsigned int src_offset,
+ unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
+
memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memcpy(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
@@ -515,11 +518,12 @@ static void copy_entries(struct btree_node *dest, unsigned dest_offset,
* Moves entries from one region of a btree node to another. The regions
* may overlap.
*/
-static void move_entries(struct btree_node *dest, unsigned dest_offset,
- struct btree_node *src, unsigned src_offset,
- unsigned count)
+static void move_entries(struct btree_node *dest, unsigned int dest_offset,
+ struct btree_node *src, unsigned int src_offset,
+ unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
+
memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memmove(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
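
Aside (not part of the patch): the only difference between copy_entries() and move_entries() above is memcpy() versus memmove(); the latter tolerates overlapping source and destination ranges. A minimal userspace sketch of why that matters, using illustrative values and standard C only:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/*
	 * Shift four keys up by one slot, in place — the same pattern the
	 * shift helpers below rely on via move_entries().  The source and
	 * destination ranges overlap, so memmove() is required; memcpy()
	 * would be undefined behaviour here.
	 */
	unsigned long long keys[5] = { 10, 20, 30, 40, 0 };

	memmove(keys + 1, keys, 4 * sizeof(*keys));
	keys[0] = 5;

	for (int i = 0; i < 5; i++)
		printf("%llu ", keys[i]);	/* prints: 5 10 20 30 40 */
	printf("\n");
	return 0;
}
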
@@ -528,7 +532,7 @@ static void move_entries(struct btree_node *dest, unsigned dest_offset,
* Erases the first 'count' entries of a btree node, shifting following
* entries down into their place.
*/
-static void shift_down(struct btree_node *n, unsigned count)
+static void shift_down(struct btree_node *n, unsigned int count)
{
move_entries(n, 0, n, count, le32_to_cpu(n->header.nr_entries) - count);
}
@@ -537,7 +541,7 @@ static void shift_down(struct btree_node *n, unsigned count)
* Moves entries in a btree node up 'count' places, making space for
* new entries at the start of the node.
*/
-static void shift_up(struct btree_node *n, unsigned count)
+static void shift_up(struct btree_node *n, unsigned int count)
{
move_entries(n, count, n, 0, le32_to_cpu(n->header.nr_entries));
}
@@ -548,18 +552,20 @@ static void shift_up(struct btree_node *n, unsigned count)
*/
static void redistribute2(struct btree_node *left, struct btree_node *right)
{
- unsigned nr_left = le32_to_cpu(left->header.nr_entries);
- unsigned nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned total = nr_left + nr_right;
- unsigned target_left = total / 2;
- unsigned target_right = total - target_left;
+ unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
+ unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned int total = nr_left + nr_right;
+ unsigned int target_left = total / 2;
+ unsigned int target_right = total - target_left;
if (nr_left < target_left) {
- unsigned delta = target_left - nr_left;
+ unsigned int delta = target_left - nr_left;
+
copy_entries(left, nr_left, right, 0, delta);
shift_down(right, delta);
} else if (nr_left > target_left) {
- unsigned delta = nr_left - target_left;
+ unsigned int delta = nr_left - target_left;
+
if (nr_right)
shift_up(right, delta);
copy_entries(right, 0, left, target_left, delta);
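
Aside (not part of the patch): a worked example of the 50/50 split redistribute2() computes above, as a standalone userspace sketch with illustrative occupancy values:

#include <stdio.h>

int main(void)
{
	unsigned int nr_left = 2, nr_right = 6;			/* example occupancy */
	unsigned int total = nr_left + nr_right;		/* 8 */
	unsigned int target_left = total / 2;			/* 4 */
	unsigned int target_right = total - target_left;	/* 4 */

	if (nr_left < target_left)
		/* redistribute2() copies (target_left - nr_left) entries from
		 * the front of right into left, then shift_down()s right. */
		printf("copy %u entries right -> left\n", target_left - nr_left);
	else if (nr_left > target_left)
		/* ...or shift_up()s right and copies from left's tail. */
		printf("copy %u entries left -> right\n", nr_left - target_left);

	printf("targets: left=%u right=%u\n", target_left, target_right);
	return 0;
}
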
@@ -576,10 +582,10 @@ static void redistribute2(struct btree_node *left, struct btree_node *right)
static void redistribute3(struct btree_node *left, struct btree_node *center,
struct btree_node *right)
{
- unsigned nr_left = le32_to_cpu(left->header.nr_entries);
- unsigned nr_center = le32_to_cpu(center->header.nr_entries);
- unsigned nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned total, target_left, target_center, target_right;
+ unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
+ unsigned int nr_center = le32_to_cpu(center->header.nr_entries);
+ unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned int total, target_left, target_center, target_right;
BUG_ON(nr_center);
@@ -589,19 +595,22 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
target_right = (total - target_left - target_center);
if (nr_left < target_left) {
- unsigned left_short = target_left - nr_left;
+ unsigned int left_short = target_left - nr_left;
+
copy_entries(left, nr_left, right, 0, left_short);
copy_entries(center, 0, right, left_short, target_center);
shift_down(right, nr_right - target_right);
} else if (nr_left < (target_left + target_center)) {
- unsigned left_to_center = nr_left - target_left;
+ unsigned int left_to_center = nr_left - target_left;
+
copy_entries(center, 0, left, target_left, left_to_center);
copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
shift_down(right, nr_right - target_right);
} else {
- unsigned right_short = target_right - nr_right;
+ unsigned int right_short = target_right - nr_right;
+
shift_up(right, right_short);
copy_entries(right, 0, left, nr_left - right_short, right_short);
copy_entries(center, 0, left, target_left, nr_left - target_left);
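
Aside (not part of the patch): a worked example of the three-way branch above. The exact target arithmetic sits in context lines this hunk does not show, so the 4/4/4 split assumed below is illustrative; the branch selection follows the visible code:

#include <stdio.h>

int main(void)
{
	/* center is empty on entry (BUG_ON(nr_center) above). */
	unsigned int nr_left = 9, nr_right = 3;
	/* Assume the near-equal split computed just above the hunk
	 * yields 4/4/4 for a total of 12 entries. */
	unsigned int target_left = 4, target_center = 4, target_right = 4;

	if (nr_left < target_left)
		printf("left is short: top it up from right, fill center from right\n");
	else if (nr_left < target_left + target_center)
		printf("fill center from left, then from right\n");
	else
		/* This example: right is one entry short, so shift_up(right, 1),
		 * take one entry from left's tail for right, then copy left's
		 * entries from index target_left onwards into center. */
		printf("right_short=%u, entries copied to center=%u\n",
		       target_right - nr_right, nr_left - target_left);
	return 0;
}
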
@@ -642,7 +651,7 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
*
* Where A* is a shadow of A.
*/
-static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
+static int split_one_into_two(struct shadow_spine *s, unsigned int parent_index,
struct dm_btree_value_type *vt, uint64_t key)
{
int r;
@@ -696,7 +705,7 @@ static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
* to the new shadow.
*/
static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
- struct btree_node *parent, unsigned index,
+ struct btree_node *parent, unsigned int index,
struct dm_block **result)
{
int r, inc;
@@ -725,11 +734,11 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
* Splits two nodes into three. This is more work, but results in fuller
* nodes, so saves metadata space.
*/
-static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
- struct dm_btree_value_type *vt, uint64_t key)
+static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
+ struct dm_btree_value_type *vt, uint64_t key)
{
int r;
- unsigned middle_index;
+ unsigned int middle_index;
struct dm_block *left, *middle, *right, *parent;
struct btree_node *ln, *rn, *mn, *pn;
__le64 location;
@@ -781,7 +790,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
if (shadow_current(s) != right)
unlock_block(s->info, right);
- return r;
+ return r;
}
@@ -830,7 +839,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
int r;
size_t size;
- unsigned nr_left, nr_right;
+ unsigned int nr_left, nr_right;
struct dm_block *left, *right, *new_parent;
struct btree_node *pn, *ln, *rn;
__le64 val;
@@ -904,7 +913,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
* Redistributes a node's entries with its left sibling.
*/
static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
@@ -933,7 +942,7 @@ static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt
* Redistributes a node's entries with its right sibling.
*/
static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
@@ -961,10 +970,10 @@ static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *v
/*
* Returns the number of spare entries in a node.
*/
-static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned *space)
+static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned int *space)
{
int r;
- unsigned nr_entries;
+ unsigned int nr_entries;
struct dm_block *block;
struct btree_node *node;
@@ -990,17 +999,18 @@ static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigne
*/
#define SPACE_THRESHOLD 8
static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct btree_node *parent = dm_block_data(shadow_parent(s));
- unsigned nr_parent = le32_to_cpu(parent->header.nr_entries);
- unsigned free_space;
+ unsigned int nr_parent = le32_to_cpu(parent->header.nr_entries);
+ unsigned int free_space;
int left_shared = 0, right_shared = 0;
/* Should we move entries to the left sibling? */
if (parent_index > 0) {
dm_block_t left_b = value64(parent, parent_index - 1);
+
r = dm_tm_block_is_shared(s->info->tm, left_b, &left_shared);
if (r)
return r;
@@ -1018,6 +1028,7 @@ static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type
/* Should we move entries to the right sibling? */
if (parent_index < (nr_parent - 1)) {
dm_block_t right_b = value64(parent, parent_index + 1);
+
r = dm_tm_block_is_shared(s->info->tm, right_b, &right_shared);
if (r)
return r;
@@ -1080,7 +1091,7 @@ static bool has_space_for_insert(struct btree_node *node, uint64_t key)
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
struct dm_btree_value_type *vt,
- uint64_t key, unsigned *index)
+ uint64_t key, unsigned int *index)
{
int r, i = *index, top = 1;
struct btree_node *node;
@@ -1214,9 +1225,9 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
}
static bool need_insert(struct btree_node *node, uint64_t *keys,
- unsigned level, unsigned index)
+ unsigned int level, unsigned int index)
{
- return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+ return ((index >= le32_to_cpu(node->header.nr_entries)) ||
(le64_to_cpu(node->keys[index]) != keys[level]));
}
@@ -1226,7 +1237,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
__dm_written_to_disk(value)
{
int r;
- unsigned level, index = -1, last_level = info->levels - 1;
+ unsigned int level, index = -1, last_level = info->levels - 1;
dm_block_t block = root;
struct shadow_spine spine;
struct btree_node *n;
@@ -1309,7 +1320,7 @@ bad_unblessed:
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
return insert(info, root, keys, value, new_root, NULL);
}
@@ -1318,7 +1329,7 @@ EXPORT_SYMBOL_GPL(dm_btree_insert);
int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root,
int *inserted)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
return insert(info, root, keys, value, new_root, inserted);
}
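
Aside (not part of the patch): a hedged sketch of how a caller might drive the insert and lookup entry points touched above. The dm_btree_empty() signature and the __dm_bless_for_disk() call are taken from the wider persistent-data headers rather than from this diff, so treat them as assumptions:

#include "dm-btree.h"

/*
 * Illustrative only: a single-level btree (info->levels == 1) storing
 * __le64 values.  'info' is assumed to be already initialised with a
 * transaction manager and a matching value_type.
 */
static int example_insert_then_lookup(struct dm_btree_info *info,
				      dm_block_t *root)
{
	int r;
	uint64_t key = 123;
	__le64 value_le = cpu_to_le64(456);
	__le64 result_le;

	r = dm_btree_empty(info, root);		/* start from a fresh, empty tree */
	if (r)
		return r;

	__dm_bless_for_disk(&value_le);		/* satisfies the __dm_written_to_disk annotation */
	r = dm_btree_insert(info, *root, &key, &value_le, root);
	if (r)
		return r;

	/* Copies the stored little-endian value back out into result_le. */
	return dm_btree_lookup(info, *root, &key, &result_le);
}
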
@@ -1341,8 +1352,8 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
i = le32_to_cpu(ro_node(s)->header.nr_entries);
if (!i)
return -ENODATA;
- else
- i--;
+
+ i--;
if (find_highest)
*result_key = le64_to_cpu(ro_node(s)->keys[i]);
@@ -1412,7 +1423,7 @@ static int walk_node(struct dm_btree_info *info, dm_block_t block,
void *context)
{
int r;
- unsigned i, nr;
+ unsigned int i, nr;
struct dm_block *node;
struct btree_node *n;
uint64_t keys;
@@ -1455,7 +1466,7 @@ EXPORT_SYMBOL_GPL(dm_btree_walk);
static void prefetch_values(struct dm_btree_cursor *c)
{
- unsigned i, nr;
+ unsigned int i, nr;
__le64 value_le;
struct cursor_node *n = c->nodes + c->depth - 1;
struct btree_node *bn = dm_block_data(n->b);
@@ -1585,6 +1596,7 @@ EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
int r = inc_or_backtrack(c);
+
if (!r) {
r = find_leaf(c);
if (r)