From c7d4f7eeb6da9408e9ba7475fe2624bdb4d837d0 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Wed, 25 Sep 2019 16:46:02 -0700 Subject: rbtree: avoid generating code twice for the cached versions (tools copy) As was already noted in rbtree.h, the logic to cache rb_first (or rb_last) can easily be implemented externally to the core rbtree api. This commit takes the changes applied to the include/linux/ and lib/ rbtree files in 9f973cb38088 ("lib/rbtree: avoid generating code twice for the cached versions"), and applies these to the tools/include/linux/ and tools/lib/ files as well to keep them synchronized. Link: http://lkml.kernel.org/r/20190703034812.53002-1-walken@google.com Signed-off-by: Michel Lespinasse Cc: David Howells Cc: Davidlohr Bueso Cc: Peter Zijlstra (Intel) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/include/linux/rbtree.h | 71 ++++++++++++++++++++++------------ tools/include/linux/rbtree_augmented.h | 31 ++++++--------- tools/lib/rbtree.c | 37 ++---------------- 3 files changed, 62 insertions(+), 77 deletions(-) (limited to 'tools') diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h index d83763a5327c..e03b1ea23e0e 100644 --- a/tools/include/linux/rbtree.h +++ b/tools/include/linux/rbtree.h @@ -31,25 +31,9 @@ struct rb_root { struct rb_node *rb_node; }; -/* - * Leftmost-cached rbtrees. - * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. - */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) #define RB_ROOT (struct rb_root) { NULL, } -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -71,12 +55,6 @@ extern struct rb_node *rb_prev(const struct rb_node *); extern struct rb_node *rb_first(const struct rb_root *); extern struct rb_node *rb_last(const struct rb_root *); -extern void rb_insert_color_cached(struct rb_node *, - struct rb_root_cached *, bool); -extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); -/* Same as rb_first(), but O(1) */ -#define rb_first_cached(root) (root)->rb_leftmost - /* Postorder iteration - always visit the parent after its children */ extern struct rb_node *rb_first_postorder(const struct rb_root *); extern struct rb_node *rb_next_postorder(const struct rb_node *); @@ -84,8 +62,6 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *); /* Fast replacement of a single node without remove/rebalance/add/rebalance */ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); -extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, - struct rb_root_cached *root); static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link) @@ -129,4 +105,51 @@ static inline void rb_erase_init(struct rb_node *n, struct rb_root *root) rb_erase(n, root); RB_CLEAR_NODE(n); } + +/* + * Leftmost-cached rbtrees. 
+ * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. + */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +/* Same as rb_first(), but O(1) */ +#define rb_first_cached(root) (root)->rb_leftmost + +static inline void rb_insert_color_cached(struct rb_node *node, + struct rb_root_cached *root, + bool leftmost) +{ + if (leftmost) + root->rb_leftmost = node; + rb_insert_color(node, &root->rb_root); +} + +static inline void rb_erase_cached(struct rb_node *node, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase(node, &root->rb_root); +} + +static inline void rb_replace_node_cached(struct rb_node *victim, + struct rb_node *new, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == victim) + root->rb_leftmost = new; + rb_replace_node(victim, new, &root->rb_root); +} + #endif /* __TOOLS_LINUX_PERF_RBTREE_H */ diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h index ddd01006ece5..467a3eefe1d2 100644 --- a/tools/include/linux/rbtree_augmented.h +++ b/tools/include/linux/rbtree_augmented.h @@ -32,17 +32,16 @@ struct rb_augment_callbacks { void (*rotate)(struct rb_node *old, struct rb_node *new); }; -extern void __rb_insert_augmented(struct rb_node *node, - struct rb_root *root, - bool newleft, struct rb_node **leftmost, +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + /* * Fixup the rbtree and update the augmented information when rebalancing. * * On insertion, the user must update the augmented information on the path * leading to the inserted node, then call rb_link_node() as usual and - * rb_augment_inserted() instead of the usual rb_insert_color() call. - * If rb_augment_inserted() rebalances the rbtree, it will callback into + * rb_insert_augmented() instead of the usual rb_insert_color() call. + * If rb_insert_augmented() rebalances the rbtree, it will callback into * a user provided function to update the augmented information on the * affected subtrees. 
*/ @@ -50,7 +49,7 @@ static inline void rb_insert_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, root, false, NULL, augment->rotate); + __rb_insert_augmented(node, root, augment->rotate); } static inline void @@ -58,8 +57,9 @@ rb_insert_augmented_cached(struct rb_node *node, struct rb_root_cached *root, bool newleft, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, &root->rb_root, - newleft, &root->rb_leftmost, augment->rotate); + if (newleft) + root->rb_leftmost = node; + rb_insert_augmented(node, &root->rb_root, augment); } #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ @@ -139,7 +139,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, static __always_inline struct rb_node * __rb_erase_augmented(struct rb_node *node, struct rb_root *root, - struct rb_node **leftmost, const struct rb_augment_callbacks *augment) { struct rb_node *child = node->rb_right; @@ -147,9 +146,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, struct rb_node *parent, *rebalance; unsigned long pc; - if (leftmost && node == *leftmost) - *leftmost = rb_next(node); - if (!tmp) { /* * Case 1: node to erase has no more than 1 child (easy!) @@ -249,8 +245,7 @@ static __always_inline void rb_erase_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, root, - NULL, augment); + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); if (rebalance) __rb_erase_color(rebalance, root, augment->rotate); } @@ -259,11 +254,9 @@ static __always_inline void rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root, - &root->rb_leftmost, - augment); - if (rebalance) - __rb_erase_color(rebalance, &root->rb_root, augment->rotate); + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase_augmented(node, &root->rb_root, augment); } #endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */ diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c index 804f145e3113..2548ff8c4d9c 100644 --- a/tools/lib/rbtree.c +++ b/tools/lib/rbtree.c @@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, static __always_inline void __rb_insert(struct rb_node *node, struct rb_root *root, - bool newleft, struct rb_node **leftmost, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) { struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; - if (newleft) - *leftmost = node; - while (true) { /* * Loop invariant: node is red. 
@@ -436,34 +432,17 @@ static const struct rb_augment_callbacks dummy_callbacks = { void rb_insert_color(struct rb_node *node, struct rb_root *root) { - __rb_insert(node, root, false, NULL, dummy_rotate); + __rb_insert(node, root, dummy_rotate); } void rb_erase(struct rb_node *node, struct rb_root *root) { struct rb_node *rebalance; - rebalance = __rb_erase_augmented(node, root, - NULL, &dummy_callbacks); + rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); if (rebalance) ____rb_erase_color(rebalance, root, dummy_rotate); } -void rb_insert_color_cached(struct rb_node *node, - struct rb_root_cached *root, bool leftmost) -{ - __rb_insert(node, &root->rb_root, leftmost, - &root->rb_leftmost, dummy_rotate); -} - -void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) -{ - struct rb_node *rebalance; - rebalance = __rb_erase_augmented(node, &root->rb_root, - &root->rb_leftmost, &dummy_callbacks); - if (rebalance) - ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate); -} - /* * Augmented rbtree manipulation functions. * @@ -472,10 +451,9 @@ void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) */ void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, - bool newleft, struct rb_node **leftmost, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) { - __rb_insert(node, root, newleft, leftmost, augment_rotate); + __rb_insert(node, root, augment_rotate); } /* @@ -580,15 +558,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, __rb_change_child(victim, new, parent, root); } -void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, - struct rb_root_cached *root) -{ - rb_replace_node(victim, new, &root->rb_root); - - if (root->rb_leftmost == victim) - root->rb_leftmost = new; -} - static struct rb_node *rb_left_deepest_node(const struct rb_node *node) { for (;;) { -- cgit v1.2.3
From 444b8a83f1e01584ff2d53f5951d8e836c0070b5 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Wed, 25 Sep 2019 16:46:04 -0700 Subject: augmented rbtree: add comments for RB_DECLARE_CALLBACKS macro
Patch series "make RB_DECLARE_CALLBACKS more generic", v3.
These changes are intended to make the RB_DECLARE_CALLBACKS macro more generic (allowing the augmented subtree information to be a struct instead of a scalar).
I have verified the compiled lib/interval_tree.o and mm/mmap.o files to check that they didn't change. This held as expected for interval_tree.o; mmap.o did have some changes which could be reverted by marking __vma_link_rb as noinline. I did not add such a change to the patchset; I felt it was reasonable enough to leave the inlining decision up to the compiler.
This patch (of 3): Add a short comment summarizing the arguments to RB_DECLARE_CALLBACKS. The arguments are also now capitalized. This copies the style of the INTERVAL_TREE_DEFINE macro. No functional changes in this commit, only comments and capitalization.
Link: http://lkml.kernel.org/r/20190703040156.56953-2-walken@google.com Signed-off-by: Michel Lespinasse Acked-by: Davidlohr Bueso Acked-by: Peter Zijlstra (Intel) Cc: David Howells Cc: Uladzislau Rezki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rbtree_augmented.h | 54 +++++++++++++++++++++------------- tools/include/linux/rbtree_augmented.h | 54 +++++++++++++++++++++------------- 2 files changed, 66 insertions(+), 42 deletions(-) (limited to 'tools') diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 179faab29f52..979941600082 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -60,39 +60,51 @@ rb_insert_augmented_cached(struct rb_node *node, rb_insert_augmented(node, &root->rb_root, augment); } -#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ - rbtype, rbaugmented, rbcompute) \ +/* + * Template for declaring augmented rbtree callbacks + * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBTYPE: type of the RBAUGMENTED field + * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data + */ + +#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ + RBTYPE, RBAUGMENTED, RBCOMPUTE) \ static inline void \ -rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \ { \ while (rb != stop) { \ - rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ - rbtype augmented = rbcompute(node); \ - if (node->rbaugmented == augmented) \ + RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \ + RBTYPE augmented = RBCOMPUTE(node); \ + if (node->RBAUGMENTED == augmented) \ break; \ - node->rbaugmented = augmented; \ - rb = rb_parent(&node->rbfield); \ + node->RBAUGMENTED = augmented; \ + rb = rb_parent(&node->RBFIELD); \ } \ } \ static inline void \ -rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ - rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ - new->rbaugmented = old->rbaugmented; \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ } \ static void \ -rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ - rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ - new->rbaugmented = old->rbaugmented; \ - old->rbaugmented = rbcompute(old); \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ + old->RBAUGMENTED = RBCOMPUTE(old); \ } \ -rbstatic const struct rb_augment_callbacks rbname = { \ - .propagate = rbname ## _propagate, \ - .copy = rbname ## _copy, \ - .rotate = rbname ## _rotate \ +RBSTATIC const struct rb_augment_callbacks RBNAME = { \ + .propagate = RBNAME ## _propagate, \ + .copy = RBNAME ## _copy, \ + .rotate = RBNAME ## _rotate \ }; diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h index 467a3eefe1d2..de3a480204ba 100644 --- 
a/tools/include/linux/rbtree_augmented.h +++ b/tools/include/linux/rbtree_augmented.h @@ -62,39 +62,51 @@ rb_insert_augmented_cached(struct rb_node *node, rb_insert_augmented(node, &root->rb_root, augment); } -#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ - rbtype, rbaugmented, rbcompute) \ +/* + * Template for declaring augmented rbtree callbacks + * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBTYPE: type of the RBAUGMENTED field + * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data + */ + +#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ + RBTYPE, RBAUGMENTED, RBCOMPUTE) \ static inline void \ -rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \ { \ while (rb != stop) { \ - rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ - rbtype augmented = rbcompute(node); \ - if (node->rbaugmented == augmented) \ + RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \ + RBTYPE augmented = RBCOMPUTE(node); \ + if (node->RBAUGMENTED == augmented) \ break; \ - node->rbaugmented = augmented; \ - rb = rb_parent(&node->rbfield); \ + node->RBAUGMENTED = augmented; \ + rb = rb_parent(&node->RBFIELD); \ } \ } \ static inline void \ -rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ - rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ - new->rbaugmented = old->rbaugmented; \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ } \ static void \ -rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ - rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ - new->rbaugmented = old->rbaugmented; \ - old->rbaugmented = rbcompute(old); \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ + old->RBAUGMENTED = RBCOMPUTE(old); \ } \ -rbstatic const struct rb_augment_callbacks rbname = { \ - .propagate = rbname ## _propagate, \ - .copy = rbname ## _copy, \ - .rotate = rbname ## _rotate \ +RBSTATIC const struct rb_augment_callbacks RBNAME = { \ + .propagate = RBNAME ## _propagate, \ + .copy = RBNAME ## _copy, \ + .rotate = RBNAME ## _rotate \ }; -- cgit v1.2.3 From 315cc066b8ae8349a27887ad7a34e1916e9797fe Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Wed, 25 Sep 2019 16:46:07 -0700 Subject: augmented rbtree: add new RB_DECLARE_CALLBACKS_MAX macro Add RB_DECLARE_CALLBACKS_MAX, which generates augmented rbtree callbacks for the case where the augmented value is a scalar whose definition follows a max(f(node)) pattern. This actually covers all present uses of RB_DECLARE_CALLBACKS, and saves some (source) code duplication in the various RBCOMPUTE function definitions. 
[walken@google.com: fix mm/vmalloc.c] Link: http://lkml.kernel.org/r/CANN689FXgK13wDYNh1zKxdipeTuALG4eKvKpsdZqKFJ-rvtGiQ@mail.gmail.com [walken@google.com: re-add check to check_augmented()] Link: http://lkml.kernel.org/r/20190727022027.GA86863@google.com Link: http://lkml.kernel.org/r/20190703040156.56953-3-walken@google.com Signed-off-by: Michel Lespinasse Acked-by: Peter Zijlstra (Intel) Cc: David Howells Cc: Davidlohr Bueso Cc: Uladzislau Rezki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat_rbtree.c | 19 +++-------------- drivers/block/drbd/drbd_interval.c | 29 +++----------------------- include/linux/interval_tree_generic.h | 22 ++------------------ include/linux/rbtree_augmented.h | 36 ++++++++++++++++++++++++++++++++- lib/rbtree_test.c | 37 ++++++++++++++++------------------ mm/mmap.c | 29 ++++++++++++++++---------- mm/vmalloc.c | 5 ++--- tools/include/linux/rbtree_augmented.h | 36 ++++++++++++++++++++++++++++++++- 8 files changed, 115 insertions(+), 98 deletions(-) (limited to 'tools') diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index fa16036fa592..65ebe4b88f7c 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -54,23 +54,10 @@ static u64 get_subtree_max_end(struct rb_node *node) return ret; } -static u64 compute_subtree_max_end(struct memtype *data) -{ - u64 max_end = data->end, child_max_end; - - child_max_end = get_subtree_max_end(data->rb.rb_right); - if (child_max_end > max_end) - max_end = child_max_end; - - child_max_end = get_subtree_max_end(data->rb.rb_left); - if (child_max_end > max_end) - max_end = child_max_end; - - return max_end; -} +#define NODE_END(node) ((node)->end) -RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb, - u64, subtree_max_end, compute_subtree_max_end) +RB_DECLARE_CALLBACKS_MAX(static, memtype_rb_augment_cb, + struct memtype, rb, u64, subtree_max_end, NODE_END) /* Find the first (lowest start addr) overlapping range from rb tree */ static struct memtype *memtype_rb_lowest_match(struct rb_root *root, diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c index c58986556161..651bd0236a99 100644 --- a/drivers/block/drbd/drbd_interval.c +++ b/drivers/block/drbd/drbd_interval.c @@ -13,33 +13,10 @@ sector_t interval_end(struct rb_node *node) return this->end; } -/** - * compute_subtree_last - compute end of @node - * - * The end of an interval is the highest (start + (size >> 9)) value of this - * node and of its children. Called for @node and its parents whenever the end - * may have changed. 
- */ -static inline sector_t -compute_subtree_last(struct drbd_interval *node) -{ - sector_t max = node->sector + (node->size >> 9); - - if (node->rb.rb_left) { - sector_t left = interval_end(node->rb.rb_left); - if (left > max) - max = left; - } - if (node->rb.rb_right) { - sector_t right = interval_end(node->rb.rb_right); - if (right > max) - max = right; - } - return max; -} +#define NODE_END(node) ((node)->sector + ((node)->size >> 9)) -RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb, - sector_t, end, compute_subtree_last); +RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks, + struct drbd_interval, rb, sector_t, end, NODE_END); /** * drbd_insert_interval - insert a new interval into a tree diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h index 855476145fe1..aaa8a0767aa3 100644 --- a/include/linux/interval_tree_generic.h +++ b/include/linux/interval_tree_generic.h @@ -30,26 +30,8 @@ \ /* Callbacks for augmented rbtree insert and remove */ \ \ -static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \ -{ \ - ITTYPE max = ITLAST(node), subtree_last; \ - if (node->ITRB.rb_left) { \ - subtree_last = rb_entry(node->ITRB.rb_left, \ - ITSTRUCT, ITRB)->ITSUBTREE; \ - if (max < subtree_last) \ - max = subtree_last; \ - } \ - if (node->ITRB.rb_right) { \ - subtree_last = rb_entry(node->ITRB.rb_right, \ - ITSTRUCT, ITRB)->ITSUBTREE; \ - if (max < subtree_last) \ - max = subtree_last; \ - } \ - return max; \ -} \ - \ -RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \ - ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \ +RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \ + ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \ \ /* Insert / remove interval nodes from the tree */ \ \ diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 979941600082..e5937e387e02 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -61,7 +61,7 @@ rb_insert_augmented_cached(struct rb_node *node, } /* - * Template for declaring augmented rbtree callbacks + * Template for declaring augmented rbtree callbacks (generic case) * * RBSTATIC: 'static' or empty * RBNAME: name of the rb_augment_callbacks structure @@ -107,6 +107,40 @@ RBSTATIC const struct rb_augment_callbacks RBNAME = { \ .rotate = RBNAME ## _rotate \ }; +/* + * Template for declaring augmented rbtree callbacks, + * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes. 
+ * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBTYPE: type of the RBAUGMENTED field + * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar + */ + +#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ + RBTYPE, RBAUGMENTED, RBCOMPUTE) \ +static inline RBTYPE RBNAME ## _compute_max(RBSTRUCT *node) \ +{ \ + RBSTRUCT *child; \ + RBTYPE max = RBCOMPUTE(node); \ + if (node->RBFIELD.rb_left) { \ + child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + if (node->RBFIELD.rb_right) { \ + child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + return max; \ +} \ +RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ + RBTYPE, RBAUGMENTED, RBNAME ## _compute_max) + #define RB_RED 0 #define RB_BLACK 1 diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 62b8ee92643d..41ae3c7570d3 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -77,26 +77,10 @@ static inline void erase_cached(struct test_node *node, struct rb_root_cached *r } -static inline u32 augment_recompute(struct test_node *node) -{ - u32 max = node->val, child_augmented; - if (node->rb.rb_left) { - child_augmented = rb_entry(node->rb.rb_left, struct test_node, - rb)->augmented; - if (max < child_augmented) - max = child_augmented; - } - if (node->rb.rb_right) { - child_augmented = rb_entry(node->rb.rb_right, struct test_node, - rb)->augmented; - if (max < child_augmented) - max = child_augmented; - } - return max; -} +#define NODE_VAL(node) ((node)->val) -RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb, - u32, augmented, augment_recompute) +RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks, + struct test_node, rb, u32, augmented, NODE_VAL) static void insert_augmented(struct test_node *node, struct rb_root_cached *root) @@ -238,7 +222,20 @@ static void check_augmented(int nr_nodes) check(nr_nodes); for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) { struct test_node *node = rb_entry(rb, struct test_node, rb); - WARN_ON_ONCE(node->augmented != augment_recompute(node)); + u32 subtree, max = node->val; + if (node->rb.rb_left) { + subtree = rb_entry(node->rb.rb_left, struct test_node, + rb)->augmented; + if (max < subtree) + max = subtree; + } + if (node->rb.rb_right) { + subtree = rb_entry(node->rb.rb_right, struct test_node, + rb)->augmented; + if (max < subtree) + max = subtree; + } + WARN_ON_ONCE(node->augmented != max); } } diff --git a/mm/mmap.c b/mm/mmap.c index f1e8c7f93e04..14b7da317ec0 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -289,9 +289,9 @@ out: return retval; } -static long vma_compute_subtree_gap(struct vm_area_struct *vma) +static inline unsigned long vma_compute_gap(struct vm_area_struct *vma) { - unsigned long max, prev_end, subtree_gap; + unsigned long gap, prev_end; /* * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we @@ -299,14 +299,21 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma) * an unmapped area; whereas when expanding we only require one. * That's a little inconsistent, but keeps the code here simpler. 
*/ - max = vm_start_gap(vma); + gap = vm_start_gap(vma); if (vma->vm_prev) { prev_end = vm_end_gap(vma->vm_prev); - if (max > prev_end) - max -= prev_end; + if (gap > prev_end) + gap -= prev_end; else - max = 0; + gap = 0; } + return gap; +} + +#ifdef CONFIG_DEBUG_VM_RB +static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma) +{ + unsigned long max = vma_compute_gap(vma), subtree_gap; if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; @@ -322,7 +329,6 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma) return max; } -#ifdef CONFIG_DEBUG_VM_RB static int browse_rb(struct mm_struct *mm) { struct rb_root *root = &mm->mm_rb; @@ -428,8 +434,9 @@ static void validate_mm(struct mm_struct *mm) #define validate_mm(mm) do { } while (0) #endif -RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, - unsigned long, rb_subtree_gap, vma_compute_subtree_gap) +RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks, + struct vm_area_struct, vm_rb, + unsigned long, rb_subtree_gap, vma_compute_gap) /* * Update augmented rbtree rb_subtree_gap values after vma->vm_start or @@ -439,8 +446,8 @@ RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, static void vma_gap_update(struct vm_area_struct *vma) { /* - * As it turns out, RB_DECLARE_CALLBACKS() already created a callback - * function that does exactly what we want. + * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created + * a callback function that does exactly what we want. */ vma_gap_callbacks_propagate(&vma->vm_rb, NULL); } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index fcadd3e25c0c..a3c70e275f4e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -396,9 +396,8 @@ compute_subtree_max_size(struct vmap_area *va) get_subtree_max_size(va->rb_node.rb_right)); } -RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb, - struct vmap_area, rb_node, unsigned long, subtree_max_size, - compute_subtree_max_size) +RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, + struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) static void purge_vmap_area_lazy(void); static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h index de3a480204ba..4e8c4c76e9a2 100644 --- a/tools/include/linux/rbtree_augmented.h +++ b/tools/include/linux/rbtree_augmented.h @@ -63,7 +63,7 @@ rb_insert_augmented_cached(struct rb_node *node, } /* - * Template for declaring augmented rbtree callbacks + * Template for declaring augmented rbtree callbacks (generic case) * * RBSTATIC: 'static' or empty * RBNAME: name of the rb_augment_callbacks structure @@ -109,6 +109,40 @@ RBSTATIC const struct rb_augment_callbacks RBNAME = { \ .rotate = RBNAME ## _rotate \ }; +/* + * Template for declaring augmented rbtree callbacks, + * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes. 
+ * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBTYPE: type of the RBAUGMENTED field + * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar + */ + +#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ + RBTYPE, RBAUGMENTED, RBCOMPUTE) \ +static inline RBTYPE RBNAME ## _compute_max(RBSTRUCT *node) \ +{ \ + RBSTRUCT *child; \ + RBTYPE max = RBCOMPUTE(node); \ + if (node->RBFIELD.rb_left) { \ + child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + if (node->RBFIELD.rb_right) { \ + child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + return max; \ +} \ +RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ + RBTYPE, RBAUGMENTED, RBNAME ## _compute_max) + #define RB_RED 0 #define RB_BLACK 1 -- cgit v1.2.3
From 6d2052d188d962ffb7ad3d413e6ffd5f276aec94 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Wed, 25 Sep 2019 16:46:10 -0700 Subject: augmented rbtree: rework the RB_DECLARE_CALLBACKS macro definition
Change the definition of the RBCOMPUTE function. The propagate callback repeatedly calls RBCOMPUTE as it moves from leaf to root. It wants to stop recomputing once the augmented subtree information doesn't change. This was previously checked using the == operator, but that only works when the augmented subtree information is a scalar field.
This commit modifies the RBCOMPUTE function so that it now sets the augmented subtree information instead of returning it, and returns a boolean value indicating if the propagate callback should stop.
The motivation for this change is that I want to introduce augmented rbtree uses where the augmented data for the subtree is a struct instead of a scalar.
Link: http://lkml.kernel.org/r/20190703040156.56953-4-walken@google.com Signed-off-by: Michel Lespinasse Acked-by: Peter Zijlstra (Intel) Cc: David Howells Cc: Davidlohr Bueso Cc: Uladzislau Rezki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rbtree_augmented.h | 24 ++++++++++++------------ tools/include/linux/rbtree_augmented.h | 24 ++++++++++++------------ 2 files changed, 24 insertions(+), 24 deletions(-) (limited to 'tools') diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index e5937e387e02..fdd421b8d9ae 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -67,22 +67,19 @@ rb_insert_augmented_cached(struct rb_node *node, * RBNAME: name of the rb_augment_callbacks structure * RBSTRUCT: struct type of the tree nodes * RBFIELD: name of struct rb_node field within RBSTRUCT - * RBTYPE: type of the RBAUGMENTED field - * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data */ -#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ - RBTYPE, RBAUGMENTED, RBCOMPUTE) \ +#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ + RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \ static inline void \ RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \ { \ while (rb != stop) { \ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \ - RBTYPE augmented = RBCOMPUTE(node); \ - if (node->RBAUGMENTED == augmented) \ + if (RBCOMPUTE(node, true)) \ break; \ - node->RBAUGMENTED = augmented; \ rb = rb_parent(&node->RBFIELD); \ } \ } \ @@ -99,7 +96,7 @@ RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ new->RBAUGMENTED = old->RBAUGMENTED; \ - old->RBAUGMENTED = RBCOMPUTE(old); \ + RBCOMPUTE(old, false); \ } \ RBSTATIC const struct rb_augment_callbacks RBNAME = { \ .propagate = RBNAME ## _propagate, \ @@ -122,7 +119,7 @@ RBSTATIC const struct rb_augment_callbacks RBNAME = { \ #define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ RBTYPE, RBAUGMENTED, RBCOMPUTE) \ -static inline RBTYPE RBNAME ## _compute_max(RBSTRUCT *node) \ +static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \ { \ RBSTRUCT *child; \ RBTYPE max = RBCOMPUTE(node); \ @@ -136,10 +133,13 @@ static inline RBTYPE RBNAME ## _compute_max(RBSTRUCT *node) \ if (child->RBAUGMENTED > max) \ max = child->RBAUGMENTED; \ } \ - return max; \ + if (exit && node->RBAUGMENTED == max) \ + return true; \ + node->RBAUGMENTED = max; \ + return false; \ } \ -RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ - RBTYPE, RBAUGMENTED, RBNAME ## _compute_max) +RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ + RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max) #define RB_RED 0 diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h index 4e8c4c76e9a2..381aa948610d 100644 --- a/tools/include/linux/rbtree_augmented.h +++ b/tools/include/linux/rbtree_augmented.h @@ -69,22 +69,19 @@ rb_insert_augmented_cached(struct rb_node *node, * RBNAME: name of the rb_augment_callbacks structure * RBSTRUCT: struct type of the tree nodes * RBFIELD: name of struct rb_node field within RBSTRUCT - * RBTYPE: type of the RBAUGMENTED field - * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * 
RBAUGMENTED: name of field within RBSTRUCT holding data for subtree * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data */ -#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ - RBTYPE, RBAUGMENTED, RBCOMPUTE) \ +#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ + RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \ static inline void \ RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \ { \ while (rb != stop) { \ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \ - RBTYPE augmented = RBCOMPUTE(node); \ - if (node->RBAUGMENTED == augmented) \ + if (RBCOMPUTE(node, true)) \ break; \ - node->RBAUGMENTED = augmented; \ rb = rb_parent(&node->RBFIELD); \ } \ } \ @@ -101,7 +98,7 @@ RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ new->RBAUGMENTED = old->RBAUGMENTED; \ - old->RBAUGMENTED = RBCOMPUTE(old); \ + RBCOMPUTE(old, false); \ } \ RBSTATIC const struct rb_augment_callbacks RBNAME = { \ .propagate = RBNAME ## _propagate, \ @@ -124,7 +121,7 @@ RBSTATIC const struct rb_augment_callbacks RBNAME = { \ #define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ RBTYPE, RBAUGMENTED, RBCOMPUTE) \ -static inline RBTYPE RBNAME ## _compute_max(RBSTRUCT *node) \ +static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \ { \ RBSTRUCT *child; \ RBTYPE max = RBCOMPUTE(node); \ @@ -138,10 +135,13 @@ static inline RBTYPE RBNAME ## _compute_max(RBSTRUCT *node) \ if (child->RBAUGMENTED > max) \ max = child->RBAUGMENTED; \ } \ - return max; \ + if (exit && node->RBAUGMENTED == max) \ + return true; \ + node->RBAUGMENTED = max; \ + return false; \ } \ -RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ - RBTYPE, RBAUGMENTED, RBNAME ## _compute_max) +RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ + RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max) #define RB_RED 0 -- cgit v1.2.3
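
Here is a minimal usage sketch showing how the pieces above fit together once the series is applied: an rb_root_cached tree whose nodes carry a max-style augmented value, declared with RB_DECLARE_CALLBACKS_MAX and kept up to date through rb_insert_augmented_cached()/rb_erase_augmented_cached(). All identifiers below (itree_node, ITREE_LAST, itree_insert, itree_erase) are invented for illustration and do not appear in the patches; treat this as an assumed-correct sketch against the patched headers, not code from the series.

/*
 * Usage sketch only: hypothetical node type exercising the patched
 * rbtree API (RB_DECLARE_CALLBACKS_MAX + cached augmented insert/erase).
 */
#include <linux/rbtree_augmented.h>

struct itree_node {
	struct rb_node rb;
	unsigned long start;		/* sort key */
	unsigned long last;		/* per-node value */
	unsigned long subtree_last;	/* RBAUGMENTED: max of ->last in this subtree */
};

/* RBCOMPUTE: the per-node scalar whose subtree maximum is cached */
#define ITREE_LAST(n) ((n)->last)

RB_DECLARE_CALLBACKS_MAX(static, itree_augment, struct itree_node, rb,
			 unsigned long, subtree_last, ITREE_LAST)

static void itree_insert(struct itree_node *node, struct rb_root_cached *root)
{
	struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	node->subtree_last = node->last;
	while (*link) {
		struct itree_node *cur = rb_entry(*link, struct itree_node, rb);

		parent = *link;
		/* update augmented data on the path to the insertion point */
		if (cur->subtree_last < node->last)
			cur->subtree_last = node->last;
		if (node->start < cur->start) {
			link = &cur->rb.rb_left;
		} else {
			link = &cur->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, parent, link);
	/* rebalances, runs the itree_augment callbacks, maintains rb_leftmost */
	rb_insert_augmented_cached(&node->rb, root, leftmost, &itree_augment);
}

static void itree_erase(struct itree_node *node, struct rb_root_cached *root)
{
	rb_erase_augmented_cached(&node->rb, root, &itree_augment);
}

Note how the leftmost cache and the augmented propagation are both handled in the inline wrappers rather than inside __rb_insert()/__rb_erase_augmented(), which is exactly what lets the cached and non-cached variants share a single compiled copy of the core rebalancing code.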