author	Sidhartha Kumar <sidhartha.kumar@oracle.com>	2023-12-13 23:50:57 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-01-05 17:19:43 +0300
commit	2c30b8b105d690b5bb69aab31f198e2385be9b03 (patch)
tree	47f6fc8a41a0a0d427c0257afb6feccbc1807b97 /lib
parent	11d41d01c088ff1bd2d3de4762117385b657bb44 (diff)
download	linux-2c30b8b105d690b5bb69aab31f198e2385be9b03.tar.xz
maple_tree: do not preallocate nodes for slot stores
commit 4249f13c11be8b8b7bf93204185e150c3bdc968d upstream.

mas_preallocate() defaults to requesting 1 node for preallocation and then, depending on the type of store, will update the request variable. There isn't a check for a slot store type, so slot stores are preallocating the default 1 node. Slot stores do not require any additional nodes, so add a check for the slot store case that will bypass node_count_gfp(). Update the tests to reflect that slot stores do not require allocations.

User visible effects of this bug include increased memory usage from the unneeded node that was allocated.

Link: https://lkml.kernel.org/r/20231213205058.386589-1-sidhartha.kumar@oracle.com
Fixes: 0b8bb544b1a7 ("maple_tree: update mas_preallocate() testing")
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peng Zhang <zhangpeng.00@bytedance.com>
Cc: <stable@vger.kernel.org> [6.6+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
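[Editor's note: for context, a minimal sketch of the caller pattern this change affects. The helper name update_existing_range is hypothetical, and the caller is assumed to already hold the lock protecting the tree. Before this patch, a pure slot store (the new entry exactly overwrites an existing range) paid for one unneeded node in mas_preallocate().]

	#include <linux/maple_tree.h>
	#include <linux/gfp.h>

	/* Hypothetical caller; assumes the lock protecting @mt is held. */
	static int update_existing_range(struct maple_tree *mt,
					 unsigned long first,
					 unsigned long last, void *new_entry)
	{
		MA_STATE(mas, mt, first, last);
		int ret;

		/*
		 * With this patch, exactly overwriting an existing range
		 * (a slot store) requests zero preallocated nodes instead
		 * of one. mas_preallocate() cleans up after itself on
		 * failure.
		 */
		ret = mas_preallocate(&mas, new_entry, GFP_KERNEL);
		if (ret)
			return ret;

		/* Store using the preallocations; unused nodes are freed. */
		mas_store_prealloc(&mas, new_entry);
		return 0;
	}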
Diffstat (limited to 'lib')
-rw-r--r-- lib/maple_tree.c | 11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index bb24d84a4922..684689457d77 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5501,6 +5501,17 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
 	mas_wr_end_piv(&wr_mas);
 	node_size = mas_wr_new_end(&wr_mas);
+
+	/* Slot store, does not require additional nodes */
+	if (node_size == wr_mas.node_end) {
+		/* reuse node */
+		if (!mt_in_rcu(mas->tree))
+			return 0;
+		/* shifting boundary */
+		if (wr_mas.offset_end - mas->offset == 1)
+			return 0;
+	}
+
 	if (node_size >= mt_slots[wr_mas.type]) {
 		/* Split, worst case for now. */
 		request = 1 + mas_mt_height(mas) * 2;
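[Editor's note: the "Update the tests" part of the changelog can be illustrated in the style of the userspace harness, tools/testing/radix-tree/maple.c, which #includes lib/maple_tree.c directly so internal helpers such as mas_allocated() are visible there; MT_BUG_ON() requires CONFIG_DEBUG_MAPLE_TREE. The function name and ranges below are an illustrative sketch, not part of this patch.]

	/* Illustrative sketch only; name and ranges are hypothetical. */
	static noinline void check_slot_store_prealloc(struct maple_tree *mt)
	{
		void *ptr = check_slot_store_prealloc;
		MA_STATE(mas, mt, 10, 20);

		/* Seed an entry so the next store hits exactly one slot. */
		mtree_store_range(mt, 10, 20, ptr, GFP_KERNEL);

		/* After this patch, a slot store preallocates zero nodes. */
		MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
		mas_destroy(&mas);
	}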