author	Andrey Konovalov <andreyknvl@google.com>	2023-11-20 20:47:14 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-12-11 03:51:46 +0300
commit	410b764f89f59cce858d94fc781b68c1f27a0ca9 (patch)
tree	a8466fd4cd435189a7fb877eaa6ca2d596b3959a /lib/stackdepot.c
parent	022012dcf44209074af97b6ae531a10c08736b31 (diff)
download	linux-410b764f89f59cce858d94fc781b68c1f27a0ca9.tar.xz
lib/stackdepot: add refcount for records
Add a reference counter for how many times a stack record has been added to the stack depot. Add a new STACK_DEPOT_FLAG_GET flag to stack_depot_save_flags that instructs the stack depot to increment the refcount.

Do not yet decrement the refcount; this is implemented in one of the following patches. Do not yet enable any users to use the flag to avoid overflowing the refcount.

This is a preparatory patch for implementing the eviction of stack records from the stack depot.

Link: https://lkml.kernel.org/r/a3fc14a2359d019d2a008d4ff8b46a665371ffee.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
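For illustration only (not part of this patch): a caller that wants the depot to take a reference would pass the new flag when saving. The sketch below uses the existing stack_trace_save() helper and the STACK_DEPOT_FLAG_CAN_ALLOC flag from the parent patch in this series; the wrapper function itself is hypothetical.

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Hypothetical caller: save a trace and take a reference on its record. */
static depot_stack_handle_t save_stack_get(gfp_t gfp)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}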
Diffstat (limited to 'lib/stackdepot.c')
-rw-r--r--	lib/stackdepot.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 59d61d5c09a7..911dee11bf39 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -23,6 +23,7 @@
 #include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/printk.h>
+#include <linux/refcount.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/stacktrace.h>
@@ -60,6 +61,7 @@ struct stack_record {
 	u32 hash;			/* Hash in hash table */
 	u32 size;			/* Number of stored frames */
 	union handle_parts handle;
+	refcount_t count;
 	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
 };
 
@@ -373,6 +375,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	stack->hash = hash;
 	stack->size = size;
 	/* stack->handle is already filled in by depot_init_pool(). */
+	refcount_set(&stack->count, 1);
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
 
 	/*
@@ -489,6 +492,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	/* Fast path: look the stack trace up without full locking. */
 	found = find_stack(bucket, entries, nr_entries, hash);
 	if (found) {
+		if (depot_flags & STACK_DEPOT_FLAG_GET)
+			refcount_inc(&found->count);
 		read_unlock_irqrestore(&pool_rwlock, flags);
 		goto exit;
 	}
@@ -528,12 +533,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 			list_add(&new->list, bucket);
 			found = new;
 		}
-	} else if (prealloc) {
+	} else {
+		if (depot_flags & STACK_DEPOT_FLAG_GET)
+			refcount_inc(&found->count);
 		/*
 		 * Stack depot already contains this stack trace, but let's
 		 * keep the preallocated memory for future.
 		 */
-		depot_keep_new_pool(&prealloc);
+		if (prealloc)
+			depot_keep_new_pool(&prealloc);
 	}
 
 	write_unlock_irqrestore(&pool_rwlock, flags);
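The counter is initialized to 1 in depot_alloc_stack() and bumped by every later save that passes STACK_DEPOT_FLAG_GET; refcount_t saturates rather than wrapping on overflow, which is why no users are enabled yet. The matching decrement lands in a follow-up patch in this series. A rough sketch of that eviction path, assuming the stack_depot_put() interface and the depot_fetch_stack()/depot_free_stack() internals the series goes on to add (names per the eventual API; details may differ):

/* Sketch of the follow-up put path; not part of this patch. */
void stack_depot_put(depot_stack_handle_t handle)
{
	struct stack_record *stack;
	unsigned long flags;

	if (!handle)
		return;

	write_lock_irqsave(&pool_rwlock, flags);

	stack = depot_fetch_stack(handle);
	if (stack && refcount_dec_and_test(&stack->count)) {
		/* Last reference dropped: unlink the record and recycle it. */
		list_del(&stack->list);
		depot_free_stack(stack);
	}

	write_unlock_irqrestore(&pool_rwlock, flags);
}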