author     Andrey Konovalov <andreyknvl@google.com>   2022-09-06 00:05:47 +0300
committer  Andrew Morton <akpm@linux-foundation.org>  2022-10-04 00:03:02 +0300
commit     80b92bfe3bb75aa6688f58af9df356757a46f659 (patch)
tree       03827a0d4725b1ad461ff9f57e9919f7c0b33ad3 /mm/kasan/tags.c
parent     7ebfce33125100e3f0c5e059845a019a1401433d (diff)
kasan: dynamically allocate stack ring entries
Instead of using a large static array, allocate the stack ring dynamically
via memblock_alloc().

The size of the stack ring is controlled by a new kasan.stack_ring_size
command-line parameter. When kasan.stack_ring_size is not provided, the
default value of 32 << 10 is used.

When the stack trace collection is disabled via kasan.stacktrace=off,
the stack ring is not allocated.

Link: https://lkml.kernel.org/r/03b82ab60db53427e9818e0b0c1971baa10c3cbc.1662411800.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Acked-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
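As a usage illustration (65536 is an arbitrary example entry count, not a
recommended value), the new parameter is passed on the kernel command line
next to the existing kasan.stacktrace flag; since the handler calls
kstrtoul() with base 0, decimal, octal, and 0x-prefixed hex values are all
accepted:

	kasan.stacktrace=on kasan.stack_ring_size=65536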
Diffstat (limited to 'mm/kasan/tags.c')
-rw-r--r--  mm/kasan/tags.c | 25 ++++++++++++++++++++++++-
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index dd929ab166fb..67a222586846 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
 #include <linux/static_key.h>
@@ -19,6 +20,8 @@
 #include "kasan.h"
 #include "../slab.h"
 
+#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)
+
 enum kasan_arg_stacktrace {
 	KASAN_ARG_STACKTRACE_DEFAULT,
 	KASAN_ARG_STACKTRACE_OFF,
@@ -54,6 +57,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
 }
 early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
 
+/* kasan.stack_ring_size=<number of entries> */
+static int __init early_kasan_flag_stack_ring_size(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	return kstrtoul(arg, 0, &stack_ring.size);
+}
+early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
+
 void __init kasan_init_tags(void)
 {
 	switch (kasan_arg_stacktrace) {
@@ -67,6 +80,16 @@ void __init kasan_init_tags(void)
 		static_branch_enable(&kasan_flag_stacktrace);
 		break;
 	}
+
+	if (kasan_stack_collection_enabled()) {
+		if (!stack_ring.size)
+			stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
+		stack_ring.entries = memblock_alloc(
+			sizeof(stack_ring.entries[0]) * stack_ring.size,
+			SMP_CACHE_BYTES);
+		if (WARN_ON(!stack_ring.entries))
+			static_branch_disable(&kasan_flag_stacktrace);
+	}
 }
 
 static void save_stack_info(struct kmem_cache *cache, void *object,
@@ -88,7 +111,7 @@ static void save_stack_info(struct kmem_cache *cache, void *object,
 
 next:
 	pos = atomic64_fetch_add(1, &stack_ring.pos);
-	entry = &stack_ring.entries[pos % KASAN_STACK_RING_SIZE];
+	entry = &stack_ring.entries[pos % stack_ring.size];
 
 	/* Detect stack ring entry slots that are being written to. */
 	old_ptr = READ_ONCE(entry->ptr);
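
To make the slot-claiming scheme in save_stack_info() concrete, here is a
minimal, self-contained userspace sketch of the same pattern: writers
reserve a slot with an atomic fetch-and-add on an ever-growing position and
wrap it with a modulo by the runtime-configured ring size. This is an
illustrative analogue under the stated assumptions (calloc() stands in for
memblock_alloc(), and the entry struct is reduced to a single field); it is
not the kernel code itself.

/*
 * Userspace sketch of the stack ring indexing above: an atomic
 * fetch-and-add claims an ever-increasing position, and a modulo by
 * the runtime ring size maps it to a slot -- the indexing change
 * this patch makes.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	void *ptr;			/* stands in for the real per-slot fields */
};

struct ring {
	atomic_uint_fast64_t pos;	/* ever-increasing claim position */
	size_t size;			/* number of entries, fixed at init */
	struct entry *entries;		/* dynamically allocated, as in the patch */
};

static struct ring stack_ring;

/* Analogue of the allocation added to kasan_init_tags(). */
static int ring_init(size_t size)
{
	stack_ring.size = size;
	stack_ring.entries = calloc(size, sizeof(stack_ring.entries[0]));
	return stack_ring.entries ? 0 : -1;
}

/* Analogue of the slot-claiming step in save_stack_info(). */
static struct entry *ring_claim(void)
{
	uint64_t pos = atomic_fetch_add(&stack_ring.pos, 1);

	/* Modulo by a runtime value replaces the old compile-time constant. */
	return &stack_ring.entries[pos % stack_ring.size];
}

int main(void)
{
	if (ring_init(32 << 10))	/* mirrors KASAN_STACK_RING_SIZE_DEFAULT */
		return 1;
	ring_claim()->ptr = &stack_ring;
	printf("claimed slot 0 of %zu\n", stack_ring.size);
	free(stack_ring.entries);
	return 0;
}

The kernel version additionally detects concurrent writers racing on the
same slot via the entry->ptr handshake visible in the last hunk above; the
sketch omits that part.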