author    Linus Torvalds <torvalds@linux-foundation.org>  2022-01-22 12:28:23 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-01-22 12:28:23 +0300
commit    1c52283265a462a100ae63ddf58b4e5884acde86 (patch)
tree      c0d3fa7a02fc0111bc7e56aa1be88e9b92795704 /lib
parent    8205ae327e396820fb7a176a94768146ac0b87ea (diff)
parent    6e61dde82e8bfe65e8ebbe43da45e615bc529236 (diff)
download  linux-1c52283265a462a100ae63ddf58b4e5884acde86.tar.xz
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
 "This is the post-linux-next queue. Material which was based on or
  dependent upon material which was in -next.

  69 patches.

  Subsystems affected by this patch series: mm (migration and zsmalloc),
  sysctl, proc, and lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (69 commits)
  mm: hide the FRONTSWAP Kconfig symbol
  frontswap: remove support for multiple ops
  mm: mark swap_lock and swap_active_head static
  frontswap: simplify frontswap_register_ops
  frontswap: remove frontswap_test
  mm: simplify try_to_unuse
  frontswap: remove the frontswap exports
  frontswap: simplify frontswap_init
  frontswap: remove frontswap_curr_pages
  frontswap: remove frontswap_shrink
  frontswap: remove frontswap_tmem_exclusive_gets
  frontswap: remove frontswap_writethrough
  mm: remove cleancache
  lib/stackdepot: always do filter_irq_stacks() in stack_depot_save()
  lib/stackdepot: allow optional init and stack_table allocation by kvmalloc()
  proc: remove PDE_DATA() completely
  fs: proc: store PDE()->data into inode->i_private
  zsmalloc: replace get_cpu_var with local_lock
  zsmalloc: replace per zpage lock with pool->migrate_lock
  locking/rwlocks: introduce write_lock_nested
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig         4
-rw-r--r--  lib/Kconfig.kasan   2
-rw-r--r--  lib/stackdepot.c   46
-rw-r--r--  lib/test_sysctl.c  22
4 files changed, 47 insertions(+), 27 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index c20b68ad2bc3..51c368a50b16 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -673,6 +673,10 @@ config STACKDEPOT
 	bool
 	select STACKTRACE
 
+config STACKDEPOT_ALWAYS_INIT
+	bool
+	select STACKDEPOT
+
 config STACK_HASH_ORDER
 	int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
 	range 12 20
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index cdc842d090db..879757b6dd14 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -38,7 +38,7 @@ menuconfig KASAN
 		     CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
 		    HAVE_ARCH_KASAN_HW_TAGS
 	depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
-	select STACKDEPOT
+	select STACKDEPOT_ALWAYS_INIT
 	help
 	  Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index b437ae79aca1..bf5ba9af0500 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -23,6 +23,7 @@
 #include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/printk.h>
 #include <linux/slab.h>
@@ -161,18 +162,40 @@ static int __init is_stack_depot_disabled(char *str)
 }
 early_param("stack_depot_disable", is_stack_depot_disabled);
 
-int __init stack_depot_init(void)
+/*
+ * __ref because of memblock_alloc(), which will not be actually called after
+ * the __init code is gone, because at that point slab_is_available() is true
+ */
+__ref int stack_depot_init(void)
 {
-	if (!stack_depot_disable) {
+	static DEFINE_MUTEX(stack_depot_init_mutex);
+
+	mutex_lock(&stack_depot_init_mutex);
+	if (!stack_depot_disable && !stack_table) {
 		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
 		int i;
 
-		stack_table = memblock_alloc(size, size);
-		for (i = 0; i < STACK_HASH_SIZE; i++)
-			stack_table[i] = NULL;
+		if (slab_is_available()) {
+			pr_info("Stack Depot allocating hash table with kvmalloc\n");
+			stack_table = kvmalloc(size, GFP_KERNEL);
+		} else {
+			pr_info("Stack Depot allocating hash table with memblock_alloc\n");
+			stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+		}
+		if (stack_table) {
+			for (i = 0; i < STACK_HASH_SIZE; i++)
+				stack_table[i] = NULL;
+		} else {
+			pr_err("Stack Depot hash table allocation failed, disabling\n");
+			stack_depot_disable = true;
+			mutex_unlock(&stack_depot_init_mutex);
+			return -ENOMEM;
+		}
 	}
+	mutex_unlock(&stack_depot_init_mutex);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(stack_depot_init);
 
 /* Calculate hash for a stack */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
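With the mutex and the !stack_table guard above, stack_depot_init() can now be
called lazily after boot rather than only from __init code. A minimal sketch of
a caller opting in, with hypothetical names (my_debug_enable and its feature
are illustrative, not part of this patch):

	#include <linux/stackdepot.h>

	/* Hypothetical late-enable path for a debug feature. */
	static int my_debug_enable(void)
	{
		int ret;

		/*
		 * Safe to call repeatedly: the internal mutex serializes
		 * callers and the hash table is allocated only once. After
		 * boot, slab_is_available() is true, so this takes the
		 * kvmalloc() path rather than memblock_alloc().
		 */
		ret = stack_depot_init();
		if (ret)
			return ret;	/* -ENOMEM; the depot disabled itself */

		/* ... proceed to use stack_depot_save() ... */
		return 0;
	}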
@@ -305,6 +328,9 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
  * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
  * any allocations and will fail if no space is left to store the stack trace.
  *
+ * If the stack trace in @entries is from an interrupt, only the portion up to
+ * interrupt entry is saved.
+ *
  * Context: Any context, but setting @can_alloc to %false is required if
  *	     alloc_pages() cannot be used from the current context. Currently
  *	     this is the case from contexts where neither %GFP_ATOMIC nor
@@ -323,6 +349,16 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	unsigned long flags;
 	u32 hash;
 
+	/*
+	 * If this stack trace is from an interrupt, including anything before
+	 * interrupt entry usually leads to unbounded stackdepot growth.
+	 *
+	 * Because use of filter_irq_stacks() is a requirement to ensure
+	 * stackdepot can efficiently deduplicate interrupt stacks, always
+	 * filter_irq_stacks() to simplify all callers' use of stackdepot.
+	 */
+	nr_entries = filter_irq_stacks(entries, nr_entries);
+
 	if (unlikely(nr_entries == 0) || stack_depot_disable)
 		goto fast_exit;
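With filter_irq_stacks() now applied inside __stack_depot_save(), a caller's
save path reduces to capture-then-save, and any explicit filtering step it
carried becomes redundant. A minimal sketch under those assumptions
(record_current_stack is a hypothetical helper, not from this patch):

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>
	#include <linux/stackdepot.h>

	/* Hypothetical helper: capture the current stack and deposit it. */
	static depot_stack_handle_t record_current_stack(gfp_t flags)
	{
		unsigned long entries[64];
		unsigned int nr_entries;

		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

		/*
		 * No explicit filter_irq_stacks() call is needed any more.
		 * stack_depot_save() implies can_alloc == true; per the
		 * kerneldoc above, use __stack_depot_save(entries,
		 * nr_entries, flags, false) from contexts where neither
		 * GFP_ATOMIC nor GFP_NOWAIT can be used.
		 */
		return stack_depot_save(entries, nr_entries, flags);
	}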
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 3750323973f4..a5a3d6c27e1f 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -128,26 +128,6 @@ static struct ctl_table test_table[] = {
 	{ }
 };
 
-static struct ctl_table test_sysctl_table[] = {
-	{
-		.procname	= "test_sysctl",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= test_table,
-	},
-	{ }
-};
-
-static struct ctl_table test_sysctl_root_table[] = {
-	{
-		.procname	= "debug",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= test_sysctl_table,
-	},
-	{ }
-};
-
 static struct ctl_table_header *test_sysctl_header;
 
 static int __init test_sysctl_init(void)
@@ -155,7 +135,7 @@ static int __init test_sysctl_init(void)
 	test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
 	if (!test_data.bitmap_0001)
 		return -ENOMEM;
-	test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
+	test_sysctl_header = register_sysctl("debug/test_sysctl", test_table);
 	if (!test_sysctl_header) {
 		kfree(test_data.bitmap_0001);
 		return -ENOMEM;
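The conversion above generalizes: instead of chaining ctl_table arrays through
.child entries, the leaf table is registered directly under a slash-separated
path, and only the returned header is kept for cleanup. A minimal
self-contained sketch with hypothetical names (my_table, my_value, and
"debug/my_feature" are illustrative, not from this patch):

	#include <linux/sysctl.h>

	static int my_value;

	static struct ctl_table my_table[] = {
		{
			.procname	= "value",
			.data		= &my_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static struct ctl_table_header *my_header;

	static int __init my_feature_init(void)
	{
		/* Creates the intermediate "debug" directory as needed. */
		my_header = register_sysctl("debug/my_feature", my_table);
		if (!my_header)
			return -ENOMEM;
		return 0;
	}

	static void __exit my_feature_exit(void)
	{
		unregister_sysctl_table(my_header);
	}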