author      Linus Torvalds <torvalds@linux-foundation.org>  2018-11-01 21:46:27 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>  2018-11-01 21:46:27 +0300
commit      2d6bb6adb714b133db92ccd4bfc9c20f75f71f3f (patch)
tree        aef040a1ee4b8b6edc5a4fa2b3c6a2c48219f27a
parent      7c6c54b505b8aea1782ce6a6e8f3b8297d179937 (diff)
parent      6fcde90466738b84a073e4f4d18c50015ee29fb2 (diff)
download    linux-2d6bb6adb714b133db92ccd4bfc9c20f75f71f3f.tar.xz
Merge tag 'stackleak-v4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull stackleak gcc plugin from Kees Cook:
 "Please pull this new GCC plugin, stackleak, for v4.20-rc1. This plugin
  was ported from grsecurity by Alexander Popov. It provides efficient
  stack content poisoning at syscall exit. This creates a defense against
  at least two classes of flaws:

   - Uninitialized stack usage. (We continue to work on improving the
     compiler to do this in other ways: e.g. unconditional zero init was
     proposed to GCC and Clang, and more plugin work has started too).

   - Stack content exposure. By greatly reducing the lifetime of valid
     stack contents, exposures via either direct read bugs or unknown
     cache side-channels become much more difficult to exploit. This
     complements the existing buddy and heap poisoning options, but
     provides the coverage for stacks.

  The x86 hooks are included in this series (which have been reviewed by
  Ingo, Dave Hansen, and Thomas Gleixner). The arm64 hooks have already
  been merged through the arm64 tree (written by Laura Abbott and
  reviewed by Mark Rutland and Will Deacon).

  With VLAs having been removed this release, there is no need for
  alloca() protection, so it has been removed from the plugin"

* tag 'stackleak-v4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  arm64: Drop unneeded stackleak_check_alloca()
  stackleak: Allow runtime disabling of kernel stack erasing
  doc: self-protection: Add information about STACKLEAK feature
  fs/proc: Show STACKLEAK metrics in the /proc file system
  lkdtm: Add a test for STACKLEAK
  gcc-plugins: Add STACKLEAK plugin for tracking the kernel stack
  x86/entry: Add STACKLEAK erasing the kernel stack at the end of syscalls
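As an illustration (not part of the pulled series itself): the userspace-visible pieces added below are the kernel.stack_erasing sysctl (mode 0600, root only) and the per-task /proc/<pid>/stack_depth metrics file. A minimal C sketch for inspecting both, assuming a kernel built with CONFIG_GCC_PLUGIN_STACKLEAK, CONFIG_STACKLEAK_RUNTIME_DISABLE and CONFIG_STACKLEAK_METRICS; dump_file() is just a local helper for this sketch:

/* Minimal sketch: dump the STACKLEAK interfaces added by this series.
 * stack_erasing: 1 = erasing enabled (default), 0 = disabled at runtime.
 * stack_depth:   current and previous kernel stack consumption metrics.
 */
#include <stdio.h>

static void dump_file(const char *path)
{
	FILE *f = fopen(path, "r");
	char line[128];

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	dump_file("/proc/sys/kernel/stack_erasing");	/* requires root */
	dump_file("/proc/self/stack_depth");
	return 0;
}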
-rw-r--r--  Documentation/security/self-protection.rst | 10
-rw-r--r--  Documentation/sysctl/kernel.txt | 18
-rw-r--r--  Documentation/x86/x86_64/mm.txt | 3
-rw-r--r--  arch/Kconfig | 7
-rw-r--r--  arch/arm64/kernel/process.c | 22
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/entry/calling.h | 14
-rw-r--r--  arch/x86/entry/entry_32.S | 7
-rw-r--r--  arch/x86/entry/entry_64.S | 3
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 5
-rw-r--r--  drivers/misc/lkdtm/Makefile | 2
-rw-r--r--  drivers/misc/lkdtm/core.c | 1
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 3
-rw-r--r--  drivers/misc/lkdtm/stackleak.c | 73
-rw-r--r--  fs/proc/base.c | 18
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/stackleak.h | 35
-rw-r--r--  kernel/Makefile | 4
-rw-r--r--  kernel/fork.c | 3
-rw-r--r--  kernel/stackleak.c | 132
-rw-r--r--  kernel/sysctl.c | 15
-rw-r--r--  scripts/Makefile.gcc-plugins | 10
-rw-r--r--  scripts/gcc-plugins/Kconfig | 51
-rw-r--r--  scripts/gcc-plugins/stackleak_plugin.c | 427
24 files changed, 841 insertions, 28 deletions
diff --git a/Documentation/security/self-protection.rst b/Documentation/security/self-protection.rst
index e1ca698e0006..f584fb74b4ff 100644
--- a/Documentation/security/self-protection.rst
+++ b/Documentation/security/self-protection.rst
@@ -302,11 +302,11 @@ sure structure holes are cleared.
Memory poisoning
----------------
-When releasing memory, it is best to poison the contents (clear stack on
-syscall return, wipe heap memory on a free), to avoid reuse attacks that
-rely on the old contents of memory. This frustrates many uninitialized
-variable attacks, stack content exposures, heap content exposures, and
-use-after-free attacks.
+When releasing memory, it is best to poison the contents, to avoid reuse
+attacks that rely on the old contents of memory. E.g., clear stack on a
+syscall return (``CONFIG_GCC_PLUGIN_STACKLEAK``), wipe heap memory on a
+free. This frustrates many uninitialized variable attacks, stack content
+exposures, heap content exposures, and use-after-free attacks.
Destination tracking
--------------------
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 37a679501ddc..1b8775298cf7 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -89,6 +89,7 @@ show up in /proc/sys/kernel:
- shmmni
- softlockup_all_cpu_backtrace
- soft_watchdog
+- stack_erasing
- stop-a [ SPARC only ]
- sysrq ==> Documentation/admin-guide/sysrq.rst
- sysctl_writes_strict
@@ -987,6 +988,23 @@ detect a hard lockup condition.
==============================================================
+stack_erasing
+
+This parameter can be used to control kernel stack erasing at the end
+of syscalls for kernels built with CONFIG_GCC_PLUGIN_STACKLEAK.
+
+That erasing reduces the information which kernel stack leak bugs
+can reveal and blocks some uninitialized stack variable attacks.
+The tradeoff is the performance impact: on a single CPU system kernel
+compilation sees a 1% slowdown, other systems and workloads may vary.
+
+ 0: kernel stack erasing is disabled, STACKLEAK_METRICS are not updated.
+
+ 1: kernel stack erasing is enabled (default), it is performed before
+ returning to the userspace at the end of syscalls.
+
+==============================================================
+
tainted:
Non-zero if the kernel has been tainted. Numeric values, which can be
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 702898633b00..73aaaa3da436 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -146,3 +146,6 @@ Their order is preserved but their base will be offset early at boot time.
Be very careful vs. KASLR when changing anything here. The KASLR address
range must not overlap with anything except the KASAN shadow area, which is
correct as KASAN disables KASLR.
+
+For both 4- and 5-level layouts, the STACKLEAK_POISON value in the last 2MB
+hole: ffffffffffff4111
diff --git a/arch/Kconfig b/arch/Kconfig
index ed27fd262627..e1e540ffa979 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -429,6 +429,13 @@ config SECCOMP_FILTER
See Documentation/userspace-api/seccomp_filter.rst for details.
+config HAVE_ARCH_STACKLEAK
+ bool
+ help
+ An architecture should select this if it has the code which
+ fills the used part of the kernel stack with the STACKLEAK_POISON
+ value before returning from system calls.
+
config HAVE_STACKPROTECTOR
bool
help
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ce99c58cd1f1..d9a4c2d6dd8b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -497,25 +497,3 @@ void arch_setup_new_exec(void)
{
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}
-
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-void __used stackleak_check_alloca(unsigned long size)
-{
- unsigned long stack_left;
- unsigned long current_sp = current_stack_pointer;
- struct stack_info info;
-
- BUG_ON(!on_accessible_stack(current, current_sp, &info));
-
- stack_left = current_sp - info.low;
-
- /*
- * There's a good chance we're almost out of stack space if this
- * is true. Using panic() over BUG() is more likely to give
- * reliable debugging output.
- */
- if (size >= stack_left)
- panic("alloca() over the kernel stack boundary\n");
-}
-EXPORT_SYMBOL(stackleak_check_alloca);
-#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c51c989c19c0..ba7e3464ee92 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -129,6 +129,7 @@ config X86
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
+ select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 708b46a54578..25e5a6bda8c3 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -329,8 +329,22 @@ For 32-bit we have the following conventions - kernel is built with
#endif
+.macro STACKLEAK_ERASE_NOCLOBBER
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ PUSH_AND_CLEAR_REGS
+ call stackleak_erase
+ POP_REGS
+#endif
+.endm
+
#endif /* CONFIG_X86_64 */
+.macro STACKLEAK_ERASE
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ call stackleak_erase
+#endif
+.endm
+
/*
* This does 'call enter_from_user_mode' unless we can avoid it based on
* kernel config or using the static jump infrastructure.
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 687e47f8a796..d309f30cf7af 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -46,6 +46,8 @@
#include <asm/frame.h>
#include <asm/nospec-branch.h>
+#include "calling.h"
+
.section .entry.text, "ax"
/*
@@ -712,6 +714,7 @@ ENTRY(ret_from_fork)
/* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
call syscall_return_slowpath
+ STACKLEAK_ERASE
jmp restore_all
/* kernel thread */
@@ -886,6 +889,8 @@ ENTRY(entry_SYSENTER_32)
ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
"jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+ STACKLEAK_ERASE
+
/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
@@ -997,6 +1002,8 @@ ENTRY(entry_INT80_32)
call do_int80_syscall_32
.Lsyscall_32_done:
+ STACKLEAK_ERASE
+
restore_all:
TRACE_IRQS_IRET
SWITCH_TO_ENTRY_STACK
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4d7a2d9d44cf..ce25d84023c0 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -266,6 +266,8 @@ syscall_return_via_sysret:
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
*/
+ STACKLEAK_ERASE_NOCLOBBER
+
SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
popq %rdi
@@ -625,6 +627,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
*/
+ STACKLEAK_ERASE_NOCLOBBER
SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 7d0df78db727..8eaf8952c408 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,11 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
/* Opportunistic SYSRET */
sysret32_from_system_call:
+ /*
+ * We are not going to return to userspace from the trampoline
+ * stack. So let's erase the thread stack right now.
+ */
+ STACKLEAK_ERASE
TRACE_IRQS_ON /* User mode traces as IRQs on. */
movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 3370a4138e94..951c984de61a 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -8,7 +8,9 @@ lkdtm-$(CONFIG_LKDTM) += perms.o
lkdtm-$(CONFIG_LKDTM) += refcount.o
lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
+lkdtm-$(CONFIG_LKDTM) += stackleak.o
+KASAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_rodata.o := n
OBJCOPYFLAGS :=
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 5a755590d3dc..2837dc77478e 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -184,6 +184,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_STACK_BEYOND),
CRASHTYPE(USERCOPY_KERNEL),
CRASHTYPE(USERCOPY_KERNEL_DS),
+ CRASHTYPE(STACKLEAK_ERASING),
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 07db641d71d0..3c6fd327e166 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -84,4 +84,7 @@ void lkdtm_USERCOPY_STACK_BEYOND(void);
void lkdtm_USERCOPY_KERNEL(void);
void lkdtm_USERCOPY_KERNEL_DS(void);
+/* lkdtm_stackleak.c */
+void lkdtm_STACKLEAK_ERASING(void);
+
#endif
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
new file mode 100644
index 000000000000..d5a084475abc
--- /dev/null
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code tests that the current task stack is properly erased (filled
+ * with STACKLEAK_POISON).
+ *
+ * Authors:
+ * Alexander Popov <alex.popov@linux.com>
+ * Tycho Andersen <tycho@tycho.ws>
+ */
+
+#include "lkdtm.h"
+#include <linux/stackleak.h>
+
+void lkdtm_STACKLEAK_ERASING(void)
+{
+ unsigned long *sp, left, found, i;
+ const unsigned long check_depth =
+ STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+
+ /*
+ * For the details about the alignment of the poison values, see
+ * the comment in stackleak_track_stack().
+ */
+ sp = PTR_ALIGN(&i, sizeof(unsigned long));
+
+ left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long);
+ sp--;
+
+ /*
+ * One 'long int' at the bottom of the thread stack is reserved
+ * and not poisoned.
+ */
+ if (left > 1) {
+ left--;
+ } else {
+ pr_err("FAIL: not enough stack space for the test\n");
+ return;
+ }
+
+ pr_info("checking unused part of the thread stack (%lu bytes)...\n",
+ left * sizeof(unsigned long));
+
+ /*
+ * Search for 'check_depth' poison values in a row (just like
+ * stackleak_erase() does).
+ */
+ for (i = 0, found = 0; i < left && found <= check_depth; i++) {
+ if (*(sp - i) == STACKLEAK_POISON)
+ found++;
+ else
+ found = 0;
+ }
+
+ if (found <= check_depth) {
+ pr_err("FAIL: thread stack is not erased (checked %lu bytes)\n",
+ i * sizeof(unsigned long));
+ return;
+ }
+
+ pr_info("first %lu bytes are unpoisoned\n",
+ (i - found) * sizeof(unsigned long));
+
+ /* The rest of thread stack should be erased */
+ for (; i < left; i++) {
+ if (*(sp - i) != STACKLEAK_POISON) {
+ pr_err("FAIL: thread stack is NOT properly erased\n");
+ return;
+ }
+ }
+
+ pr_info("OK: the rest of the thread stack is properly erased\n");
+ return;
+}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7e9f07bf260d..ce3465479447 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2905,6 +2905,21 @@ static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns,
}
#endif /* CONFIG_LIVEPATCH */
+#ifdef CONFIG_STACKLEAK_METRICS
+static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+{
+ unsigned long prev_depth = THREAD_SIZE -
+ (task->prev_lowest_stack & (THREAD_SIZE - 1));
+ unsigned long depth = THREAD_SIZE -
+ (task->lowest_stack & (THREAD_SIZE - 1));
+
+ seq_printf(m, "previous stack depth: %lu\nstack depth: %lu\n",
+ prev_depth, depth);
+ return 0;
+}
+#endif /* CONFIG_STACKLEAK_METRICS */
+
/*
* Thread groups
*/
@@ -3006,6 +3021,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_LIVEPATCH
ONE("patch_state", S_IRUSR, proc_pid_patch_state),
#endif
+#ifdef CONFIG_STACKLEAK_METRICS
+ ONE("stack_depth", S_IRUGO, proc_stack_depth),
+#endif
};
static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8f8a5418b627..a51c13c2b1a0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1200,6 +1200,11 @@ struct task_struct {
void *security;
#endif
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ unsigned long lowest_stack;
+ unsigned long prev_lowest_stack;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
new file mode 100644
index 000000000000..3d5c3271a9a8
--- /dev/null
+++ b/include/linux/stackleak.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STACKLEAK_H
+#define _LINUX_STACKLEAK_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/*
+ * Check that the poison value points to the unused hole in the
+ * virtual memory map for your platform.
+ */
+#define STACKLEAK_POISON -0xBEEF
+#define STACKLEAK_SEARCH_DEPTH 128
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#include <asm/stacktrace.h>
+
+static inline void stackleak_task_init(struct task_struct *t)
+{
+ t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+# ifdef CONFIG_STACKLEAK_METRICS
+ t->prev_lowest_stack = t->lowest_stack;
+# endif
+}
+
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+int stack_erasing_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
+
+#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
+static inline void stackleak_task_init(struct task_struct *t) { }
+#endif
+
+#endif
diff --git a/kernel/Makefile b/kernel/Makefile
index 7a63d567fdb5..7343b3a9bff0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -117,6 +117,10 @@ obj-$(CONFIG_HAS_IOMEM) += iomem.o
obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_RSEQ) += rseq.o
+obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
+KASAN_SANITIZE_stackleak.o := n
+KCOV_INSTRUMENT_stackleak.o := n
+
$(obj)/configs.o: $(obj)/config_data.h
targets += config_data.gz
diff --git a/kernel/fork.c b/kernel/fork.c
index 8f82a3bdcb8f..07cddff89c7b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,6 +91,7 @@
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
+#include <linux/stackleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -1926,6 +1927,8 @@ static __latent_entropy struct task_struct *copy_process(
if (retval)
goto bad_fork_cleanup_io;
+ stackleak_task_init(p);
+
if (pid != &init_struct_pid) {
pid = alloc_pid(p->nsproxy->pid_ns_for_children);
if (IS_ERR(pid)) {
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
new file mode 100644
index 000000000000..e42892926244
--- /dev/null
+++ b/kernel/stackleak.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code fills the used part of the kernel stack with a poison value
+ * before returning to userspace. It's part of the STACKLEAK feature
+ * ported from grsecurity/PaX.
+ *
+ * Author: Alexander Popov <alex.popov@linux.com>
+ *
+ * STACKLEAK reduces the information which kernel stack leak bugs can
+ * reveal and blocks some uninitialized stack variable attacks.
+ */
+
+#include <linux/stackleak.h>
+
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+#include <linux/jump_label.h>
+#include <linux/sysctl.h>
+
+static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
+
+int stack_erasing_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret = 0;
+ int state = !static_branch_unlikely(&stack_erasing_bypass);
+ int prev_state = state;
+
+ table->data = &state;
+ table->maxlen = sizeof(int);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ state = !!state;
+ if (ret || !write || state == prev_state)
+ return ret;
+
+ if (state)
+ static_branch_disable(&stack_erasing_bypass);
+ else
+ static_branch_enable(&stack_erasing_bypass);
+
+ pr_warn("stackleak: kernel stack erasing is %s\n",
+ state ? "enabled" : "disabled");
+ return ret;
+}
+
+#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass)
+#else
+#define skip_erasing() false
+#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
+
+asmlinkage void stackleak_erase(void)
+{
+ /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
+ unsigned long kstack_ptr = current->lowest_stack;
+ unsigned long boundary = (unsigned long)end_of_stack(current);
+ unsigned int poison_count = 0;
+ const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+
+ if (skip_erasing())
+ return;
+
+ /* Check that 'lowest_stack' value is sane */
+ if (unlikely(kstack_ptr - boundary >= THREAD_SIZE))
+ kstack_ptr = boundary;
+
+ /* Search for the poison value in the kernel stack */
+ while (kstack_ptr > boundary && poison_count <= depth) {
+ if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON)
+ poison_count++;
+ else
+ poison_count = 0;
+
+ kstack_ptr -= sizeof(unsigned long);
+ }
+
+ /*
+ * One 'long int' at the bottom of the thread stack is reserved and
+ * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
+ */
+ if (kstack_ptr == boundary)
+ kstack_ptr += sizeof(unsigned long);
+
+#ifdef CONFIG_STACKLEAK_METRICS
+ current->prev_lowest_stack = kstack_ptr;
+#endif
+
+ /*
+ * Now write the poison value to the kernel stack. Start from
+ * 'kstack_ptr' and move up till the new 'boundary'. We assume that
+ * the stack pointer doesn't change when we write poison.
+ */
+ if (on_thread_stack())
+ boundary = current_stack_pointer;
+ else
+ boundary = current_top_of_stack();
+
+ while (kstack_ptr < boundary) {
+ *(unsigned long *)kstack_ptr = STACKLEAK_POISON;
+ kstack_ptr += sizeof(unsigned long);
+ }
+
+ /* Reset the 'lowest_stack' value for the next syscall */
+ current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
+}
+
+void __used stackleak_track_stack(void)
+{
+ /*
+ * N.B. stackleak_erase() fills the kernel stack with the poison value,
+ * which has the register width. That code assumes that the value
+ * of 'lowest_stack' is aligned on the register width boundary.
+ *
+ * That is true for x86 and x86_64 because of the kernel stack
+ * alignment on these platforms (for details, see 'cc_stack_align' in
+ * arch/x86/Makefile). Take care of that when you port STACKLEAK to
+ * new platforms.
+ */
+ unsigned long sp = (unsigned long)&sp;
+
+ /*
+ * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
+ * STACKLEAK_SEARCH_DEPTH makes the poison search in
+ * stackleak_erase() unreliable. Let's prevent that.
+ */
+ BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);
+
+ if (sp < current->lowest_stack &&
+ sp >= (unsigned long)task_stack_page(current) +
+ sizeof(unsigned long)) {
+ current->lowest_stack = sp;
+ }
+}
+EXPORT_SYMBOL(stackleak_track_stack);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index cc02050fd0c4..3ae223f7b5df 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -91,7 +91,9 @@
#ifdef CONFIG_CHR_DEV_SG
#include <scsi/sg.h>
#endif
-
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+#include <linux/stackleak.h>
+#endif
#ifdef CONFIG_LOCKUP_DETECTOR
#include <linux/nmi.h>
#endif
@@ -1233,6 +1235,17 @@ static struct ctl_table kern_table[] = {
.extra2 = &one,
},
#endif
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+ {
+ .procname = "stack_erasing",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = stack_erasing_sysctl,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
{ }
};
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 0a482f341576..46c5c6809806 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -26,6 +26,16 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) \
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE) \
+= -fplugin-arg-randomize_layout_plugin-performance-mode
+gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak_plugin.so
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+ += -DSTACKLEAK_PLUGIN
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+ += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE)
+ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable
+endif
+export DISABLE_STACKLEAK_PLUGIN
+
# All the plugin CFLAGS are collected here in case a build target needs to
# filter them out of the KBUILD_CFLAGS.
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index cb0c889e13aa..0d5c799688f0 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -139,4 +139,55 @@ config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
in structures. This reduces the performance hit of RANDSTRUCT
at the cost of weakened randomization.
+config GCC_PLUGIN_STACKLEAK
+ bool "Erase the kernel stack before returning from syscalls"
+ depends on GCC_PLUGINS
+ depends on HAVE_ARCH_STACKLEAK
+ help
+ This option makes the kernel erase the kernel stack before
+ returning from system calls. That reduces the information which
+ kernel stack leak bugs can reveal and blocks some uninitialized
+ stack variable attacks.
+
+ The tradeoff is the performance impact: on a single CPU system kernel
+ compilation sees a 1% slowdown, other systems and workloads may vary
+ and you are advised to test this feature on your expected workload
+ before deploying it.
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
+config STACKLEAK_TRACK_MIN_SIZE
+ int "Minimum stack frame size of functions tracked by STACKLEAK"
+ default 100
+ range 0 4096
+ depends on GCC_PLUGIN_STACKLEAK
+ help
+ The STACKLEAK gcc plugin instruments the kernel code for tracking
+ the lowest border of the kernel stack (and for some other purposes).
+ It inserts the stackleak_track_stack() call for the functions with
+ a stack frame size greater than or equal to this parameter.
+ If unsure, leave the default value 100.
+
+config STACKLEAK_METRICS
+ bool "Show STACKLEAK metrics in the /proc file system"
+ depends on GCC_PLUGIN_STACKLEAK
+ depends on PROC_FS
+ help
+ If this is set, STACKLEAK metrics for every task are available in
+ the /proc file system. In particular, /proc/<pid>/stack_depth
+ shows the maximum kernel stack consumption for the current and
+ previous syscalls. Although this information is not precise, it
+ can be useful for estimating the STACKLEAK performance impact for
+ your workloads.
+
+config STACKLEAK_RUNTIME_DISABLE
+ bool "Allow runtime disabling of kernel stack erasing"
+ depends on GCC_PLUGIN_STACKLEAK
+ help
+ This option provides 'stack_erasing' sysctl, which can be used in
+ runtime to control kernel stack erasing for kernels built with
+ CONFIG_GCC_PLUGIN_STACKLEAK.
+
endif
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
new file mode 100644
index 000000000000..2f48da98b5d4
--- /dev/null
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2011-2017 by the PaX Team <pageexec@freemail.hu>
+ * Modified by Alexander Popov <alex.popov@linux.com>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ * but for the kernel it doesn't matter since it doesn't link against
+ * any of the gcc libraries
+ *
+ * This gcc plugin is needed for tracking the lowest border of the kernel stack.
+ * It instruments the kernel code inserting stackleak_track_stack() calls:
+ * - after alloca();
+ * - for the functions with a stack frame size greater than or equal
+ * to the "track-min-size" plugin parameter.
+ *
+ * This plugin is ported from grsecurity/PaX. For more information see:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+ *
+ * Debugging:
+ * - use fprintf() to stderr, debug_generic_expr(), debug_gimple_stmt(),
+ * print_rtl() and print_simple_rtl();
+ * - add "-fdump-tree-all -fdump-rtl-all" to the plugin CFLAGS in
+ * Makefile.gcc-plugins to see the verbose dumps of the gcc passes;
+ * - use gcc -E to understand the preprocessing shenanigans;
+ * - use gcc with enabled CFG/GIMPLE/SSA verification (--enable-checking).
+ */
+
+#include "gcc-common.h"
+
+__visible int plugin_is_GPL_compatible;
+
+static int track_frame_size = -1;
+static const char track_function[] = "stackleak_track_stack";
+
+/*
+ * Mark these global variables (roots) for gcc garbage collector since
+ * they point to the garbage-collected memory.
+ */
+static GTY(()) tree track_function_decl;
+
+static struct plugin_info stackleak_plugin_info = {
+ .version = "201707101337",
+ .help = "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n"
+ "disable\t\tdo not activate the plugin\n"
+};
+
+static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after)
+{
+ gimple stmt;
+ gcall *stackleak_track_stack;
+ cgraph_node_ptr node;
+ int frequency;
+ basic_block bb;
+
+ /* Insert call to void stackleak_track_stack(void) */
+ stmt = gimple_build_call(track_function_decl, 0);
+ stackleak_track_stack = as_a_gcall(stmt);
+ if (after) {
+ gsi_insert_after(gsi, stackleak_track_stack,
+ GSI_CONTINUE_LINKING);
+ } else {
+ gsi_insert_before(gsi, stackleak_track_stack, GSI_SAME_STMT);
+ }
+
+ /* Update the cgraph */
+ bb = gimple_bb(stackleak_track_stack);
+ node = cgraph_get_create_node(track_function_decl);
+ gcc_assert(node);
+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
+ cgraph_create_edge(cgraph_get_node(current_function_decl), node,
+ stackleak_track_stack, bb->count, frequency);
+}
+
+static bool is_alloca(gimple stmt)
+{
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
+ return true;
+
+#if BUILDING_GCC_VERSION >= 4007
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
+ return true;
+#endif
+
+ return false;
+}
+
+/*
+ * Work with the GIMPLE representation of the code. Insert the
+ * stackleak_track_stack() call after alloca() and into the beginning
+ * of the function if it is not instrumented.
+ */
+static unsigned int stackleak_instrument_execute(void)
+{
+ basic_block bb, entry_bb;
+ bool prologue_instrumented = false, is_leaf = true;
+ gimple_stmt_iterator gsi;
+
+ /*
+ * ENTRY_BLOCK_PTR is a basic block which represents possible entry
+ * point of a function. This block does not contain any code and
+ * has a CFG edge to its successor.
+ */
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ entry_bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+
+ /*
+ * Loop through the GIMPLE statements in each of cfun basic blocks.
+ * cfun is a global variable which represents the function that is
+ * currently processed.
+ */
+ FOR_EACH_BB_FN(bb, cfun) {
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gimple stmt;
+
+ stmt = gsi_stmt(gsi);
+
+ /* Leaf function is a function which makes no calls */
+ if (is_gimple_call(stmt))
+ is_leaf = false;
+
+ if (!is_alloca(stmt))
+ continue;
+
+ /* Insert stackleak_track_stack() call after alloca() */
+ stackleak_add_track_stack(&gsi, true);
+ if (bb == entry_bb)
+ prologue_instrumented = true;
+ }
+ }
+
+ if (prologue_instrumented)
+ return 0;
+
+ /*
+ * Special cases to skip the instrumentation.
+ *
+ * Taking the address of static inline functions materializes them,
+ * but we mustn't instrument some of them as the resulting stack
+ * alignment required by the function call ABI will break other
+ * assumptions regarding the expected (but not otherwise enforced)
+ * register clobbering ABI.
+ *
+ * Case in point: native_save_fl on amd64 when optimized for size
+ * clobbers rdx if it were instrumented here.
+ *
+ * TODO: any more special cases?
+ */
+ if (is_leaf &&
+ !TREE_PUBLIC(current_function_decl) &&
+ DECL_DECLARED_INLINE_P(current_function_decl)) {
+ return 0;
+ }
+
+ if (is_leaf &&
+ !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)),
+ "_paravirt_", 10)) {
+ return 0;
+ }
+
+ /* Insert stackleak_track_stack() call at the function beginning */
+ bb = entry_bb;
+ if (!single_pred_p(bb)) {
+ /* gcc_assert(bb_loop_depth(bb) ||
+ (bb->flags & BB_IRREDUCIBLE_LOOP)); */
+ split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+ }
+ gsi = gsi_after_labels(bb);
+ stackleak_add_track_stack(&gsi, false);
+
+ return 0;
+}
+
+static bool large_stack_frame(void)
+{
+#if BUILDING_GCC_VERSION >= 8000
+ return maybe_ge(get_frame_size(), track_frame_size);
+#else
+ return (get_frame_size() >= track_frame_size);
+#endif
+}
+
+/*
+ * Work with the RTL representation of the code.
+ * Remove the unneeded stackleak_track_stack() calls from the functions
+ * which don't call alloca() and don't have a large enough stack frame size.
+ */
+static unsigned int stackleak_cleanup_execute(void)
+{
+ rtx_insn *insn, *next;
+
+ if (cfun->calls_alloca)
+ return 0;
+
+ if (large_stack_frame())
+ return 0;
+
+ /*
+ * Find stackleak_track_stack() calls. Loop through the chain of insns,
+ * which is an RTL representation of the code for a function.
+ *
+ * The example of a matching insn:
+ * (call_insn 8 4 10 2 (call (mem (symbol_ref ("stackleak_track_stack")
+ * [flags 0x41] <function_decl 0x7f7cd3302a80 stackleak_track_stack>)
+ * [0 stackleak_track_stack S1 A8]) (0)) 675 {*call} (expr_list
+ * (symbol_ref ("stackleak_track_stack") [flags 0x41] <function_decl
+ * 0x7f7cd3302a80 stackleak_track_stack>) (expr_list (0) (nil))) (nil))
+ */
+ for (insn = get_insns(); insn; insn = next) {
+ rtx body;
+
+ next = NEXT_INSN(insn);
+
+ /* Check the expression code of the insn */
+ if (!CALL_P(insn))
+ continue;
+
+ /*
+ * Check the expression code of the insn body, which is an RTL
+ * Expression (RTX) describing the side effect performed by
+ * that insn.
+ */
+ body = PATTERN(insn);
+
+ if (GET_CODE(body) == PARALLEL)
+ body = XVECEXP(body, 0, 0);
+
+ if (GET_CODE(body) != CALL)
+ continue;
+
+ /*
+ * Check the first operand of the call expression. It should
+ * be a mem RTX describing the needed subroutine with a
+ * symbol_ref RTX.
+ */
+ body = XEXP(body, 0);
+ if (GET_CODE(body) != MEM)
+ continue;
+
+ body = XEXP(body, 0);
+ if (GET_CODE(body) != SYMBOL_REF)
+ continue;
+
+ if (SYMBOL_REF_DECL(body) != track_function_decl)
+ continue;
+
+ /* Delete the stackleak_track_stack() call */
+ delete_insn_and_edges(insn);
+#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION < 8000
+ if (GET_CODE(next) == NOTE &&
+ NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
+ insn = next;
+ next = NEXT_INSN(insn);
+ delete_insn_and_edges(insn);
+ }
+#endif
+ }
+
+ return 0;
+}
+
+static bool stackleak_gate(void)
+{
+ tree section;
+
+ section = lookup_attribute("section",
+ DECL_ATTRIBUTES(current_function_decl));
+ if (section && TREE_VALUE(section)) {
+ section = TREE_VALUE(TREE_VALUE(section));
+
+ if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10))
+ return false;
+ if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13))
+ return false;
+ if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13))
+ return false;
+ if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13))
+ return false;
+ }
+
+ return track_frame_size >= 0;
+}
+
+/* Build the function declaration for stackleak_track_stack() */
+static void stackleak_start_unit(void *gcc_data __unused,
+ void *user_data __unused)
+{
+ tree fntype;
+
+ /* void stackleak_track_stack(void) */
+ fntype = build_function_type_list(void_type_node, NULL_TREE);
+ track_function_decl = build_fn_decl(track_function, fntype);
+ DECL_ASSEMBLER_NAME(track_function_decl); /* for LTO */
+ TREE_PUBLIC(track_function_decl) = 1;
+ TREE_USED(track_function_decl) = 1;
+ DECL_EXTERNAL(track_function_decl) = 1;
+ DECL_ARTIFICIAL(track_function_decl) = 1;
+ DECL_PRESERVE_P(track_function_decl) = 1;
+}
+
+/*
+ * Pass gate function is a predicate function that gets executed before the
+ * corresponding pass. If the return value is 'true' the pass gets executed,
+ * otherwise, it is skipped.
+ */
+static bool stackleak_instrument_gate(void)
+{
+ return stackleak_gate();
+}
+
+#define PASS_NAME stackleak_instrument
+#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg
+#define TODO_FLAGS_START TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \
+ | TODO_update_ssa | TODO_rebuild_cgraph_edges
+#include "gcc-generate-gimple-pass.h"
+
+static bool stackleak_cleanup_gate(void)
+{
+ return stackleak_gate();
+}
+
+#define PASS_NAME stackleak_cleanup
+#define TODO_FLAGS_FINISH TODO_dump_func
+#include "gcc-generate-rtl-pass.h"
+
+/*
+ * Every gcc plugin exports a plugin_init() function that is called right
+ * after the plugin is loaded. This function is responsible for registering
+ * the plugin callbacks and doing other required initialization.
+ */
+__visible int plugin_init(struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version)
+{
+ const char * const plugin_name = plugin_info->base_name;
+ const int argc = plugin_info->argc;
+ const struct plugin_argument * const argv = plugin_info->argv;
+ int i = 0;
+
+ /* Extra GGC root tables describing our GTY-ed data */
+ static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
+ {
+ .base = &track_function_decl,
+ .nelt = 1,
+ .stride = sizeof(track_function_decl),
+ .cb = &gt_ggc_mx_tree_node,
+ .pchw = &gt_pch_nx_tree_node
+ },
+ LAST_GGC_ROOT_TAB
+ };
+
+ /*
+ * The stackleak_instrument pass should be executed before the
+ * "optimized" pass, which is the control flow graph cleanup that is
+ * performed just before expanding gcc trees to the RTL. In former
+ * versions of the plugin this new pass was inserted before the
+ * "tree_profile" pass, which is currently called "profile".
+ */
+ PASS_INFO(stackleak_instrument, "optimized", 1,
+ PASS_POS_INSERT_BEFORE);
+
+ /*
+ * The stackleak_cleanup pass should be executed after the
+ * "reload" pass, when the stack frame size is final.
+ */
+ PASS_INFO(stackleak_cleanup, "reload", 1, PASS_POS_INSERT_AFTER);
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
+ return 1;
+ }
+
+ /* Parse the plugin arguments */
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i].key, "disable"))
+ return 0;
+
+ if (!strcmp(argv[i].key, "track-min-size")) {
+ if (!argv[i].value) {
+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"),
+ plugin_name, argv[i].key);
+ return 1;
+ }
+
+ track_frame_size = atoi(argv[i].value);
+ if (track_frame_size < 0) {
+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"),
+ plugin_name, argv[i].key, argv[i].value);
+ return 1;
+ }
+ } else {
+ error(G_("unknown option '-fplugin-arg-%s-%s'"),
+ plugin_name, argv[i].key);
+ return 1;
+ }
+ }
+
+ /* Give the information about the plugin */
+ register_callback(plugin_name, PLUGIN_INFO, NULL,
+ &stackleak_plugin_info);
+
+ /* Register to be called before processing a translation unit */
+ register_callback(plugin_name, PLUGIN_START_UNIT,
+ &stackleak_start_unit, NULL);
+
+ /* Register an extra GCC garbage collector (GGC) root table */
+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL,
+ (void *)&gt_ggc_r_gt_stackleak);
+
+ /*
+ * Hook into the Pass Manager to register new gcc passes.
+ *
+ * The stack frame size info is available only at the last RTL pass,
+ * when it's too late to insert complex code like a function call.
+ * So we register two gcc passes to instrument every function at first
+ * and remove the unneeded instrumentation later.
+ */
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+ &stackleak_instrument_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+ &stackleak_cleanup_pass_info);
+
+ return 0;
+}
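
As a closing illustration of the plugin's net effect (a sketch, not compiler output; the function and variable names below are made up): the GIMPLE pass instruments every function, and the RTL cleanup pass above later removes the call again from functions that neither use alloca() nor reach the track-min-size frame threshold. In C terms, a function whose instrumentation survives is roughly equivalent to:

/* Illustration only: what the surviving instrumentation amounts to. */
void stackleak_track_stack(void);	/* real definition lives in kernel/stackleak.c */

void example_with_large_frame(void)
{
	char buf[256];			/* frame size >= track-min-size (default 100) */

	stackleak_track_stack();	/* call inserted at function entry by the GIMPLE pass */

	/* ... function body using buf ... */
	buf[0] = '\0';
}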