author     Brian Gerst <brgerst@gmail.com>    2020-06-18 01:56:24 +0300
committer  Borislav Petkov <bp@suse.de>       2020-06-18 14:09:17 +0300
commit     c9a1ff316bc9b1d1806a4366d0aef6e18833ba52 (patch)
tree       f2dae0b0a21105a645d90444b3effddc06504c94 /arch/x86/include/asm/stackprotector.h
parent     a5ce9f2bb665d1d2b31f139a02dbaa2dfbb62fa6 (diff)
download   linux-c9a1ff316bc9b1d1806a4366d0aef6e18833ba52.tar.xz
x86/stackprotector: Pre-initialize canary for secondary CPUs
The idle tasks created for each secondary CPU already have a random stack
canary generated by fork(). Copy the canary to the percpu variable before
starting the secondary CPU, which removes the need to call
boot_init_stack_canary().

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200617225624.799335-1-brgerst@gmail.com
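For context, a minimal sketch of the boot-CPU side this change relies on. The
caller lives in the SMP bring-up code and is not part of this diffstat, so the
function name and placement below are illustrative assumptions, not the actual
patch:

/*
 * Sketch only: an assumed caller on the boot CPU.  The real call site is
 * in the x86 SMP bring-up path (outside this diffstat);
 * prepare_secondary_cpu() is a hypothetical name.
 */
#include <linux/sched.h>
#include <asm/stackprotector.h>

static void prepare_secondary_cpu(int cpu, struct task_struct *idle)
{
	/*
	 * The idle task already carries a random canary from fork();
	 * publish it in the target CPU's percpu area so the AP sees a
	 * valid canary as soon as it starts running C code.
	 */
	cpu_init_stack_canary(cpu, idle);

	/* ...then send INIT/SIPI to actually wake the AP... */
}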
Diffstat (limited to 'arch/x86/include/asm/stackprotector.h')
-rw-r--r--  arch/x86/include/asm/stackprotector.h  12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 9804a7957f4e..7fb482f0f25b 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -90,6 +90,15 @@ static __always_inline void boot_init_stack_canary(void)
 #endif
 }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_X86_64
+	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
+#else
+	per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
+#endif
+}
+
 static inline void setup_stack_canary_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -119,6 +128,9 @@ static inline void load_stack_canary_segment(void)
 static inline void setup_stack_canary_segment(int cpu)
 { }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{ }
+
 static inline void load_stack_canary_segment(void)
 {
 #ifdef CONFIG_X86_32
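On the secondary-CPU side, the commit message says the explicit
boot_init_stack_canary() call is no longer needed. A rough sketch of what that
looks like in the AP entry path, assuming the conventional start_secondary()
entry point in the x86 SMP bring-up code (also outside this diffstat):

/*
 * Sketch only: assumed AP entry point, not part of this diff.  Because the
 * boot CPU already copied idle->stack_canary into this CPU's percpu slot
 * via cpu_init_stack_canary(), no boot_init_stack_canary() call is needed
 * before running stack-protected functions here.
 */
static void notrace start_secondary(void *unused)
{
	cpu_init();
	/* ... remaining secondary-CPU initialization, then the idle loop ... */
}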