From 9e09d445f1cab518eedf4454d119ead27979b8f4 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 11 Apr 2023 17:29:41 +0100
Subject: arm64: stacktrace: recover return address for first entry

The function which calls the top-level backtracing function may have been
instrumented with ftrace and/or kprobes, and hence the first return address
may have been rewritten.

Factor out the existing fgraph / kretprobes address recovery, and use this for
the first address. As the comment for the fgraph case isn't all that helpful,
I've also dropped that.

Signed-off-by: Mark Rutland
Reviewed-by: Kalesh Singh
Cc: Catalin Marinas
Cc: Madhavan T. Venkataraman
Cc: Mark Brown
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230411162943.203199-2-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/stacktrace.c | 53 ++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 23 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 83154303e682..dd03feba8048 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -69,6 +69,32 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state,
 	state->pc = thread_saved_pc(task);
 }
 
+static __always_inline int
+unwind_recover_return_address(struct unwind_state *state)
+{
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (state->task->ret_stack &&
+	    (state->pc == (unsigned long)return_to_handler)) {
+		unsigned long orig_pc;
+		orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
+						(void *)state->fp);
+		if (WARN_ON_ONCE(state->pc == orig_pc))
+			return -EINVAL;
+		state->pc = orig_pc;
+	}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KRETPROBES
+	if (is_kretprobe_trampoline(state->pc)) {
+		state->pc = kretprobe_find_ret_addr(state->task,
+						    (void *)state->fp,
+						    &state->kr_cur);
+	}
+#endif /* CONFIG_KRETPROBES */
+
+	return 0;
+}
+
 /*
  * Unwind from one frame record (A) to the next frame record (B).
  *
@@ -92,35 +118,16 @@ static int notrace unwind_next(struct unwind_state *state)
 
 	state->pc = ptrauth_strip_insn_pac(state->pc);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (tsk->ret_stack &&
-	    (state->pc == (unsigned long)return_to_handler)) {
-		unsigned long orig_pc;
-		/*
-		 * This is a case where function graph tracer has
-		 * modified a return address (LR) in a stack frame
-		 * to hook a function return.
-		 * So replace it to an original value.
-		 */
-		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
-						(void *)state->fp);
-		if (WARN_ON_ONCE(state->pc == orig_pc))
-			return -EINVAL;
-		state->pc = orig_pc;
-	}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#ifdef CONFIG_KRETPROBES
-	if (is_kretprobe_trampoline(state->pc))
-		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
-#endif
-
-	return 0;
+	return unwind_recover_return_address(state);
 }
 NOKPROBE_SYMBOL(unwind_next);
 
 static void notrace unwind(struct unwind_state *state,
 			   stack_trace_consume_fn consume_entry, void *cookie)
 {
+	if (unwind_recover_return_address(state))
+		return;
+
 	while (1) {
 		int ret;
 
-- cgit v1.2.3
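The situation being handled can be modelled outside the kernel: when the function
graph tracer hooks a return, the saved LR is replaced with the address of
return_to_handler, and the real address must be recovered from a per-task shadow
stack keyed by the frame pointer. The standalone C sketch below illustrates that
recovery; the names and the flat array are simplified stand-ins for
ftrace_graph_ret_addr() and the task's ret_stack, not the kernel's actual data
structures.

/* Standalone model of fgraph-style return-address recovery (not kernel code). */
#include <stdio.h>
#include <stdint.h>

#define MAX_DEPTH 16

/* One shadow entry per hooked return: the real return address, keyed by frame pointer. */
struct ret_entry {
        uint64_t fp;
        uint64_t real_ret;
};

static struct ret_entry ret_stack[MAX_DEPTH];
static int ret_depth;

/* Stand-in for the kernel's return_to_handler trampoline address. */
#define RETURN_TO_HANDLER 0xdeadbeef00000000ULL

/* What the tracer does when hooking a return: save the real LR, substitute the trampoline. */
static uint64_t hook_return(uint64_t fp, uint64_t real_ret)
{
        ret_stack[ret_depth].fp = fp;
        ret_stack[ret_depth].real_ret = real_ret;
        ret_depth++;
        return RETURN_TO_HANDLER;
}

/*
 * What the unwinder must do for every entry, including the very first one:
 * if the pc is the trampoline, look up the real address by frame pointer.
 */
static uint64_t recover_return_address(uint64_t pc, uint64_t fp)
{
        if (pc != RETURN_TO_HANDLER)
                return pc;
        for (int i = ret_depth - 1; i >= 0; i--) {
                if (ret_stack[i].fp == fp)
                        return ret_stack[i].real_ret;
        }
        return pc;      /* lookup failed; the kernel treats this as an error */
}

int main(void)
{
        uint64_t fp = 0x1000, real = 0xffff800008123456ULL;
        uint64_t pc = hook_return(fp, real);

        printf("raw pc:       %#llx (trampoline)\n", (unsigned long long)pc);
        printf("recovered pc: %#llx\n", (unsigned long long)recover_return_address(pc, fp));
        return 0;
}

The kernel's version additionally WARNs and aborts the unwind if the lookup
fails, as seen in unwind_recover_return_address() in the patch above.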
From ead6122c289eec06bc1bd167442dfab358fafefb Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 11 Apr 2023 17:29:42 +0100
Subject: arm64: stacktrace: move dump functions to end of file

For historical reasons, the backtrace dumping functions are placed in the
middle of stacktrace.c, despite using functions defined later. For clarity, and
to make subsequent refactoring easier, move the dumping functions to the end of
stacktrace.c.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Reviewed-by: Kalesh Singh
Cc: Catalin Marinas
Cc: Madhavan T. Venkataraman
Cc: Mark Brown
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230411162943.203199-3-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/stacktrace.c | 66 +++++++++++++++++++++---------------------
 1 file changed, 33 insertions(+), 33 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index dd03feba8048..ee398cf5141f 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -140,39 +140,6 @@ static void notrace unwind(struct unwind_state *state,
 }
 NOKPROBE_SYMBOL(unwind);
 
-static bool dump_backtrace_entry(void *arg, unsigned long where)
-{
-	char *loglvl = arg;
-	printk("%s %pSb\n", loglvl, (void *)where);
-	return true;
-}
-
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
-		    const char *loglvl)
-{
-	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
-	if (regs && user_mode(regs))
-		return;
-
-	if (!tsk)
-		tsk = current;
-
-	if (!try_get_task_stack(tsk))
-		return;
-
-	printk("%sCall trace:\n", loglvl);
-	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
-
-	put_task_stack(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
-{
-	dump_backtrace(NULL, tsk, loglvl);
-	barrier();
-}
-
 /*
  * Per-cpu stacks are only accessible when unwinding the current task in a
  * non-preemptible context.
@@ -237,3 +204,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
 
 	unwind(&state, consume_entry, cookie);
 }
+
+static bool dump_backtrace_entry(void *arg, unsigned long where)
+{
+	char *loglvl = arg;
+	printk("%s %pSb\n", loglvl, (void *)where);
+	return true;
+}
+
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+		    const char *loglvl)
+{
+	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+	if (regs && user_mode(regs))
+		return;
+
+	if (!tsk)
+		tsk = current;
+
+	if (!try_get_task_stack(tsk))
+		return;
+
+	printk("%sCall trace:\n", loglvl);
+	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
+
+	put_task_stack(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+{
+	dump_backtrace(NULL, tsk, loglvl);
+	barrier();
+}
-- cgit v1.2.3

From b5ecc19e684eee1df32a035b7d981932f344fc67 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 11 Apr 2023 17:29:43 +0100
Subject: arm64: stacktrace: always inline core stacktrace functions

The arm64 stacktrace code can be used in kprobe context, and so cannot be
safely probed. Some (but not all) of the unwind functions are annotated with
`NOKPROBE_SYMBOL()` to ensure this, with others marked as `__always_inline`,
relying on the top-level unwind function being marked as `noinstr`.

This patch has stacktrace.c consistently mark the internal stacktrace functions
as `__always_inline`, removing the need for NOKPROBE_SYMBOL() as the top-level
unwind function (arch_stack_walk()) is marked as `noinstr`. This is more
consistent and is a simpler pattern to follow for future additions to
stacktrace.c.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Reviewed-by: Kalesh Singh
Cc: Catalin Marinas
Cc: Madhavan T. Venkataraman
Cc: Mark Brown
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230411162943.203199-4-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/stacktrace.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ee398cf5141f..f527fadcb33b 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -25,8 +25,9 @@
  *
  * The regs must be on a stack currently owned by the calling task.
  */
-static __always_inline void unwind_init_from_regs(struct unwind_state *state,
-						  struct pt_regs *regs)
+static __always_inline void
+unwind_init_from_regs(struct unwind_state *state,
+		      struct pt_regs *regs)
 {
 	unwind_init_common(state, current);
 
@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
  *
  * The function which invokes this must be noinline.
  */
-static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+static __always_inline void
+unwind_init_from_caller(struct unwind_state *state)
 {
 	unwind_init_common(state, current);
 
@@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
  * duration of the unwind, or the unwind will be bogus. It is never valid to
  * call this for the current task.
  */
-static __always_inline void unwind_init_from_task(struct unwind_state *state,
-						  struct task_struct *task)
+static __always_inline void
+unwind_init_from_task(struct unwind_state *state,
+		      struct task_struct *task)
 {
 	unwind_init_common(state, task);
 
@@ -102,7 +105,8 @@ unwind_recover_return_address(struct unwind_state *state)
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct unwind_state *state)
+static __always_inline int
+unwind_next(struct unwind_state *state)
 {
 	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
@@ -120,10 +124,10 @@ static int notrace unwind_next(struct unwind_state *state)
 
 	return unwind_recover_return_address(state);
 }
-NOKPROBE_SYMBOL(unwind_next);
 
-static void notrace unwind(struct unwind_state *state,
-			   stack_trace_consume_fn consume_entry, void *cookie)
+static __always_inline void
+unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
+       void *cookie)
 {
 	if (unwind_recover_return_address(state))
 		return;
@@ -138,7 +142,6 @@ static void notrace unwind(struct unwind_state *state,
 		break;
 	}
 }
-NOKPROBE_SYMBOL(unwind);
 
 /*
  * Per-cpu stacks are only accessible when unwinding the current task in a
-- cgit v1.2.3
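The inlining pattern this series converges on can be shown in miniature: force
the helpers inline so they never exist as independently probeable symbols, and
leave a single out-of-line entry point. The sketch below is only an
illustration under that assumption; the noinstr stand-in here is plain
noinline, whereas the kernel's attribute also suppresses tracing and other
instrumentation.

/* Simplified illustration of the "__always_inline helpers, single noinstr entry" pattern. */
#include <stdio.h>

#define __always_inline inline __attribute__((__always_inline__))
/* Stand-in only: the kernel's noinstr also disables instrumentation. */
#define noinstr         __attribute__((__noinline__))

/* Forced-inline helpers never exist as separate symbols, so there is nothing
 * for a kprobe to land on and no need for NOKPROBE_SYMBOL() on each of them. */
static __always_inline int unwind_step(int *frame)
{
        return --(*frame) > 0 ? 0 : -1;
}

/* Only the top-level entry point remains as a real, out-of-line symbol. */
noinstr void walk_stack(int frames)
{
        while (!unwind_step(&frames))
                printf("frame %d\n", frames);
}

int main(void)
{
        walk_stack(4);
        return 0;
}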
From 9df3f5082ff94c55e84523a28c040445220b2f64 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 12 Apr 2023 17:01:32 +0100
Subject: arm64: avoid redundant PAC stripping in __builtin_return_address()

In old versions of GCC and Clang, __builtin_return_address() did not strip the
PAC. This was not the behaviour we desired, and so we wrapped this with code to
strip the PAC in commit:

  689eae42afd7a916 ("arm64: mask PAC bits of __builtin_return_address")

Since then, both GCC and Clang decided that __builtin_return_address() *should*
strip the PAC, and the existing behaviour was a bug.

GCC was fixed in 11.1.0, with those fixes backported to 10.2.0, 9.4.0, 8.5.0,
but not earlier:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891

Clang was fixed in 12.0.0, though this was not backported:

  https://reviews.llvm.org/D75044

When using a compiler whose __builtin_return_address() strips the PAC, our
wrapper to strip the PAC is redundant. Similarly, when pointer authentication
is not in use within the kernel, pointers will not have a PAC, and so there's
no point stripping those pointers.

To avoid this redundant work, this patch updates the __builtin_return_address()
wrapper to only be used when in-kernel pointer authentication is configured and
the compiler's __builtin_return_address() does not strip the PAC.

This is a cleanup/optimization, and not a fix that requires backporting.
Stripping a PAC should be an idempotent operation, and so redundantly stripping
the PAC is not harmful.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Amit Daniel Kachhap
Cc: Catalin Marinas
Cc: James Morse
Cc: Kristina Martsenko
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230412160134.306148-2-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig                | 14 ++++++++++++++
 arch/arm64/include/asm/compiler.h |  3 +++
 2 files changed, 17 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1023e896d46b..d69b624b3083 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -362,6 +362,20 @@ config ARCH_PROC_KCORE_TEXT
 config BROKEN_GAS_INST
 	def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
 
+config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
+	bool
+	# Clang's __builtin_return_address() strips the PAC since 12.0.0
+	# https://reviews.llvm.org/D75044
+	default y if CC_IS_CLANG && (CLANG_VERSION >= 120000)
+	# GCC's __builtin_return_address() strips the PAC since 11.1.0,
+	# and this was backported to 10.2.0, 9.4.0, 8.5.0, but not earlier
+	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891
+	default y if CC_IS_GCC && (GCC_VERSION >= 110100)
+	default y if CC_IS_GCC && (GCC_VERSION >= 100200) && (GCC_VERSION < 110000)
+	default y if CC_IS_GCC && (GCC_VERSION >= 90400) && (GCC_VERSION < 100000)
+	default y if CC_IS_GCC && (GCC_VERSION >= 80500) && (GCC_VERSION < 90000)
+	default n
+
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN_GENERIC || KASAN_SW_TAGS
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 6fb2e6bcc392..9033a0b2f46a 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -20,7 +20,10 @@
 	((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) :	\
 			       (ptr & ~ptrauth_user_pac_mask()))
 
+#if defined(CONFIG_ARM64_PTR_AUTH_KERNEL) && \
+    !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
 #define __builtin_return_address(val)					\
 	(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+#endif
 
 #endif /* __ASM_COMPILER_H */
-- cgit v1.2.3
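The Kconfig logic above amounts to a compiler-version test; roughly the same
decision can be expressed directly with predefined compiler macros. The
fragment below is a hedged illustration using the version thresholds quoted in
the commit message; HAVE_PTR_AUTH_KERNEL, strip_kernel_pac() and
return_address() are made-up names for the sketch, not kernel identifiers.

/* Illustrative only: mirrors the Kconfig decision in plain preprocessor terms. */
#include <stdio.h>

#if defined(__clang__)
#  if __clang_major__ >= 12
#    define BUILTIN_RETURN_ADDRESS_STRIPS_PAC 1
#  endif
#elif defined(__GNUC__)
   /* GCC >= 11.1, or the 10.2 / 9.4 / 8.5 backports */
#  if (__GNUC__ > 11) || \
      (__GNUC__ == 11 && __GNUC_MINOR__ >= 1) || \
      (__GNUC__ == 10 && __GNUC_MINOR__ >= 2) || \
      (__GNUC__ ==  9 && __GNUC_MINOR__ >= 4) || \
      (__GNUC__ ==  8 && __GNUC_MINOR__ >= 5)
#    define BUILTIN_RETURN_ADDRESS_STRIPS_PAC 1
#  endif
#endif

/* Dummy so the wrapped branch would also link; in the kernel this is the PAC strip. */
unsigned long strip_kernel_pac(unsigned long p) { return p; }

#if defined(HAVE_PTR_AUTH_KERNEL) && !defined(BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
/* Only now is it worth wrapping the builtin to strip the PAC by hand. */
#  define return_address(n) \
	((void *)strip_kernel_pac((unsigned long)__builtin_return_address(n)))
#else
#  define return_address(n)	__builtin_return_address(n)
#endif

int main(void)
{
	printf("caller: %p\n", return_address(0));
	return 0;
}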
From ca708599ca43567cdd69f799454f95d1c80ffee5 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 12 Apr 2023 17:01:33 +0100
Subject: arm64: use XPACLRI to strip PAC

Currently we strip the PAC from pointers using C code, which requires
generating bitmasks, and conditionally clearing/setting bits depending on bit
55. We can do better by using XPACLRI directly.

When the logic was originally written to strip PACs from user pointers,
contemporary toolchains used for the kernel had assemblers which were unaware
of the PAC instructions. As stripping the PAC from userspace pointers required
unconditional clearing of a fixed set of bits (which could be performed with a
single instruction), it was simpler to implement the masking in C than it was
to make use of XPACI or XPACLRI.

When support for in-kernel pointer authentication was added, the stripping
logic was extended to cover TTBR1 pointers, requiring several instructions to
handle whether to clear/set bits dependent on bit 55 of the pointer.

This patch simplifies the stripping of PACs by using XPACLRI directly, as
contemporary toolchains do within __builtin_return_address(). This saves a
number of instructions, especially where __builtin_return_address() does not
implicitly strip the PAC but is heavily used (e.g. with tracepoints). As the
kernel might be compiled with an assembler without knowledge of XPACLRI, it is
assembled using the 'HINT #7' alias, which results in an identical opcode.

At the same time, I've split ptrauth_strip_insn_pac() into
ptrauth_strip_user_insn_pac() and ptrauth_strip_kernel_insn_pac() helpers so
that we can avoid unnecessary PAC stripping when pointer authentication is not
in use in userspace or kernel respectively.

The underlying xpaclri() macro uses inline assembly which clobbers x30. The
clobber causes the compiler to save/restore the original x30 value in a frame
record (protected with PACIASP and AUTIASP when in-kernel authentication is
enabled), so this does not provide a gadget to alter the return address.
Similarly this does not adversely affect unwinding due to the presence of the
frame record.

The ptrauth_user_pac_mask() and ptrauth_kernel_pac_mask() are exported from the
kernel in ptrace and core dumps, so these are retained. A subsequent patch will
move them out of <asm/compiler.h>.

Signed-off-by: Mark Rutland
Cc: Amit Daniel Kachhap
Cc: Catalin Marinas
Cc: James Morse
Cc: Kristina Martsenko
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230412160134.306148-3-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/compiler.h     | 32 +++++++++++++++++++++++++-------
 arch/arm64/include/asm/pointer_auth.h |  6 ------
 arch/arm64/kernel/perf_callchain.c    |  2 +-
 arch/arm64/kernel/process.c           |  2 +-
 arch/arm64/kernel/stacktrace.c        |  2 +-
 5 files changed, 28 insertions(+), 16 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 9033a0b2f46a..b6d1d62474e5 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -15,15 +15,33 @@
 #define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
 #define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
 
-/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
-#define ptrauth_clear_pac(ptr)						\
-	((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) :	\
-			       (ptr & ~ptrauth_user_pac_mask()))
+#define xpaclri(ptr)							\
+({									\
+	register unsigned long __xpaclri_ptr asm("x30") = (ptr);	\
+									\
+	asm(								\
+	ARM64_ASM_PREAMBLE						\
+	"	hint #7\n"						\
+	: "+r" (__xpaclri_ptr));					\
+									\
+	__xpaclri_ptr;							\
+})
 
-#if defined(CONFIG_ARM64_PTR_AUTH_KERNEL) && \
-    !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_strip_kernel_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_kernel_insn_pac(ptr)	(ptr)
+#endif
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define ptrauth_strip_user_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_user_insn_pac(ptr)	(ptr)
+#endif
+
+#if !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
 #define __builtin_return_address(val)					\
-	(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+	(void *)(ptrauth_strip_kernel_insn_pac((unsigned long)__builtin_return_address(val)))
 #endif
 
 #endif /* __ASM_COMPILER_H */
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index efb098de3a84..b0665db86aa6 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -97,11 +97,6 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
 				    unsigned long enabled);
 extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
 
-static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
-{
-	return ptrauth_clear_pac(ptr);
-}
-
 static __always_inline void ptrauth_enable(void)
 {
 	if (!system_supports_address_auth())
@@ -133,7 +128,6 @@ static __always_inline void ptrauth_enable(void)
 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_set_enabled_keys(tsk, keys, enabled)	(-EINVAL)
 #define ptrauth_get_enabled_keys(tsk)	(-EINVAL)
-#define ptrauth_strip_insn_pac(lr)	(lr)
 #define ptrauth_suspend_exit()
 #define ptrauth_thread_init_user()
 #define ptrauth_thread_switch_user(tsk)
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 65b196e3ca6c..6d157f32187b 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -38,7 +38,7 @@ user_backtrace(struct frame_tail __user *tail,
 	if (err)
 		return NULL;
 
-	lr = ptrauth_strip_insn_pac(buftail.lr);
+	lr = ptrauth_strip_user_insn_pac(buftail.lr);
 
 	perf_callchain_store(entry, lr);
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 71d59b5abede..b5bed62483cb 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -217,7 +217,7 @@ void __show_regs(struct pt_regs *regs)
 
 	if (!user_mode(regs)) {
 		printk("pc : %pS\n", (void *)regs->pc);
-		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
 	} else {
 		printk("pc : %016llx\n", regs->pc);
 		printk("lr : %016llx\n", lr);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index f527fadcb33b..17f66a74c745 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -120,7 +120,7 @@ unwind_next(struct unwind_state *state)
 	if (err)
 		return err;
 
-	state->pc = ptrauth_strip_insn_pac(state->pc);
+	state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
 
 	return unwind_recover_return_address(state);
 }
-- cgit v1.2.3
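The C masking that XPACLRI replaces is easy to reproduce in userspace for
comparison. The sketch below reimplements the removed ptrauth_clear_pac()
logic for an assumed 48-bit VA configuration (VABITS_ACTUAL is a stand-in
constant and the example addresses are invented): bit 55 selects whether the
PAC bits are set (TTBR1/kernel pointer) or cleared (TTBR0/user pointer), which
is the conditional work a single XPACLRI, or its 'HINT #7' alias, performs in
one instruction.

/* Userspace model of the removed C-based PAC stripping (illustrative only). */
#include <stdio.h>
#include <stdint.h>

#define VABITS_ACTUAL	48ULL		/* stand-in for the kernel's vabits_actual */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define user_pac_mask()		GENMASK_ULL(54, VABITS_ACTUAL)
#define kernel_pac_mask()	GENMASK_ULL(63, VABITS_ACTUAL)

/* Equivalent of the old ptrauth_clear_pac(): bit 55 selects TTBR1 vs TTBR0 handling. */
static uint64_t clear_pac(uint64_t ptr)
{
	return (ptr & (1ULL << 55)) ? (ptr | kernel_pac_mask())
				    : (ptr & ~user_pac_mask());
}

int main(void)
{
	uint64_t kernel_lr = 0x7f80800008123456ULL;	/* made-up kernel pointer with a PAC */
	uint64_t user_lr   = 0x002a0000aabbccddULL;	/* made-up user pointer with a PAC */

	printf("kernel: %016llx -> %016llx\n",
	       (unsigned long long)kernel_lr, (unsigned long long)clear_pac(kernel_lr));
	printf("user:   %016llx -> %016llx\n",
	       (unsigned long long)user_lr, (unsigned long long)clear_pac(user_lr));
	return 0;
}

Running it shows the made-up kernel pointer 0x7f80800008123456 being restored
to 0xffff800008123456 once the PAC bits are replaced by the sign extension,
while the user pointer simply has its PAC bits cleared.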
From de1702f65feb516d5394e81a78519de9dc567031 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 12 Apr 2023 17:01:34 +0100
Subject: arm64: move PAC masks to <asm/pointer_auth.h>

Now that we use XPACLRI to strip PACs within the kernel, the
ptrauth_user_pac_mask() and ptrauth_kernel_pac_mask() definitions no longer
need to live in <asm/compiler.h>. Move them to <asm/pointer_auth.h>, and
ensure that this header is included where they are used.

Signed-off-by: Mark Rutland
Cc: Amit Daniel Kachhap
Cc: Catalin Marinas
Cc: James Morse
Cc: Kristina Martsenko
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230412160134.306148-4-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/compiler.h     | 7 -------
 arch/arm64/include/asm/pointer_auth.h | 7 +++++++
 arch/arm64/kernel/crash_core.c        | 1 +
 3 files changed, 8 insertions(+), 7 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index b6d1d62474e5..9bbd7b7097ff 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -8,13 +8,6 @@
 #define ARM64_ASM_PREAMBLE
 #endif
 
-/*
- * The EL0/EL1 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
-#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
-
 #define xpaclri(ptr)							\
 ({									\
 	register unsigned long __xpaclri_ptr asm("x30") = (ptr);	\
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index b0665db86aa6..d2e0306e65d3 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -10,6 +10,13 @@
 #include
 #include
 
+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code.
+ * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
+
 #define PR_PAC_ENABLED_KEYS_MASK                                               \
 	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
 
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
index 2b65aae332ce..66cde752cd74 100644
--- a/arch/arm64/kernel/crash_core.c
+++ b/arch/arm64/kernel/crash_core.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include <asm/pointer_auth.h>
 
 static inline u64 get_tcr_el1_t1sz(void);
 
-- cgit v1.2.3