From 1beef60e7d6b90be826a73f626204f55a4cd7640 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Fri, 24 Nov 2023 11:05:10 +0000
Subject: arm64: stacktrace: factor out kernel unwind state

On arm64 we share some unwinding code between the regular kernel
unwinder and the KVM hyp unwinder. Some of this common code only
matters to the regular unwinder, e.g. the `kr_cur` and `task` fields
in the common struct unwind_state.

We're likely to add more state which only matters for regular kernel
unwinding (or only for hyp unwinding). In preparation for such
changes, this patch factors out the kernel-specific state into a new
struct kunwind_state, and updates the kernel unwind code accordingly.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Kalesh Singh
Cc: Madhavan T. Venkataraman
Cc: Mark Brown
Cc: Puranjay Mohan
Cc: Will Deacon
Reviewed-by: Puranjay Mohan
Reviewed-by: Kalesh Singh
Reviewed-by: Madhavan T. Venkataraman
Reviewed-by: Mark Brown
Link: https://lore.kernel.org/r/20231124110511.2795958-2-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/stacktrace/common.h |  19 +----
 arch/arm64/include/asm/stacktrace/nvhe.h   |   2 +-
 arch/arm64/kernel/stacktrace.c             | 113 ++++++++++++++++++-----------
 3 files changed, 74 insertions(+), 60 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 508f734de46e..f63dc654e545 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -9,7 +9,6 @@
 #ifndef __ASM_STACKTRACE_COMMON_H
 #define __ASM_STACKTRACE_COMMON_H
 
-#include <linux/kprobes.h>
 #include <linux/types.h>
 
 struct stack_info {
@@ -23,12 +22,6 @@ struct stack_info {
  * @fp:          The fp value in the frame record (or the real fp)
  * @pc:          The lr value in the frame record (or the real lr)
  *
- * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
- *               associated with the most recently encountered replacement lr
- *               value.
- *
- * @task:        The task being unwound.
- *
  * @stack:       The stack currently being unwound.
  * @stacks:      An array of stacks which can be unwound.
  * @nr_stacks:   The number of stacks in @stacks.
@@ -36,10 +29,6 @@ struct stack_info {
 struct unwind_state {
 	unsigned long fp;
 	unsigned long pc;
-#ifdef CONFIG_KRETPROBES
-	struct llist_node *kr_cur;
-#endif
-	struct task_struct *task;
 
 	struct stack_info stack;
 	struct stack_info *stacks;
@@ -66,14 +55,8 @@ static inline bool stackinfo_on_stack(const struct stack_info *info,
 	return true;
 }
 
-static inline void unwind_init_common(struct unwind_state *state,
-				      struct task_struct *task)
+static inline void unwind_init_common(struct unwind_state *state)
 {
-	state->task = task;
-#ifdef CONFIG_KRETPROBES
-	state->kr_cur = NULL;
-#endif
-
 	state->stack = stackinfo_get_unknown();
 }
 
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 25ab83a315a7..44759281d0d4 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -31,7 +31,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
 					 unsigned long fp,
 					 unsigned long pc)
 {
-	unwind_init_common(state, NULL);
+	unwind_init_common(state);
 
 	state->fp = fp;
 	state->pc = pc;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 17f66a74c745..aafc89192787 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -8,6 +8,7 @@
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -18,6 +19,31 @@
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
+/*
+ * Kernel unwind state
+ *
+ * @common:      Common unwind state.
+ * @task:        The task being unwound.
+ * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
+ *               associated with the most recently encountered replacement lr
+ *               value.
+ */
+struct kunwind_state {
+	struct unwind_state common;
+	struct task_struct *task;
+#ifdef CONFIG_KRETPROBES
+	struct llist_node *kr_cur;
+#endif
+};
+
+static __always_inline void
+kunwind_init(struct kunwind_state *state,
+	     struct task_struct *task)
+{
+	unwind_init_common(&state->common);
+	state->task = task;
+}
+
 /*
  * Start an unwind from a pt_regs.
  *
@@ -26,13 +52,13 @@
  * The regs must be on a stack currently owned by the calling task.
  */
 static __always_inline void
-unwind_init_from_regs(struct unwind_state *state,
-		      struct pt_regs *regs)
+kunwind_init_from_regs(struct kunwind_state *state,
+		       struct pt_regs *regs)
 {
-	unwind_init_common(state, current);
+	kunwind_init(state, current);
 
-	state->fp = regs->regs[29];
-	state->pc = regs->pc;
+	state->common.fp = regs->regs[29];
+	state->common.pc = regs->pc;
 }
 
 /*
@@ -44,12 +70,12 @@ unwind_init_from_regs(struct unwind_state *state,
  * The function which invokes this must be noinline.
  */
 static __always_inline void
-unwind_init_from_caller(struct unwind_state *state)
+kunwind_init_from_caller(struct kunwind_state *state)
 {
-	unwind_init_common(state, current);
+	kunwind_init(state, current);
 
-	state->fp = (unsigned long)__builtin_frame_address(1);
-	state->pc = (unsigned long)__builtin_return_address(0);
+	state->common.fp = (unsigned long)__builtin_frame_address(1);
+	state->common.pc = (unsigned long)__builtin_return_address(0);
 }
 
 /*
@@ -63,35 +89,38 @@ unwind_init_from_caller(struct unwind_state *state)
  * call this for the current task.
  */
 static __always_inline void
-unwind_init_from_task(struct unwind_state *state,
-		      struct task_struct *task)
+kunwind_init_from_task(struct kunwind_state *state,
+		       struct task_struct *task)
 {
-	unwind_init_common(state, task);
+	kunwind_init(state, task);
 
-	state->fp = thread_saved_fp(task);
-	state->pc = thread_saved_pc(task);
+	state->common.fp = thread_saved_fp(task);
+	state->common.pc = thread_saved_pc(task);
 }
 
 static __always_inline int
-unwind_recover_return_address(struct unwind_state *state)
+kunwind_recover_return_address(struct kunwind_state *state)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (state->task->ret_stack &&
-	    (state->pc == (unsigned long)return_to_handler)) {
+	    (state->common.pc == (unsigned long)return_to_handler)) {
 		unsigned long orig_pc;
-		orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
-						(void *)state->fp);
-		if (WARN_ON_ONCE(state->pc == orig_pc))
+		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
+						state->common.pc,
+						(void *)state->common.fp);
+		if (WARN_ON_ONCE(state->common.pc == orig_pc))
 			return -EINVAL;
-		state->pc = orig_pc;
+		state->common.pc = orig_pc;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #ifdef CONFIG_KRETPROBES
-	if (is_kretprobe_trampoline(state->pc)) {
-		state->pc = kretprobe_find_ret_addr(state->task,
-						    (void *)state->fp,
-						    &state->kr_cur);
+	if (is_kretprobe_trampoline(state->common.pc)) {
+		unsigned long orig_pc;
+		orig_pc = kretprobe_find_ret_addr(state->task,
+						  (void *)state->common.fp,
+						  &state->kr_cur);
+		state->common.pc = orig_pc;
 	}
 #endif /* CONFIG_KRETPROBES */
 
@@ -106,38 +135,38 @@ unwind_recover_return_address(struct unwind_state *state)
  * and the location (but not the fp value) of B.
  */
 static __always_inline int
-unwind_next(struct unwind_state *state)
+kunwind_next(struct kunwind_state *state)
 {
 	struct task_struct *tsk = state->task;
-	unsigned long fp = state->fp;
+	unsigned long fp = state->common.fp;
 	int err;
 
 	/* Final frame; nothing to unwind */
 	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
 		return -ENOENT;
 
-	err = unwind_next_frame_record(state);
+	err = unwind_next_frame_record(&state->common);
 	if (err)
 		return err;
 
-	state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
+	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);
 
-	return unwind_recover_return_address(state);
+	return kunwind_recover_return_address(state);
 }
 
 static __always_inline void
-unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
-       void *cookie)
+do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
+	   void *cookie)
 {
-	if (unwind_recover_return_address(state))
+	if (kunwind_recover_return_address(state))
 		return;
 
 	while (1) {
 		int ret;
 
-		if (!consume_entry(cookie, state->pc))
+		if (!consume_entry(cookie, state->common.pc))
 			break;
-		ret = unwind_next(state);
+		ret = kunwind_next(state);
 		if (ret < 0)
 			break;
 	}
@@ -190,22 +219,24 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
 		STACKINFO_EFI,
 #endif
 	};
-	struct unwind_state state = {
-		.stacks = stacks,
-		.nr_stacks = ARRAY_SIZE(stacks),
+	struct kunwind_state state = {
+		.common = {
+			.stacks = stacks,
+			.nr_stacks = ARRAY_SIZE(stacks),
+		},
 	};
 
 	if (regs) {
 		if (task != current)
 			return;
-		unwind_init_from_regs(&state, regs);
+		kunwind_init_from_regs(&state, regs);
 	} else if (task == current) {
-		unwind_init_from_caller(&state);
+		kunwind_init_from_caller(&state);
 	} else {
-		unwind_init_from_task(&state, task);
+		kunwind_init_from_task(&state, task);
 	}
 
-	unwind(&state, consume_entry, cookie);
+	do_kunwind(&state, consume_entry, cookie);
 }
 
 static bool dump_backtrace_entry(void *arg, unsigned long where)
--
cgit v1.2.3


From 1aba06e7b2b49c1a0e905fcaf9c56d70164fbd8d Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Fri, 24 Nov 2023 11:05:11 +0000
Subject: arm64: stacktrace: factor out kunwind_stack_walk()

Currently arm64 uses the generic arch_stack_walk() interface for all
stack walking code. This only passes a PC value and cookie to the
unwind callback, whereas we'd like to pass some additional information
in some cases. For example, the BPF exception unwinder wants the FP,
for reliable stacktrace we'll want to perform additional checks on
other portions of unwind state, and we'd like to expand the
information printed by dump_backtrace() to include provenance and
reliability information.

As preparation for all of the above, this patch factors the core
unwind logic out of arch_stack_walk() and into a new
kunwind_stack_walk() function which provides all of the unwind state
to a callback function. The existing arch_stack_walk() interface is
implemented atop this.

The kunwind_stack_walk() function is intended to be a private
implementation detail of unwinders in stacktrace.c, and not something
to be exported generally to kernel code. It is __always_inline'd into
its caller so that neither it nor its caller appear in stacktraces
(which is the existing/required behavior for arch_stack_walk() and
friends) and so that the compiler can optimize away some of the
indirection.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Kalesh Singh
Cc: Madhavan T. Venkataraman
Cc: Mark Brown
Cc: Puranjay Mohan
Cc: Will Deacon
Reviewed-by: Kalesh Singh
Reviewed-by: Puranjay Mohan
Reviewed-by: Madhavan T. Venkataraman
Reviewed-by: Mark Brown
Link: https://lore.kernel.org/r/20231124110511.2795958-3-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/stacktrace.c | 39 +++++++++++++++++++++++++++++++++------
 1 file changed, 33 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index aafc89192787..7f88028a00c0 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -154,8 +154,10 @@ kunwind_next(struct kunwind_state *state)
 	return kunwind_recover_return_address(state);
 }
 
+typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
+
 static __always_inline void
-do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
+do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
 	   void *cookie)
 {
 	if (kunwind_recover_return_address(state))
@@ -164,7 +166,7 @@ do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
 	while (1) {
 		int ret;
 
-		if (!consume_entry(cookie, state->common.pc))
+		if (!consume_state(state, cookie))
 			break;
 		ret = kunwind_next(state);
 		if (ret < 0)
@@ -201,9 +203,10 @@ do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
 			: stackinfo_get_unknown();		\
 	})
 
-noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
-				      void *cookie, struct task_struct *task,
-				      struct pt_regs *regs)
+static __always_inline void
+kunwind_stack_walk(kunwind_consume_fn consume_state,
+		   void *cookie, struct task_struct *task,
+		   struct pt_regs *regs)
 {
 	struct stack_info stacks[] = {
 		stackinfo_get_task(task),
@@ -236,7 +239,31 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
 		kunwind_init_from_task(&state, task);
 	}
 
-	do_kunwind(&state, consume_entry, cookie);
+	do_kunwind(&state, consume_state, cookie);
+}
+
+struct kunwind_consume_entry_data {
+	stack_trace_consume_fn consume_entry;
+	void *cookie;
+};
+
+static bool
+arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+	struct kunwind_consume_entry_data *data = cookie;
+	return data->consume_entry(data->cookie, state->common.pc);
+}
+
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+				      void *cookie, struct task_struct *task,
+				      struct pt_regs *regs)
+{
+	struct kunwind_consume_entry_data data = {
+		.consume_entry = consume_entry,
+		.cookie = cookie,
+	};
+
+	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
 }
 
 static bool dump_backtrace_entry(void *arg, unsigned long where)
--
cgit v1.2.3
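
Editor's note: a usage sketch, not part of the patches above. The second
commit message says the BPF exception unwinder wants the FP; the following
illustrates how a consumer inside stacktrace.c could be built on the
kunwind_stack_walk() and struct kunwind_state definitions from these
patches. The function and callback names (kunwind_fp_stack_walk,
kunwind_consume_fp_entry) are hypothetical, mirroring the
arch_stack_walk() wrapper pattern in the second patch.

	/*
	 * Hypothetical consumer built on kunwind_stack_walk(). Unlike
	 * stack_trace_consume_fn, a kunwind_consume_fn callback receives the
	 * whole unwind state, so it can report the FP alongside the PC.
	 */
	struct kunwind_fp_walk_data {
		bool (*consume_fp_entry)(void *cookie, u64 fp, u64 pc);
		void *cookie;
	};

	static bool
	kunwind_consume_fp_entry(const struct kunwind_state *state, void *cookie)
	{
		struct kunwind_fp_walk_data *data = cookie;

		/* state->common.fp is the fp of the frame record being reported. */
		return data->consume_fp_entry(data->cookie, state->common.fp,
					      state->common.pc);
	}

	/*
	 * Walk the current task's stack, passing the FP and PC of each frame
	 * to consume_fp_entry(). This must live in stacktrace.c, since
	 * kunwind_stack_walk() is a private, __always_inline'd
	 * implementation detail there.
	 */
	noinline noinstr void
	kunwind_fp_stack_walk(bool (*consume_fp_entry)(void *, u64, u64),
			      void *cookie)
	{
		struct kunwind_fp_walk_data data = {
			.consume_fp_entry = consume_fp_entry,
			.cookie = cookie,
		};

		kunwind_stack_walk(kunwind_consume_fp_entry, &data, current, NULL);
	}

As with arch_stack_walk(), the wrapper packs the caller's callback and
cookie into a private struct and passes that as the kunwind_stack_walk()
cookie, so the richer kunwind_consume_fn interface never leaks outside
stacktrace.c.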