path: root/arch/arm64/kernel/stacktrace.c
author    Mark Rutland <mark.rutland@arm.com>  2022-09-01 16:06:45 +0300
committer Catalin Marinas <catalin.marinas@arm.com>  2022-09-09 14:30:08 +0300
commit    8df137300d1964c3810991aa2fe17a105348b647 (patch)
tree      5bf40143e616f71417b66d1ece0b2a5fc70cfbe2 /arch/arm64/kernel/stacktrace.c
parent    bd8abd68836b5c2b668afc4fb46d85d687779dec (diff)
arm64: stacktrace: track all stack boundaries explicitly
Currently we call an on_accessible_stack() callback for each step of the
unwinder, requiring redundant work to be performed in the core of the unwind
loop (e.g. disabling preemption around accesses to per-cpu variables
containing stack boundaries). To prevent unwind loops which go through a
stack multiple times, we have to track the set of unwound stacks, requiring
a stack_type enum which needs to cater for all the stacks of all possible
callees. To prevent loops within a stack, we must track the prior FP values.

This patch reworks the unwinder to minimize the work in the core of the
unwind loop, and to remove the need for the stack_type enum. The set of
accessible stacks (and their boundaries) is determined at the start of the
unwind, and the current stack is tracked during the unwind, with completed
stacks removed from the set of accessible stacks. This makes the boundary
checks more accurate (e.g. detecting overlapped frame records), and removes
the need for separate tracking of the prior FP and visited stacks.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220901130646.1316937-9-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
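The core idea can be sketched in a few lines of C. The sketch below is
illustrative only: stack_info, stackinfo_on_stack(), and unwind_state mirror
names visible in the diff, while unwind_consume_stack() and the exact field
layout are simplified assumptions, not the kernel's actual definitions. Each
stack is described by explicit [low, high) boundaries, each frame-record
access is checked for full containment within a single stack, and a stack is
retired from the candidate set once the unwind transitions off it.

	/*
	 * Minimal model of boundary-tracked unwinding; an illustrative
	 * sketch, not the kernel's implementation.
	 */
	#include <stdbool.h>
	#include <stddef.h>

	struct stack_info {
		unsigned long low;	/* lowest valid address; 0 means inaccessible */
		unsigned long high;	/* one past the highest valid address */
	};

	struct unwind_state {
		struct stack_info *stacks;	/* candidate stacks, fixed at init time */
		int nr_stacks;
		struct stack_info *stack;	/* stack currently being unwound */
	};

	/* An object at [sp, sp + size) must lie entirely within one stack. */
	static bool stackinfo_on_stack(const struct stack_info *info,
				       unsigned long sp, unsigned long size)
	{
		if (!info->low)
			return false;
		if (sp < info->low || sp + size < sp || sp + size > info->high)
			return false;
		return true;
	}

	/*
	 * Find the stack containing the object at [sp, sp + size). Stack
	 * transitions are one-way, so when the unwind moves to a new stack
	 * the old stack is retired from the candidate set; a later frame
	 * record pointing back into it then fails the containment check,
	 * terminating a corrupt or looping unwind.
	 */
	static struct stack_info *unwind_consume_stack(struct unwind_state *state,
						       unsigned long sp,
						       unsigned long size)
	{
		for (int i = 0; i < state->nr_stacks; i++) {
			struct stack_info *info = &state->stacks[i];

			if (!stackinfo_on_stack(info, sp, size))
				continue;

			if (state->stack && state->stack != info)
				state->stack->low = 0;	/* retire the old stack */
			state->stack = info;
			return info;
		}
		return NULL;	/* not on any accessible stack: stop unwinding */
	}

This is also why, in the patched arch_stack_walk() below, stacks that are not
safely accessible in the current context are filled in with
stackinfo_get_unknown(): their zero boundaries make every containment check
fail, so the preemption and NMI checks happen once at unwind start rather
than on every step of the loop.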
Diffstat (limited to 'arch/arm64/kernel/stacktrace.c')
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 91
1 file changed, 38 insertions(+), 53 deletions(-)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ca56fd732c2a..9c8820f24262 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -67,57 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state,
 	state->pc = thread_saved_pc(task);
 }
 
-static bool on_accessible_stack(const struct task_struct *tsk,
-				unsigned long sp, unsigned long size,
-				struct stack_info *info)
-{
-	struct stack_info tmp;
-
-	tmp = stackinfo_get_task(tsk);
-	if (stackinfo_on_stack(&tmp, sp, size))
-		goto found;
-
-	/*
-	 * We can only safely access per-cpu stacks when unwinding the current
-	 * task in a non-preemptible context.
-	 */
-	if (tsk != current || preemptible())
-		goto not_found;
-
-	tmp = stackinfo_get_irq();
-	if (stackinfo_on_stack(&tmp, sp, size))
-		goto found;
-
-	tmp = stackinfo_get_overflow();
-	if (stackinfo_on_stack(&tmp, sp, size))
-		goto found;
-
-	/*
-	 * We can only safely access SDEI stacks when unwinding the current
-	 * task in an NMI context.
-	 */
-	if (!IS_ENABLED(CONFIG_VMAP_STACK) ||
-	    !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) ||
-	    !in_nmi())
-		goto not_found;
-
-	tmp = stackinfo_get_sdei_normal();
-	if (stackinfo_on_stack(&tmp, sp, size))
-		goto found;
-
-	tmp = stackinfo_get_sdei_critical();
-	if (stackinfo_on_stack(&tmp, sp, size))
-		goto found;
-
-not_found:
-	*info = stackinfo_get_unknown();
-	return false;
-
-found:
-	*info = tmp;
-	return true;
-}
-
 /*
  * Unwind from one frame record (A) to the next frame record (B).
  *
@@ -135,7 +84,7 @@ static int notrace unwind_next(struct unwind_state *state)
 	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
 		return -ENOENT;
 
-	err = unwind_next_frame_record(state, on_accessible_stack, NULL);
+	err = unwind_next_frame_record(state, NULL);
 	if (err)
 		return err;
 
@@ -215,11 +164,47 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 	barrier();
 }
 
+/*
+ * Per-cpu stacks are only accessible when unwinding the current task in a
+ * non-preemptible context.
+ */
+#define STACKINFO_CPU(name)					\
+	({							\
+		((task == current) && !preemptible())		\
+			? stackinfo_get_##name()		\
+			: stackinfo_get_unknown();		\
+	})
+
+/*
+ * SDEI stacks are only accessible when unwinding the current task in an NMI
+ * context.
+ */
+#define STACKINFO_SDEI(name)					\
+	({							\
+		((task == current) && in_nmi())			\
+			? stackinfo_get_sdei_##name()		\
+			: stackinfo_get_unknown();		\
+	})
+
 noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 			      void *cookie, struct task_struct *task,
 			      struct pt_regs *regs)
 {
-	struct unwind_state state;
+	struct stack_info stacks[] = {
+		stackinfo_get_task(task),
+		STACKINFO_CPU(irq),
+#if defined(CONFIG_VMAP_STACK)
+		STACKINFO_CPU(overflow),
+#endif
+#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+		STACKINFO_SDEI(normal),
+		STACKINFO_SDEI(critical),
+#endif
+	};
+	struct unwind_state state = {
+		.stacks = stacks,
+		.nr_stacks = ARRAY_SIZE(stacks),
+	};
 
 	if (regs) {
 		if (task != current)