author    Peter Zijlstra <peterz@infradead.org>  2022-09-15 14:11:37 +0300
committer Peter Zijlstra <peterz@infradead.org>  2022-10-17 17:41:19 +0300
commit    ee3e2469b3463d28ca4cde20e0283319ac6a562d (patch)
tree      958265de2b1865d821630bc175df913052ac7f25 /arch/x86/include/asm/nospec-branch.h
parent    36b64f101219dd9e6e4f0ea880b64e8a90da547b (diff)
download  linux-ee3e2469b3463d28ca4cde20e0283319ac6a562d.tar.xz
x86/ftrace: Make it call depth tracking aware
Since ftrace has trampolines, don't use thunks for the __fentry__ site
but instead require that every function called from there includes
accounting. This very much includes all the direct-call functions.

Additionally, ftrace uses ROP tricks in two places:

  - return_to_handler(), and
  - ftrace_regs_caller() when pt_regs->orig_ax is set by a direct-call.

return_to_handler() already uses a retpoline to replace an indirect
jump in order to defeat IBT. Since this is a jump-type retpoline, make
sure there is no accounting done and ALTERNATIVE the RET into a ret.

ftrace_regs_caller() does much the same and gets the same treatment.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111148.927545073@infradead.org
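The accounting macro added below leans on the kernel's boot-time
ALTERNATIVE() patching: the instruction sequence only ends up in the
code stream when the CPU feature bit is set. A minimal sketch of that
mechanism, assuming only the generic <asm/alternative.h> inline-asm
interface (the demo function and the nop/lfence pair are illustrative
and not part of this patch):

	#include <asm/alternative.h>	/* ALTERNATIVE() for inline asm */
	#include <asm/cpufeatures.h>	/* X86_FEATURE_* bits */

	/* Hypothetical demo: assembles as "nop"; on CPUs where the
	 * feature bit is set, the alternatives patcher rewrites it to
	 * "lfence" at boot (shorter oldinstr is nop-padded for it).
	 * X86_FEATURE_CALL_DEPTH selects the INCREMENT_CALL_DEPTH
	 * sequence in exactly the same way. */
	static inline void alternative_demo(void)
	{
		asm volatile(ALTERNATIVE("nop", "lfence",
					 X86_FEATURE_LFENCE_RDTSC));
	}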
Diffstat (limited to 'arch/x86/include/asm/nospec-branch.h')
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 4771147c7c5a..82580adbca4b 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -343,6 +343,12 @@ static inline void x86_set_skl_return_thunk(void)
 {
 	x86_return_thunk = &__x86_return_skl;
 }
+
+#define CALL_DEPTH_ACCOUNT				\
+	ALTERNATIVE("",					\
+		    __stringify(INCREMENT_CALL_DEPTH),	\
+		    X86_FEATURE_CALL_DEPTH)
+
 #ifdef CONFIG_CALL_THUNKS_DEBUG
 DECLARE_PER_CPU(u64, __x86_call_count);
 DECLARE_PER_CPU(u64, __x86_ret_count);
@@ -351,6 +357,9 @@ DECLARE_PER_CPU(u64, __x86_ctxsw_count);
 #endif
 #else
 static inline void x86_set_skl_return_thunk(void) {}
+
+#define CALL_DEPTH_ACCOUNT ""
+
 #endif
 
 #ifdef CONFIG_RETPOLINE
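Because both definitions above expand to a plain string (an
ALTERNATIVE() sequence in one branch, "" in the other), a call site
can embed the macro in inline asm unconditionally. A hedged usage
sketch with a hypothetical helper name, not code from this patch:

	#include <asm/nospec-branch.h>	/* CALL_DEPTH_ACCOUNT */

	/* Hypothetical __fentry__-reachable helper that accounts its
	 * own call depth, as the commit message requires of functions
	 * called from the ftrace trampoline.  This compiles to nothing
	 * when CONFIG_CALL_DEPTH_TRACKING is off, and stays a
	 * boot-patched NOP on CPUs without X86_FEATURE_CALL_DEPTH. */
	static __always_inline void my_traced_helper(void)
	{
		/* "memory" keeps the compiler from reordering the
		 * per-CPU depth update across surrounding accesses. */
		asm volatile(CALL_DEPTH_ACCOUNT : : : "memory");
		/* ... actual work would go here ... */
	}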