author      Linus Torvalds <torvalds@linux-foundation.org>  2023-08-19 11:46:02 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>  2023-08-19 11:46:02 +0300
commit      bf98bae3d8a18745e54fef9fd71fd129f6e9f7e5 (patch)
tree        e174308d07b223d9493c120c444c02109b8880ad /arch/x86/include/asm/nospec-branch.h
parent      4e7ffde6984a7fa842489be7055570e5f5a4f0b5 (diff)
parent      6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 (diff)
download    linux-bf98bae3d8a18745e54fef9fd71fd129f6e9f7e5.tar.xz
Merge tag 'x86_urgent_for_v6.5_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:
 "Extraordinary embargoed times call for extraordinary measures. That's
  why this week's x86/urgent branch is larger than usual, containing all
  the known fallout fixes after the SRSO mitigation got merged.

  I know, it is a bit late in the game but everyone who has reported a
  bug stemming from the SRSO pile has tested that branch and has
  confirmed that it fixes their bug.

  Also, I've run it on every possible hardware I have and it is looking
  good. It is running on this very machine while I'm typing, for 2 days
  now without an issue. Famous last words...

   - Use LEA ...%rsp instead of ADD %rsp in the Zen1/2 SRSO return
     sequence as the latter clobbers flags, which interferes with fastop
     emulation in KVM, leading to guests freezing during boot

   - A fix for the DIV(0) quotient data leak on Zen1 to clear the
     divider buffers at the right time

   - Disable the SRSO mitigation on unaffected configurations as it got
     enabled there unnecessarily

   - Change .text section name to fix CONFIG_LTO_CLANG builds

   - Improve the optprobe indirect jmp check so that certain
     configurations can still use optprobes at all

   - A serious and good scrubbing of the untraining routines by PeterZ:

      - Add proper speculation-stopping traps so that objtool is happy

      - Adjust objtool to handle the new thunks

      - Make the thunk pointer assignable to the different untraining
        sequences at runtime, thus avoiding the alternative at the
        return thunk. It simplifies the code a bit too (see the sketch
        after the shortlog below)

      - Add an entry_untrain_ret() main entry point which selects the
        respective untraining sequence

      - Rename things so that they're more clear

      - Fix stack validation with FRAME_POINTER=y builds

   - Fix static call patching to handle when a JMP to the return thunk
     is the last insn on the very last module memory page

   - Add more documentation about what each untraining routine does and
     why"

* tag 'x86_urgent_for_v6.5_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/srso: Correct the mitigation status when SMT is disabled
  x86/static_call: Fix __static_call_fixup()
  objtool/x86: Fixup frame-pointer vs rethunk
  x86/srso: Explain the untraining sequences a bit more
  x86/cpu/kvm: Provide UNTRAIN_RET_VM
  x86/cpu: Cleanup the untrain mess
  x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
  x86/cpu: Rename original retbleed methods
  x86/cpu: Clean up SRSO return thunk mess
  x86/alternative: Make custom return thunk unconditional
  objtool/x86: Fix SRSO mess
  x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
  x86/cpu: Fix __x86_return_thunk symbol type
  x86/retpoline,kprobes: Skip optprobe check for indirect jumps with retpolines and IBT
  x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG
  x86/srso: Disable the mitigation on unaffected configurations
  x86/CPU/AMD: Fix the DIV(0) initial fix attempt
  x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
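For the "thunk pointer assignable at runtime" item above, here is a minimal, hypothetical C sketch of the idea: the header in this diff now declares extern void (*x86_return_thunk)(void); unconditionally, and the boot-time mitigation selection can simply aim that pointer at whichever return sequence applies. The helper name and the flag parameters below are illustrative only; the real selection logic lives in the kernel's mitigation code, not in this sketch.

    /*
     * Illustrative sketch only (not the kernel's actual mitigation code):
     * instead of patching an alternative into the return thunk itself, a
     * single function pointer is aimed at the chosen return sequence at
     * boot, using the symbols declared in nospec-branch.h below.
     */
    #include <linux/init.h>
    #include <linux/cache.h>
    #include <linux/types.h>
    #include <asm/nospec-branch.h>

    /* Default: the plain return thunk; overridden during mitigation selection. */
    void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

    /* Hypothetical helper; the flags stand in for the detected CPU bugs. */
    static void __init pick_return_thunk_sketch(bool srso_alias, bool srso, bool retbleed)
    {
            if (srso_alias)
                    x86_return_thunk = srso_alias_return_thunk;
            else if (srso)
                    x86_return_thunk = srso_return_thunk;
            else if (retbleed)
                    x86_return_thunk = retbleed_return_thunk;
            /* otherwise the default __x86_return_thunk stays in place */
    }

Call sites rewritten to jump through x86_return_thunk then land in the selected sequence; the entry-side untraining is handled analogously by entry_untrain_ret(), which the UNTRAIN_RET macros in the diff below now invoke via CALL_UNTRAIN_RET.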
Diffstat (limited to 'arch/x86/include/asm/nospec-branch.h')
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  49
1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 3faf044569a5..c55cc243592e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -272,9 +272,9 @@
.endm
#ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret"
+#define CALL_UNTRAIN_RET "call entry_untrain_ret"
#else
-#define CALL_ZEN_UNTRAIN_RET ""
+#define CALL_UNTRAIN_RET ""
#endif
/*
@@ -282,7 +282,7 @@
* return thunk isn't mapped into the userspace tables (then again, AMD
* typically has NO_MELTDOWN).
*
- * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
* entry_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
@@ -293,14 +293,20 @@
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
VALIDATE_UNRET_END
ALTERNATIVE_3 "", \
- CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
__stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
+.endm
-#ifdef CONFIG_CPU_SRSO
- ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
- "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+.macro UNTRAIN_RET_VM
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+ VALIDATE_UNRET_END
+ ALTERNATIVE_3 "", \
+ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \
+ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm
@@ -309,15 +315,10 @@
defined(CONFIG_CALL_DEPTH_TRACKING)
VALIDATE_UNRET_END
ALTERNATIVE_3 "", \
- CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
__stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
#endif
-
-#ifdef CONFIG_CPU_SRSO
- ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
- "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
-#endif
.endm
@@ -341,17 +342,24 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
+#ifdef CONFIG_RETHUNK
extern void __x86_return_thunk(void);
-extern void zen_untrain_ret(void);
+#else
+static inline void __x86_return_thunk(void) {}
+#endif
+
+extern void retbleed_return_thunk(void);
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+
+extern void retbleed_untrain_ret(void);
extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);
+
+extern void entry_untrain_ret(void);
extern void entry_ibpb(void);
-#ifdef CONFIG_CALL_THUNKS
extern void (*x86_return_thunk)(void);
-#else
-#define x86_return_thunk (&__x86_return_thunk)
-#endif
#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void __x86_return_skl(void);
@@ -478,9 +486,6 @@ enum ssb_mitigation {
SPEC_STORE_BYPASS_SECCOMP,
};
-extern char __indirect_thunk_start[];
-extern char __indirect_thunk_end[];
-
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{