-rw-r--r--  Makefile                                   |   8
-rw-r--r--  arch/x86/Kconfig                           | 106
-rw-r--r--  arch/x86/entry/calling.h                   |   4
-rw-r--r--  arch/x86/include/asm/disabled-features.h   |  18
-rw-r--r--  arch/x86/include/asm/linkage.h             |   4
-rw-r--r--  arch/x86/include/asm/nospec-branch.h       |  10
-rw-r--r--  arch/x86/include/asm/static_call.h         |   2
-rw-r--r--  arch/x86/kernel/alternative.c              |   5
-rw-r--r--  arch/x86/kernel/cpu/amd.c                  |   2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                 |  42
-rw-r--r--  arch/x86/kernel/static_call.c              |   2
-rw-r--r--  arch/x86/kvm/emulate.c                     |   4
-rw-r--r--  arch/x86/lib/retpoline.S                   |   4
-rw-r--r--  scripts/Makefile.build                     |   3
-rwxr-xr-x  scripts/link-vmlinux.sh                    |   2
-rw-r--r--  security/Kconfig                           |  11
-rw-r--r--  tools/objtool/builtin-check.c              |   3
-rw-r--r--  tools/objtool/builtin.h                    |   2
-rw-r--r--  tools/objtool/check.c                      |   9
19 files changed, 172 insertions, 69 deletions
diff --git a/Makefile b/Makefile
index 4d502a81d24b..96794db9f18a 100644
--- a/Makefile
+++ b/Makefile
@@ -672,14 +672,18 @@ endif
ifdef CONFIG_CC_IS_GCC
RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
-RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern)
RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
endif
ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
-RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern)
endif
+
+ifdef CONFIG_RETHUNK
+RETHUNK_CFLAGS := -mfunction-return=thunk-extern
+RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
+endif
+
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
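The compiler option being wired up here, -mfunction-return=thunk-extern, makes GCC and Clang end each function with a jump to the externally provided __x86_return_thunk instead of a bare RET. A rough sketch of the effect on a trivial function, with the generated code shown as comments; the exact instruction sequence varies by compiler and optimization level:

    /*
     * Illustration only: epilogue with and without -mfunction-return=thunk-extern.
     *
     *   default build:              thunk-extern build:
     *     add_one:                    add_one:
     *         lea 0x1(%rdi),%eax          lea 0x1(%rdi),%eax
     *         ret                         jmp __x86_return_thunk
     */
    int add_one(int x)
    {
            return x + 1;
    }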
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6eebb8927378..16c045906b2a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -453,30 +453,6 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
-config RETPOLINE
- bool "Avoid speculative indirect branches in kernel"
- default y
- help
- Compile kernel with the retpoline compiler options to guard against
- kernel-to-user data leaks by avoiding speculative indirect
- branches. Requires a compiler with -mindirect-branch=thunk-extern
- support for full protection. The kernel may run slower.
-
-config CC_HAS_SLS
- def_bool $(cc-option,-mharden-sls=all)
-
-config CC_HAS_RETURN_THUNK
- def_bool $(cc-option,-mfunction-return=thunk-extern)
-
-config SLS
- bool "Mitigate Straight-Line-Speculation"
- depends on CC_HAS_SLS && X86_64
- default n
- help
- Compile the kernel with straight-line-speculation options to guard
- against straight line speculation. The kernel image might be slightly
- larger.
-
config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -2430,6 +2406,88 @@ source "kernel/livepatch/Kconfig"
endmenu
+config CC_HAS_SLS
+ def_bool $(cc-option,-mharden-sls=all)
+
+config CC_HAS_RETURN_THUNK
+ def_bool $(cc-option,-mfunction-return=thunk-extern)
+
+menuconfig SPECULATION_MITIGATIONS
+ bool "Mitigations for speculative execution vulnerabilities"
+ default y
+ help
+ Say Y here to enable options which enable mitigations for
+ speculative execution hardware vulnerabilities.
+
+ If you say N, all mitigations will be disabled. You really
+ should know what you are doing to say so.
+
+if SPECULATION_MITIGATIONS
+
+config PAGE_TABLE_ISOLATION
+ bool "Remove the kernel mapping in user mode"
+ default y
+ depends on (X86_64 || X86_PAE)
+ help
+ This feature reduces the number of hardware side channels by
+ ensuring that the majority of kernel addresses are not mapped
+ into userspace.
+
+ See Documentation/x86/pti.rst for more details.
+
+config RETPOLINE
+ bool "Avoid speculative indirect branches in kernel"
+ default y
+ help
+ Compile kernel with the retpoline compiler options to guard against
+ kernel-to-user data leaks by avoiding speculative indirect
+ branches. Requires a compiler with -mindirect-branch=thunk-extern
+ support for full protection. The kernel may run slower.
+
+config RETHUNK
+ bool "Enable return-thunks"
+ depends on RETPOLINE && CC_HAS_RETURN_THUNK
+ default y
+ help
+ Compile the kernel with the return-thunks compiler option to guard
+ against kernel-to-user data leaks by avoiding return speculation.
+ Requires a compiler with -mfunction-return=thunk-extern
+ support for full protection. The kernel may run slower.
+
+config CPU_UNRET_ENTRY
+ bool "Enable UNRET on kernel entry"
+ depends on CPU_SUP_AMD && RETHUNK
+ default y
+ help
+ Compile the kernel with support for the retbleed=unret mitigation.
+
+config CPU_IBPB_ENTRY
+ bool "Enable IBPB on kernel entry"
+ depends on CPU_SUP_AMD
+ default y
+ help
+ Compile the kernel with support for the retbleed=ibpb mitigation.
+
+config CPU_IBRS_ENTRY
+ bool "Enable IBRS on kernel entry"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Compile the kernel with support for the spectre_v2=ibrs mitigation.
+ This mitigates both spectre_v2 and retbleed at great cost to
+ performance.
+
+config SLS
+ bool "Mitigate Straight-Line-Speculation"
+ depends on CC_HAS_SLS && X86_64
+ default n
+ help
+ Compile the kernel with straight-line-speculation options to guard
+ against straight line speculation. The kernel image might be slightly
+ larger.
+
+endif
+
config ARCH_HAS_ADD_PAGES
def_bool y
depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG
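On the C side, the new symbols are mostly consumed through IS_ENABLED(), as the bugs.c hunk further down does for CONFIG_CPU_UNRET_ENTRY and CONFIG_CPU_IBPB_ENTRY. A minimal sketch of that pattern; the two helper functions are hypothetical stand-ins, not kernel APIs:

    #include <linux/kconfig.h>      /* IS_ENABLED() */
    #include <linux/printk.h>

    /* Hypothetical helpers, for illustration only. */
    static void enable_unret_sketch(void) { pr_info("unret selected\n"); }
    static void enable_ibpb_sketch(void)  { pr_info("ibpb selected\n"); }

    /*
     * IS_ENABLED() expands to 0 or 1 at build time, so the branch for a
     * disabled option is discarded while still being type-checked.
     */
    static void pick_mitigation_sketch(void)
    {
            if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
                    enable_unret_sketch();
            else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY))
                    enable_ibpb_sketch();
    }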
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index c3c68e97a404..a4b357e5bbfe 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -323,6 +323,7 @@ For 32-bit we have the following conventions - kernel is built with
* Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
*/
.macro IBRS_ENTER save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
@@ -343,6 +344,7 @@ For 32-bit we have the following conventions - kernel is built with
shr $32, %rdx
wrmsr
.Lend_\@:
+#endif
.endm
/*
@@ -350,6 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
* regs. Must be called after the last RET.
*/
.macro IBRS_EXIT save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
@@ -364,6 +367,7 @@ For 32-bit we have the following conventions - kernel is built with
shr $32, %rdx
wrmsr
.Lend_\@:
+#endif
.endm
/*
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index e5b4e3d460c1..16c24915210d 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -60,9 +60,19 @@
# define DISABLE_RETPOLINE 0
#else
# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
- (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)) | \
- (1 << (X86_FEATURE_RETHUNK & 31)) | \
- (1 << (X86_FEATURE_UNRET & 31)))
+ (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK 0
+#else
+# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET 0
+#else
+# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
#endif
/* Force disable because it's broken beyond repair */
@@ -82,7 +92,7 @@
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_SMAP)
#define DISABLED_MASK10 0
-#define DISABLED_MASK11 (DISABLE_RETPOLINE)
+#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
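The DISABLED_MASK words exist so feature checks can fold away at compile time: when a feature's bit is set in the relevant word, cpu_feature_enabled() for that feature is unconditionally false and code guarded by it is dropped. A simplified userspace sketch of that idea; the real macros live in arch/x86/include/asm/cpufeature.h, and the mask bits below are made up:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for one DISABLED_MASK word, normally built from the
     * Kconfig-driven DISABLE_* defines above; bit numbers are illustrative. */
    #define DISABLED_MASK11_SKETCH  ((1u << 3) | (1u << 4))

    /* Returns false outright when the bit is in the disabled mask, mirroring
     * how cpu_feature_enabled() behaves for compiled-out features. */
    static bool feature_enabled_sketch(unsigned int bit, unsigned int runtime_caps)
    {
            if (DISABLED_MASK11_SKETCH & (1u << bit))
                    return false;                      /* compiled out */
            return runtime_caps & (1u << bit);         /* otherwise CPU-reported state */
    }

    int main(void)
    {
            unsigned int caps = (1u << 3) | (1u << 5); /* pretend CPUID output */

            printf("bit 3: %d\n", feature_enabled_sketch(3, caps));  /* 0: masked off */
            printf("bit 5: %d\n", feature_enabled_sketch(5, caps));  /* 1: enabled */
            return 0;
    }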
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index d04e61c2f863..5000cf59bdf5 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -18,7 +18,7 @@
#define __ALIGN_STR __stringify(__ALIGN)
#endif
-#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define RET jmp __x86_return_thunk
#else /* CONFIG_RETPOLINE */
#ifdef CONFIG_SLS
@@ -30,7 +30,7 @@
#else /* __ASSEMBLY__ */
-#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define ASM_RET "jmp __x86_return_thunk\n\t"
#else /* CONFIG_RETPOLINE */
#ifdef CONFIG_SLS
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f50a18d62ad7..2989b42fcf4e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -127,6 +127,12 @@
.Lskip_rsb_\@:
.endm
+#ifdef CONFIG_CPU_UNRET_ENTRY
+#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret"
+#else
+#define CALL_ZEN_UNTRAIN_RET ""
+#endif
+
/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
* return thunk isn't mapped into the userspace tables (then again, AMD
@@ -139,10 +145,10 @@
* where we have a stack but before any RET instruction.
*/
.macro UNTRAIN_RET
-#ifdef CONFIG_RETPOLINE
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
ANNOTATE_UNRET_END
ALTERNATIVE_2 "", \
- "call zen_untrain_ret", X86_FEATURE_UNRET, \
+ CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
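After alternatives patching, an UNTRAIN_RET site ends up doing one of three things, depending on which feature bit the retbleed selection code (bugs.c below) forced on. A conceptual C rendering, with stand-ins for the real feature test and entry thunks:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for boot_cpu_has(), zen_untrain_ret and entry_ibpb. */
    static bool has_unret, has_entry_ibpb;
    static void zen_untrain_ret_sketch(void) { puts("untrain return predictor"); }
    static void entry_ibpb_sketch(void)      { puts("issue IBPB on kernel entry"); }

    /* Conceptual view of an UNTRAIN_RET site once patched; the real thing is
     * the ALTERNATIVE_2 assembly above, and only one of the two features is
     * forced on in practice. */
    static void untrain_ret_sketch(void)
    {
            if (has_unret)
                    zen_untrain_ret_sketch();  /* built only with CONFIG_CPU_UNRET_ENTRY */
            else if (has_entry_ibpb)
                    entry_ibpb_sketch();
            /* neither bit set: the site stays as NOPs */
    }

    int main(void)
    {
            has_unret = true;                  /* as retbleed=unret would select */
            untrain_ret_sketch();
            return 0;
    }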
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
index 7efce22d6355..491aadfac611 100644
--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -44,7 +44,7 @@
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_RETHUNK
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk")
#else
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1fded2daaa2e..77eefa0b0f32 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -662,6 +662,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
}
}
+#ifdef CONFIG_RETHUNK
/*
* Rewrite the compiler generated return thunk tail-calls.
*
@@ -723,6 +724,10 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
}
}
}
+#else
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
+#endif /* CONFIG_RETHUNK */
+
#else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */
void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
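For reference, apply_returns(), now compiled only under CONFIG_RETHUNK, walks the return sites objtool records via the new --rethunk option and, when the running CPU does not need a return thunk, rewrites the compiler-generated jump to __x86_return_thunk into a plain RET padded with INT3. A rough sketch of that per-site rewrite, not the kernel's actual code:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define RET_OPCODE   0xc3
    #define INT3_OPCODE  0xcc

    /*
     * Sketch of the decision made per return site: keep the jump to the
     * return thunk when the thunk is needed, otherwise turn the 5-byte jump
     * into a 1-byte RET followed by INT3 padding.
     */
    void patch_return_sketch(unsigned char *site, size_t len, bool need_rethunk)
    {
            if (need_rethunk)
                    return;                            /* leave the thunk call in place */

            site[0] = RET_OPCODE;
            memset(site + 1, INT3_OPCODE, len - 1);    /* typically len == 5 */
    }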
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a2a7f0275e36..8b9e3277a6ce 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -916,6 +916,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
void init_spectral_chicken(struct cpuinfo_x86 *c)
{
+#ifdef CONFIG_CPU_UNRET_ENTRY
u64 value;
/*
@@ -932,6 +933,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
}
}
+#endif
}
static void init_amd_zn(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5e88663591bb..e96d33675229 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -798,7 +798,6 @@ static int __init retbleed_parse_cmdline(char *str)
early_param("retbleed", retbleed_parse_cmdline);
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
-#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
static void __init retbleed_select_mitigation(void)
@@ -813,18 +812,33 @@ static void __init retbleed_select_mitigation(void)
return;
case RETBLEED_CMD_UNRET:
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+ goto do_cmd_auto;
+ }
break;
case RETBLEED_CMD_IBPB:
- retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+ goto do_cmd_auto;
+ }
break;
+do_cmd_auto:
case RETBLEED_CMD_AUTO:
default:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY))
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ }
/*
* The Intel mitigation (IBRS or eIBRS) was already selected in
@@ -837,14 +851,6 @@ static void __init retbleed_select_mitigation(void)
switch (retbleed_mitigation) {
case RETBLEED_MITIGATION_UNRET:
-
- if (!IS_ENABLED(CONFIG_RETPOLINE) ||
- !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
- pr_err(RETBLEED_COMPILER_MSG);
- retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
- goto retbleed_force_ibpb;
- }
-
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
@@ -856,7 +862,6 @@ static void __init retbleed_select_mitigation(void)
break;
case RETBLEED_MITIGATION_IBPB:
-retbleed_force_ibpb:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
mitigate_smt = true;
break;
@@ -1227,6 +1232,12 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}
+ if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
+ pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+ mitigation_options[i].option);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
mitigation_options[i].option);
@@ -1284,7 +1295,8 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
- if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+ if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
+ boot_cpu_has_bug(X86_BUG_RETBLEED) &&
retbleed_cmd != RETBLEED_CMD_OFF &&
boot_cpu_has(X86_FEATURE_IBRS) &&
boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 02e471c27efa..8f7ba3ae1c3c 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -108,7 +108,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_RETHUNK
/*
* This is called by apply_returns() to fix up static call trampolines,
* specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1f953f230ad5..59e5d79f5c34 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -435,10 +435,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
* Depending on .config the SETcc functions look like:
*
* SETcc %al [3 bytes]
- * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE]
+ * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK]
* INT3 [1 byte; CONFIG_SLS]
*/
-#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \
+#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETHUNK)) + \
IS_ENABLED(CONFIG_SLS))
#define SETCC_LENGTH (3 + RET_LENGTH)
#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))
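The arithmetic above sizes each emulator SETcc stub. A worked example of how SETCC_ALIGN comes out for the relevant configurations; RETHUNK and SLS stand in for the IS_ENABLED() values:

    #include <stdio.h>

    /* Same arithmetic as the macros above, written out as a function. */
    static unsigned int setcc_align(unsigned int rethunk, unsigned int sls)
    {
            unsigned int ret_len   = 1 + 4 * rethunk + sls; /* RET or 5-byte JMP, plus INT3 */
            unsigned int setcc_len = 3 + ret_len;           /* SETcc %al is 3 bytes */

            return 4 << ((setcc_len > 4) & 1) << ((setcc_len > 8) & 1);
    }

    int main(void)
    {
            printf("plain:         %u\n", setcc_align(0, 0));  /* 4-byte stubs  */
            printf("rethunk:       %u\n", setcc_align(1, 0));  /* 8-byte stubs  */
            printf("rethunk + sls: %u\n", setcc_align(1, 1));  /* 16-byte stubs */
            return 0;
    }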
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 807f674fd59e..1221bb099afb 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -71,6 +71,8 @@ SYM_CODE_END(__x86_indirect_thunk_array)
* This function name is magical and is used by -mfunction-return=thunk-extern
* for the compiler to generate JMPs to it.
*/
+#ifdef CONFIG_RETHUNK
+
.section .text.__x86.return_thunk
/*
@@ -135,3 +137,5 @@ SYM_FUNC_END(zen_untrain_ret)
__EXPORT_THUNK(zen_untrain_ret)
EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index bea7e54b2ab9..17e8b2065900 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -227,6 +227,9 @@ endif
ifdef CONFIG_RETPOLINE
objtool_args += --retpoline
endif
+ifdef CONFIG_RETHUNK
+ objtool_args += --rethunk
+endif
ifdef CONFIG_X86_SMAP
objtool_args += --uaccess
endif
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index a7c72eb5a624..d0b44bee9286 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -65,7 +65,7 @@ objtool_link()
if [ -n "${CONFIG_VMLINUX_VALIDATION}" ]; then
objtoolopt="check"
- if [ -n "${CONFIG_RETPOLINE}" ]; then
+ if [ -n "${CONFIG_CPU_UNRET_ENTRY}" ]; then
objtoolopt="${objtoolopt} --unret"
fi
if [ -z "${CONFIG_FRAME_POINTER}" ]; then
diff --git a/security/Kconfig b/security/Kconfig
index 0548db16c49d..9893c316da89 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -54,17 +54,6 @@ config SECURITY_NETWORK
implement socket and networking access controls.
If you are unsure how to answer this question, answer N.
-config PAGE_TABLE_ISOLATION
- bool "Remove the kernel mapping in user mode"
- default y
- depends on (X86_64 || X86_PAE) && !UML
- help
- This feature reduces the number of hardware side channels by
- ensuring that the majority of kernel addresses are not mapped
- into userspace.
-
- See Documentation/x86/pti.rst for more details.
-
config SECURITY_INFINIBAND
bool "Infiniband Security Hooks"
depends on SECURITY && INFINIBAND
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index d04eab7e77ae..447a49c03abb 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -19,7 +19,7 @@
#include "objtool.h"
bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
- validate_dup, vmlinux, sls, unret;
+ validate_dup, vmlinux, sls, unret, rethunk;
static const char * const check_usage[] = {
"objtool check [<options>] file.o",
@@ -30,6 +30,7 @@ const struct option check_options[] = {
OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
+ OPT_BOOLEAN(0, "rethunk", &rethunk, "validate and annotate rethunk usage"),
OPT_BOOLEAN(0, "unret", &unret, "validate entry unret placement"),
OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index b6b486e28d54..61d8d49dbc65 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -9,7 +9,7 @@
extern const struct option check_options[];
extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
- validate_dup, vmlinux, sls, unret;
+ validate_dup, vmlinux, sls, unret, rethunk;
extern int cmd_check(int argc, const char **argv);
extern int cmd_orc(int argc, const char **argv);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 864d407009c9..ea80b29b9913 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3262,8 +3262,11 @@ static int validate_retpoline(struct objtool_file *file)
continue;
if (insn->type == INSN_RETURN) {
- WARN_FUNC("'naked' return found in RETPOLINE build",
- insn->sec, insn->offset);
+ if (rethunk) {
+ WARN_FUNC("'naked' return found in RETHUNK build",
+ insn->sec, insn->offset);
+ } else
+ continue;
} else {
WARN_FUNC("indirect %s found in RETPOLINE build",
insn->sec, insn->offset,
@@ -3533,7 +3536,9 @@ int check(struct objtool_file *file)
if (ret < 0)
goto out;
warnings += ret;
+ }
+ if (rethunk) {
ret = create_return_sites_sections(file);
if (ret < 0)
goto out;