Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--	arch/x86/kernel/cpu/bugs.c	95
1 file changed, 50 insertions(+), 45 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 10499bcd4e39..bb0ab8466b91 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
static DEFINE_MUTEX(spec_ctrl_mutex);
-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
@@ -717,7 +717,7 @@ void update_gds_msr(void)
case GDS_MITIGATION_UCODE_NEEDED:
case GDS_MITIGATION_HYPERVISOR:
return;
- };
+ }
wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
@@ -1019,7 +1019,6 @@ static void __init retbleed_select_mitigation(void)
do_cmd_auto:
case RETBLEED_CMD_AUTO:
- default:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
@@ -1042,8 +1041,7 @@ do_cmd_auto:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
- if (IS_ENABLED(CONFIG_RETHUNK))
- x86_return_thunk = retbleed_return_thunk;
+ x86_return_thunk = retbleed_return_thunk;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1061,7 +1059,8 @@ do_cmd_auto:
case RETBLEED_MITIGATION_STUFF:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
- x86_set_skl_return_thunk();
+
+ x86_return_thunk = call_depth_return_thunk;
break;
default:
@@ -1290,6 +1289,8 @@ spectre_v2_user_select_mitigation(void)
spectre_v2_user_ibpb = mode;
switch (cmd) {
+ case SPECTRE_V2_USER_CMD_NONE:
+ break;
case SPECTRE_V2_USER_CMD_FORCE:
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
@@ -1301,8 +1302,6 @@ spectre_v2_user_select_mitigation(void)
case SPECTRE_V2_USER_CMD_SECCOMP:
static_branch_enable(&switch_mm_cond_ibpb);
break;
- default:
- break;
}
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
@@ -2160,6 +2159,10 @@ static int l1d_flush_prctl_get(struct task_struct *task)
static int ssb_prctl_get(struct task_struct *task)
{
switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_NONE:
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ return PR_SPEC_ENABLE;
+ return PR_SPEC_NOT_AFFECTED;
case SPEC_STORE_BYPASS_DISABLE:
return PR_SPEC_DISABLE;
case SPEC_STORE_BYPASS_SECCOMP:
@@ -2171,11 +2174,8 @@ static int ssb_prctl_get(struct task_struct *task)
if (task_spec_ssb_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- default:
- if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
- return PR_SPEC_ENABLE;
- return PR_SPEC_NOT_AFFECTED;
}
+ BUG();
}
static int ib_prctl_get(struct task_struct *task)
@@ -2353,6 +2353,8 @@ early_param("l1tf", l1tf_cmdline);
enum srso_mitigation {
SRSO_MITIGATION_NONE,
+ SRSO_MITIGATION_UCODE_NEEDED,
+ SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
SRSO_MITIGATION_MICROCODE,
SRSO_MITIGATION_SAFE_RET,
SRSO_MITIGATION_IBPB,
@@ -2368,11 +2370,13 @@ enum srso_mitigation_cmd {
};
static const char * const srso_strings[] = {
- [SRSO_MITIGATION_NONE] = "Vulnerable",
- [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode",
- [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET",
- [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
- [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
+ [SRSO_MITIGATION_NONE] = "Vulnerable",
+ [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
+ [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
+ [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
+ [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
+ [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
+ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
};
static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
@@ -2406,34 +2410,44 @@ static void __init srso_select_mitigation(void)
{
bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
- if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
- goto pred_cmd;
+ if (cpu_mitigations_off())
+ return;
- if (!has_microcode) {
- pr_warn("IBPB-extending microcode not applied!\n");
- pr_warn(SRSO_NOTICE);
- } else {
+ if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
+ if (boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ return;
+ }
+
+ if (has_microcode) {
/*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
+ *
+ * Zen1/2 don't have SBPB, no need to try to enable it here.
*/
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
return;
}
- }
- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- if (has_microcode) {
- pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
srso_mitigation = SRSO_MITIGATION_IBPB;
- goto pred_cmd;
+ goto out;
}
+ } else {
+ pr_warn("IBPB-extending microcode not applied!\n");
+ pr_warn(SRSO_NOTICE);
+
+ /* may be overwritten by SRSO_CMD_SAFE_RET below */
+ srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
}
switch (srso_cmd) {
case SRSO_CMD_OFF:
- goto pred_cmd;
+ if (boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ return;
case SRSO_CMD_MICROCODE:
if (has_microcode) {
@@ -2458,10 +2472,12 @@ static void __init srso_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_SRSO);
x86_return_thunk = srso_return_thunk;
}
- srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+ if (has_microcode)
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+ else
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
} else {
pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
- goto pred_cmd;
}
break;
@@ -2473,7 +2489,6 @@ static void __init srso_select_mitigation(void)
}
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
- goto pred_cmd;
}
break;
@@ -2485,20 +2500,12 @@ static void __init srso_select_mitigation(void)
}
} else {
pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
- goto pred_cmd;
}
break;
-
- default:
- break;
}
- pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
-
-pred_cmd:
- if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
- boot_cpu_has(X86_FEATURE_SBPB))
- x86_pred_cmd = PRED_CMD_SBPB;
+out:
+ pr_info("%s\n", srso_strings[srso_mitigation]);
}
#undef pr_fmt
@@ -2704,9 +2711,7 @@ static ssize_t srso_show_state(char *buf)
if (boot_cpu_has(X86_FEATURE_SRSO_NO))
return sysfs_emit(buf, "Mitigation: SMT disabled\n");
- return sysfs_emit(buf, "%s%s\n",
- srso_strings[srso_mitigation],
- boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+ return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
}
static ssize_t gds_show_state(char *buf)