author     Linus Torvalds <torvalds@linux-foundation.org>  2023-11-04 21:46:37 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-11-04 21:46:37 +0300
commit     0a23fb262d17f587c9bb1e6cc83ad4158b21f16e (patch)
tree       fd5a2b1563856a04fa056daa18cca7be8896b322 /arch/x86
parent     5c5e048b2417a56b7b52bdbb66d4fc99d0c20dd2 (diff)
parent     cf5ab01c87030a085e211a0a327535932ec6f719 (diff)
download   linux-0a23fb262d17f587c9bb1e6cc83ad4158b21f16e.tar.xz
Merge tag 'x86_microcode_for_v6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 microcode loading updates from Borislav Petkov:
 "Major microcode loader restructuring, cleanup and improvements by
  Thomas Gleixner:

   - Restructure the code needed for it and add a temporary initrd
     mapping on 32-bit so that the loader can access the microcode
     blobs. This in itself is a preparation for the next major
     improvement:

   - Do not load microcode on 32-bit before paging has been enabled.
     Handling this has caused an endless stream of headaches, issues,
     ugly code and unnecessary hacks in the past. And there really
     wasn't any sensible reason to do that in the first place. So switch
     the 32-bit loading to happen after paging has been enabled and turn
     the loader code "real purrty" again

   - Drop mixed microcode steppings loading on Intel - there, a single
     patch loaded on the whole system is sufficient

   - Rework late loading to track which CPUs have updated microcode
     successfully and which haven't, act accordingly

   - Move late microcode loading on Intel in NMI context in order to
     guarantee concurrent loading on all threads

   - Make the late loading CPU-hotplug-safe and have the offlined
     threads be woken up for the purpose of the update

   - Add support for a minimum revision which determines whether late
     microcode loading is safe on a machine and the microcode does not
     change software visible features which the machine cannot use
     anyway since feature detection has happened already. Roughly, the
     minimum revision is the smallest revision number which must be
     loaded currently on the system so that late updates can be allowed

   - Other nice cleanups, fixes, etc all over the place"

* tag 'x86_microcode_for_v6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  x86/microcode/intel: Add a minimum required revision for late loading
  x86/microcode: Prepare for minimal revision check
  x86/microcode: Handle "offline" CPUs correctly
  x86/apic: Provide apic_force_nmi_on_cpu()
  x86/microcode: Protect against instrumentation
  x86/microcode: Rendezvous and load in NMI
  x86/microcode: Replace the all-in-one rendevous handler
  x86/microcode: Provide new control functions
  x86/microcode: Add per CPU control field
  x86/microcode: Add per CPU result state
  x86/microcode: Sanitize __wait_for_cpus()
  x86/microcode: Clarify the late load logic
  x86/microcode: Handle "nosmt" correctly
  x86/microcode: Clean up mc_cpu_down_prep()
  x86/microcode: Get rid of the schedule work indirection
  x86/microcode: Mop up early loading leftovers
  x86/microcode/amd: Use cached microcode for AP load
  x86/microcode/amd: Cache builtin/initrd microcode early
  x86/microcode/amd: Cache builtin microcode too
  x86/microcode/amd: Use correct per CPU ucode_cpu_info
  ...
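The minimum revision mechanism described above can be summarized with a small
standalone model. This is illustrative C only, not the kernel implementation:
the min_req_ver field name is taken from the Intel header change in this diff,
and the refuse-or-taint policy follows the new Kconfig help text below.

/* Illustrative model of the late-load minimum revision gate (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct ucode_hdr_model {
	unsigned int rev;		/* revision carried by the new patch */
	unsigned int min_req_ver;	/* 0 means: not declared safe for late loading */
};

static bool late_load_allowed(const struct ucode_hdr_model *mc,
			      unsigned int cur_rev, bool force_minrev,
			      bool *taint)
{
	*taint = false;

	if (mc->rev <= cur_rev)
		return false;			/* nothing newer to load */

	if (!mc->min_req_ver || cur_rev < mc->min_req_ver) {
		if (force_minrev)
			return false;		/* enforcement on: refuse the load */
		*taint = true;			/* load anyway, but taint the kernel */
	}
	return true;
}

int main(void)
{
	struct ucode_hdr_model mc = { .rev = 0x25, .min_req_ver = 0x20 };
	bool taint = false;
	bool ok = late_load_allowed(&mc, 0x21, true, &taint);

	printf("allowed=%d taint=%d\n", ok, taint);
	return 0;
}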
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                          |  29
-rw-r--r--  arch/x86/include/asm/apic.h               |   5
-rw-r--r--  arch/x86/include/asm/cpu.h                |  20
-rw-r--r--  arch/x86/include/asm/microcode.h          |  21
-rw-r--r--  arch/x86/include/asm/setup.h              |   1
-rw-r--r--  arch/x86/kernel/Makefile                  |   1
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c       |   2
-rw-r--r--  arch/x86/kernel/apic/ipi.c                |   8
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c     |   1
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c        |   1
-rw-r--r--  arch/x86/kernel/cpu/common.c              |  12
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c       | 157
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c      | 670
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c     | 688
-rw-r--r--  arch/x86/kernel/cpu/microcode/internal.h  |  35
-rw-r--r--  arch/x86/kernel/head32.c                  | 120
-rw-r--r--  arch/x86/kernel/head_32.S                 |  10
-rw-r--r--  arch/x86/kernel/nmi.c                     |   9
-rw-r--r--  arch/x86/kernel/smpboot.c                 |  12
19 files changed, 897 insertions, 905 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a917f62eff2..3762f41bb092 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1313,16 +1313,41 @@ config MICROCODE
def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
+config MICROCODE_INITRD32
+ def_bool y
+ depends on MICROCODE && X86_32 && BLK_DEV_INITRD
+
config MICROCODE_LATE_LOADING
bool "Late microcode loading (DANGEROUS)"
default n
- depends on MICROCODE
+ depends on MICROCODE && SMP
help
Loading microcode late, when the system is up and executing instructions
is a tricky business and should be avoided if possible. Just the sequence
of synchronizing all cores and SMT threads is one fragile dance which does
not guarantee that cores might not softlock after the loading. Therefore,
- use this at your own risk. Late loading taints the kernel too.
+ use this at your own risk. Late loading taints the kernel unless the
+ microcode header indicates that it is safe for late loading via the
+ minimal revision check. This minimal revision check can be enforced on
+ the kernel command line with "microcode.minrev=Y".
+
+config MICROCODE_LATE_FORCE_MINREV
+ bool "Enforce late microcode loading minimal revision check"
+ default n
+ depends on MICROCODE_LATE_LOADING
+ help
+ To prevent that users load microcode late which modifies already
+ in use features, newer microcode patches have a minimum revision field
+ in the microcode header, which tells the kernel which minimum
+ revision must be active in the CPU to safely load that new microcode
+ late into the running system. If disabled the check will not
+ be enforced but the kernel will be tainted when the minimal
+ revision check fails.
+
+ This minimal revision check can also be controlled via the
+ "microcode.minrev" parameter on the kernel command line.
+
+ If unsure say Y.
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index b0d192f613b7..d21f48f1c242 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -276,7 +276,8 @@ struct apic {
u32 disable_esr : 1,
dest_mode_logical : 1,
- x2apic_set_max_apicid : 1;
+ x2apic_set_max_apicid : 1,
+ nmi_to_offline_cpu : 1;
u32 (*calc_dest_apicid)(unsigned int cpu);
@@ -531,6 +532,8 @@ extern u32 apic_flat_calc_apicid(unsigned int cpu);
extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
extern u32 default_cpu_present_to_apicid(int mps_cpu);
+void apic_send_nmi_to_offline_cpu(unsigned int cpu);
+
#else /* CONFIG_X86_LOCAL_APIC */
static inline u32 read_apic_id(void) { return 0; }
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 25050d953eee..fecc4fe1d68a 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -71,26 +71,12 @@ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
extern __noendbr void cet_disable(void);
-struct ucode_cpu_info;
+struct cpu_signature;
-int intel_cpu_collect_info(struct ucode_cpu_info *uci);
-
-static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1,
- unsigned int s2, unsigned int p2)
-{
- if (s1 != s2)
- return false;
-
- /* Processor flags are either both 0 ... */
- if (!p1 && !p2)
- return true;
-
- /* ... or they intersect. */
- return p1 & p2;
-}
+void intel_collect_cpu_info(struct cpu_signature *sig);
extern u64 x86_read_arch_cap_msr(void);
-int intel_find_matching_signature(void *mc, unsigned int csig, int cpf);
+bool intel_find_matching_signature(void *mc, struct cpu_signature *sig);
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);
extern struct cpumask cpus_stop_mask;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index bbbe9d744977..695e569159c1 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
#endif
+extern unsigned long initrd_start_early;
+
#ifdef CONFIG_CPU_SUP_INTEL
/* Intel specific microcode defines. Public for IFS */
struct microcode_header_intel {
@@ -36,7 +38,8 @@ struct microcode_header_intel {
unsigned int datasize;
unsigned int totalsize;
unsigned int metasize;
- unsigned int reserved[2];
+ unsigned int min_req_ver;
+ unsigned int reserved;
};
struct microcode_intel {
@@ -68,11 +71,19 @@ static inline u32 intel_get_microcode_revision(void)
return rev;
}
+#endif /* !CONFIG_CPU_SUP_INTEL */
-void show_ucode_info_early(void);
+bool microcode_nmi_handler(void);
+void microcode_offline_nmi_handler(void);
-#else /* CONFIG_CPU_SUP_INTEL */
-static inline void show_ucode_info_early(void) { }
-#endif /* !CONFIG_CPU_SUP_INTEL */
+#ifdef CONFIG_MICROCODE_LATE_LOADING
+DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static __always_inline bool microcode_nmi_handler_enabled(void)
+{
+ return static_branch_unlikely(&microcode_nmi_handler_enable);
+}
+#else
+static __always_inline bool microcode_nmi_handler_enabled(void) { return false; }
+#endif
#endif /* _ASM_X86_MICROCODE_H */
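The arch/x86/kernel/nmi.c hunk is listed in the diffstat but not included in
this excerpt. As a rough sketch only (the wrapper name handle_microcode_nmi is
made up here), the intent of the two declarations above is that the NMI entry
path first tests the normally patched-out static key and only then enters the
microcode rendezvous:

/* Illustrative sketch, not the actual nmi.c hunk. */
static bool handle_microcode_nmi(void)
{
	/* Patched-out branch unless a late load is in progress */
	if (!microcode_nmi_handler_enabled())
		return false;

	/* Returns true when this NMI belonged to the microcode rendezvous */
	return microcode_nmi_handler();
}

Because the static key is only enabled around a late load (see
load_late_stop_cpus() in core.c further down), the extra check costs nothing
on the regular NMI path.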
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index f3495623ac99..bf483fcb4e57 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -126,6 +126,7 @@ void clear_bss(void);
#ifdef __i386__
asmlinkage void __init __noreturn i386_start_kernel(void);
+void __init mk_early_pgtbl_32(void);
#else
asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3269a0e23d3a..0000325ab98f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_head64.o = -pg
+CFLAGS_REMOVE_head32.o = -pg
CFLAGS_REMOVE_sev.o = -pg
CFLAGS_REMOVE_rethook.o = -pg
endif
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 37daa3fd6819..7139867d69cd 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -103,6 +103,7 @@ static struct apic apic_flat __ro_after_init = {
.send_IPI_allbutself = default_send_IPI_allbutself,
.send_IPI_all = default_send_IPI_all,
.send_IPI_self = default_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
@@ -173,6 +174,7 @@ static struct apic apic_physflat __ro_after_init = {
.send_IPI_allbutself = default_send_IPI_allbutself,
.send_IPI_all = default_send_IPI_all,
.send_IPI_self = default_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 0078730a512e..5da693d633b7 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -97,6 +97,14 @@ sendmask:
__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}
+void apic_send_nmi_to_offline_cpu(unsigned int cpu)
+{
+ if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu))
+ return;
+ if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
+ return;
+ apic->send_IPI(cpu, NMI_VECTOR);
+}
#endif /* CONFIG_SMP */
static inline int __prepare_ICR2(unsigned int mask)
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index affbff65e497..a8306089c91b 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -251,6 +251,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_self = x2apic_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_msr_read,
.write = native_apic_msr_write,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 7c9fe28f742f..558a4a8824f4 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -166,6 +166,7 @@ static struct apic apic_x2apic_phys __ro_after_init = {
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_self = x2apic_send_IPI_self,
+ .nmi_to_offline_cpu = true,
.read = native_apic_msr_read,
.write = native_apic_msr_write,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5d9591146244..b14fc8c1c953 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2164,8 +2164,6 @@ static inline void setup_getcpu(int cpu)
}
#ifdef CONFIG_X86_64
-static inline void ucode_cpu_init(int cpu) { }
-
static inline void tss_setup_ist(struct tss_struct *tss)
{
/* Set up the per-CPU TSS IST stacks */
@@ -2176,16 +2174,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
/* Only mapped when SEV-ES is active */
tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}
-
#else /* CONFIG_X86_64 */
-
-static inline void ucode_cpu_init(int cpu)
-{
- show_ucode_info_early();
-}
-
static inline void tss_setup_ist(struct tss_struct *tss) { }
-
#endif /* !CONFIG_X86_64 */
static inline void tss_setup_io_bitmap(struct tss_struct *tss)
@@ -2241,8 +2231,6 @@ void cpu_init(void)
struct task_struct *cur = current;
int cpu = raw_smp_processor_id();
- ucode_cpu_init(cpu);
-
#ifdef CONFIG_NUMA
if (this_cpu_read(numa_node) == 0 &&
early_cpu_to_node(cpu) != NUMA_NO_NODE)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index bbd1dc38ea03..9373ec01c5ae 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -37,6 +37,16 @@
#include "internal.h"
+struct ucode_patch {
+ struct list_head plist;
+ void *data;
+ unsigned int size;
+ u32 patch_id;
+ u16 equiv_cpu;
+};
+
+static LIST_HEAD(microcode_cache);
+
#define UCODE_MAGIC 0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
#define UCODE_UCODE_TYPE 0x00000001
@@ -121,24 +131,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
/*
* Check whether there is a valid microcode container file at the beginning
- * of @buf of size @buf_size. Set @early to use this function in the early path.
+ * of @buf of size @buf_size.
*/
-static bool verify_container(const u8 *buf, size_t buf_size, bool early)
+static bool verify_container(const u8 *buf, size_t buf_size)
{
u32 cont_magic;
if (buf_size <= CONTAINER_HDR_SZ) {
- if (!early)
- pr_debug("Truncated microcode container header.\n");
-
+ pr_debug("Truncated microcode container header.\n");
return false;
}
cont_magic = *(const u32 *)buf;
if (cont_magic != UCODE_MAGIC) {
- if (!early)
- pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
-
+ pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
return false;
}
@@ -147,23 +153,20 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early)
/*
* Check whether there is a valid, non-truncated CPU equivalence table at the
- * beginning of @buf of size @buf_size. Set @early to use this function in the
- * early path.
+ * beginning of @buf of size @buf_size.
*/
-static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
+static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
const u32 *hdr = (const u32 *)buf;
u32 cont_type, equiv_tbl_len;
- if (!verify_container(buf, buf_size, early))
+ if (!verify_container(buf, buf_size))
return false;
cont_type = hdr[1];
if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
- if (!early)
- pr_debug("Wrong microcode container equivalence table type: %u.\n",
- cont_type);
-
+ pr_debug("Wrong microcode container equivalence table type: %u.\n",
+ cont_type);
return false;
}
@@ -172,9 +175,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
equiv_tbl_len = hdr[2];
if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
buf_size < equiv_tbl_len) {
- if (!early)
- pr_debug("Truncated equivalence table.\n");
-
+ pr_debug("Truncated equivalence table.\n");
return false;
}
@@ -183,22 +184,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
/*
* Check whether there is a valid, non-truncated microcode patch section at the
- * beginning of @buf of size @buf_size. Set @early to use this function in the
- * early path.
+ * beginning of @buf of size @buf_size.
*
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
static bool
-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
+__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;
if (buf_size < SECTION_HDR_SIZE) {
- if (!early)
- pr_debug("Truncated patch section.\n");
-
+ pr_debug("Truncated patch section.\n");
return false;
}
@@ -207,17 +205,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early
p_size = hdr[1];
if (p_type != UCODE_UCODE_TYPE) {
- if (!early)
- pr_debug("Invalid type field (0x%x) in container file section header.\n",
- p_type);
-
+ pr_debug("Invalid type field (0x%x) in container file section header.\n",
+ p_type);
return false;
}
if (p_size < sizeof(struct microcode_header_amd)) {
- if (!early)
- pr_debug("Patch of size %u too short.\n", p_size);
-
+ pr_debug("Patch of size %u too short.\n", p_size);
return false;
}
@@ -269,7 +263,7 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
* 0: success
*/
static int
-verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
+verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
{
struct microcode_header_amd *mc_hdr;
unsigned int ret;
@@ -277,7 +271,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
u16 proc_id;
u8 patch_fam;
- if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
+ if (!__verify_patch_section(buf, buf_size, &sh_psize))
return -1;
/*
@@ -292,16 +286,13 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
* size sh_psize, as the section claims.
*/
if (buf_size < sh_psize) {
- if (!early)
- pr_debug("Patch of size %u truncated.\n", sh_psize);
-
+ pr_debug("Patch of size %u truncated.\n", sh_psize);
return -1;
}
ret = __verify_patch_size(family, sh_psize, buf_size);
if (!ret) {
- if (!early)
- pr_debug("Per-family patch size mismatch.\n");
+ pr_debug("Per-family patch size mismatch.\n");
return -1;
}
@@ -309,8 +300,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
- if (!early)
- pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
+ pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
return -1;
}
@@ -337,7 +327,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u16 eq_id;
u8 *buf;
- if (!verify_equivalence_table(ucode, size, true))
+ if (!verify_equivalence_table(ucode, size))
return 0;
buf = ucode;
@@ -364,7 +354,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u32 patch_size;
int ret;
- ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
+ ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size);
if (ret < 0) {
/*
* Patch verification failed, skip to the next container, if
@@ -456,14 +446,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
struct microcode_amd *mc;
- u32 rev, dummy, *new_rev;
bool ret = false;
-
-#ifdef CONFIG_X86_32
- new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-#else
- new_rev = &ucode_new_rev;
-#endif
+ u32 rev, dummy;
desc.cpuid_1_eax = cpuid_1_eax;
@@ -484,8 +468,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
return ret;
if (!__apply_microcode_amd(mc)) {
- *new_rev = mc->hdr.patch_id;
- ret = true;
+ ucode_new_rev = mc->hdr.patch_id;
+ ret = true;
}
return ret;
@@ -501,7 +485,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
if (family >= 0x15)
snprintf(fw_name, sizeof(fw_name),
- "amd-ucode/microcode_amd_fam%.2xh.bin", family);
+ "amd-ucode/microcode_amd_fam%02hhxh.bin", family);
if (firmware_request_builtin(&fw, fw_name)) {
cp->size = fw.size;
@@ -512,36 +496,23 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
return false;
}
-static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
- struct ucode_cpu_info *uci;
struct cpio_data cp;
- const char *path;
- bool use_pa;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
- path = (const char *)__pa_nodebug(ucode_path);
- use_pa = true;
- } else {
- uci = ucode_cpu_info;
- path = ucode_path;
- use_pa = false;
- }
if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
- cp = find_microcode_in_initrd(path, use_pa);
-
- /* Needed in load_microcode_amd() */
- uci->cpu_sig.sig = cpuid_1_eax;
+ cp = find_microcode_in_initrd(ucode_path);
*ret = cp;
}
-static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
struct cpio_data cp = { };
+ /* Needed in load_microcode_amd() */
+ ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
+
find_blobs_in_containers(cpuid_1_eax, &cp);
if (!(cp.data && cp.size))
return;
@@ -549,20 +520,20 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
}
-void load_ucode_amd_early(unsigned int cpuid_1_eax)
-{
- return apply_ucode_from_containers(cpuid_1_eax);
-}
-
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
-int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+static int __init save_microcode_in_initrd(void)
{
+ unsigned int cpuid_1_eax = native_cpuid_eax(1);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
enum ucode_state ret;
struct cpio_data cp;
- cp = find_microcode_in_initrd(ucode_path, false);
+ if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ return 0;
+
+ find_blobs_in_containers(cpuid_1_eax, &cp);
if (!(cp.data && cp.size))
return -EINVAL;
@@ -578,6 +549,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
return 0;
}
+early_initcall(save_microcode_in_initrd);
/*
* a small, trivial cache of per-family ucode patches
@@ -631,7 +603,6 @@ static struct ucode_patch *find_patch(unsigned int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u16 equiv_id;
-
equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
if (!equiv_id)
return NULL;
@@ -733,12 +704,20 @@ out:
return ret;
}
+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+{
+ unsigned int cpu = smp_processor_id();
+
+ ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
+ apply_microcode_amd(cpu);
+}
+
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
u32 equiv_tbl_len;
const u32 *hdr;
- if (!verify_equivalence_table(buf, buf_size, false))
+ if (!verify_equivalence_table(buf, buf_size))
return 0;
hdr = (const u32 *)buf;
@@ -784,7 +763,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
u16 proc_id;
int ret;
- ret = verify_patch(family, fw, leftover, patch_size, false);
+ ret = verify_patch(family, fw, leftover, patch_size);
if (ret)
return ret;
@@ -909,6 +888,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
enum ucode_state ret = UCODE_NFOUND;
const struct firmware *fw;
+ if (force_minrev)
+ return UCODE_NFOUND;
+
if (c->x86 >= 0x15)
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
@@ -918,7 +900,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
}
ret = UCODE_ERROR;
- if (!verify_container(fw->data, fw->size, false))
+ if (!verify_container(fw->data, fw->size))
goto fw_release;
ret = load_microcode_amd(c->x86, fw->data, fw->size);
@@ -938,10 +920,11 @@ static void microcode_fini_cpu_amd(int cpu)
}
static struct microcode_ops microcode_amd_ops = {
- .request_microcode_fw = request_microcode_amd,
- .collect_cpu_info = collect_cpu_info_amd,
- .apply_microcode = apply_microcode_amd,
- .microcode_fini_cpu = microcode_fini_cpu_amd,
+ .request_microcode_fw = request_microcode_amd,
+ .collect_cpu_info = collect_cpu_info_amd,
+ .apply_microcode = apply_microcode_amd,
+ .microcode_fini_cpu = microcode_fini_cpu_amd,
+ .nmi_safe = true,
};
struct microcode_ops * __init init_amd_microcode(void)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 6cc7a2c181da..666d25bbc5ad 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -23,6 +23,7 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -31,6 +32,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
@@ -42,11 +44,10 @@
#define DRIVER_VERSION "2.2"
static struct microcode_ops *microcode_ops;
-static bool dis_ucode_ldr = true;
+bool dis_ucode_ldr = true;
-bool initrd_gone;
-
-LIST_HEAD(microcode_cache);
+bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
+module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
/*
* Synchronization.
@@ -90,10 +91,7 @@ static bool amd_check_current_patch_level(void)
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
- if (IS_ENABLED(CONFIG_X86_32))
- levels = (u32 *)__pa_nodebug(&final_levels);
- else
- levels = final_levels;
+ levels = final_levels;
for (i = 0; levels[i]; i++) {
if (lvl == levels[i])
@@ -105,17 +103,8 @@ static bool amd_check_current_patch_level(void)
static bool __init check_loader_disabled_bsp(void)
{
static const char *__dis_opt_str = "dis_ucode_ldr";
-
-#ifdef CONFIG_X86_32
- const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
- const char *option = (const char *)__pa_nodebug(__dis_opt_str);
- bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
-
-#else /* CONFIG_X86_64 */
const char *cmdline = boot_command_line;
const char *option = __dis_opt_str;
- bool *res = &dis_ucode_ldr;
-#endif
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
@@ -123,17 +112,17 @@ static bool __init check_loader_disabled_bsp(void)
* that's good enough as they don't land on the BSP path anyway.
*/
if (native_cpuid_ecx(1) & BIT(31))
- return *res;
+ return true;
if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
if (amd_check_current_patch_level())
- return *res;
+ return true;
}
if (cmdline_find_option_bool(cmdline, option) <= 0)
- *res = false;
+ dis_ucode_ldr = false;
- return *res;
+ return dis_ucode_ldr;
}
void __init load_ucode_bsp(void)
@@ -168,23 +157,14 @@ void __init load_ucode_bsp(void)
if (intel)
load_ucode_intel_bsp();
else
- load_ucode_amd_early(cpuid_1_eax);
-}
-
-static bool check_loader_disabled_ap(void)
-{
-#ifdef CONFIG_X86_32
- return *((bool *)__pa_nodebug(&dis_ucode_ldr));
-#else
- return dis_ucode_ldr;
-#endif
+ load_ucode_amd_bsp(cpuid_1_eax);
}
void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;
- if (check_loader_disabled_ap())
+ if (dis_ucode_ldr)
return;
cpuid_1_eax = native_cpuid_eax(1);
@@ -196,97 +176,44 @@ void load_ucode_ap(void)
break;
case X86_VENDOR_AMD:
if (x86_family(cpuid_1_eax) >= 0x10)
- load_ucode_amd_early(cpuid_1_eax);
- break;
- default:
- break;
- }
-}
-
-static int __init save_microcode_in_initrd(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- int ret = -EINVAL;
-
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL:
- if (c->x86 >= 6)
- ret = save_microcode_in_initrd_intel();
- break;
- case X86_VENDOR_AMD:
- if (c->x86 >= 0x10)
- ret = save_microcode_in_initrd_amd(cpuid_eax(1));
+ load_ucode_amd_ap(cpuid_1_eax);
break;
default:
break;
}
-
- initrd_gone = true;
-
- return ret;
}
-struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
+struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long start = 0;
size_t size;
#ifdef CONFIG_X86_32
- struct boot_params *params;
-
- if (use_pa)
- params = (struct boot_params *)__pa_nodebug(&boot_params);
- else
- params = &boot_params;
-
- size = params->hdr.ramdisk_size;
-
- /*
- * Set start only if we have an initrd image. We cannot use initrd_start
- * because it is not set that early yet.
- */
+ size = boot_params.hdr.ramdisk_size;
+ /* Early load on BSP has a temporary mapping. */
if (size)
- start = params->hdr.ramdisk_image;
+ start = initrd_start_early;
-# else /* CONFIG_X86_64 */
+#else /* CONFIG_X86_64 */
size = (unsigned long)boot_params.ext_ramdisk_size << 32;
size |= boot_params.hdr.ramdisk_size;
if (size) {
start = (unsigned long)boot_params.ext_ramdisk_image << 32;
start |= boot_params.hdr.ramdisk_image;
-
start += PAGE_OFFSET;
}
-# endif
+#endif
/*
* Fixup the start address: after reserve_initrd() runs, initrd_start
* has the virtual address of the beginning of the initrd. It also
* possibly relocates the ramdisk. In either case, initrd_start contains
* the updated address so use that instead.
- *
- * initrd_gone is for the hotplug case where we've thrown out initrd
- * already.
*/
- if (!use_pa) {
- if (initrd_gone)
- return (struct cpio_data){ NULL, 0, "" };
- if (initrd_start)
- start = initrd_start;
- } else {
- /*
- * The picture with physical addresses is a bit different: we
- * need to get the *physical* address to which the ramdisk was
- * relocated, i.e., relocated_ramdisk (not initrd_start) and
- * since we're running from physical addresses, we need to access
- * relocated_ramdisk through its *physical* address too.
- */
- u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
- if (*rr)
- start = *rr;
- }
+ if (initrd_start)
+ start = initrd_start;
return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
@@ -330,117 +257,298 @@ static struct platform_device *microcode_pdev;
* requirement can be relaxed in the future. Right now, this is conservative
* and good.
*/
-#define SPINUNIT 100 /* 100 nsec */
+enum sibling_ctrl {
+ /* Spinwait with timeout */
+ SCTRL_WAIT,
+ /* Invoke the microcode_apply() callback */
+ SCTRL_APPLY,
+ /* Proceed without invoking the microcode_apply() callback */
+ SCTRL_DONE,
+};
+
+struct microcode_ctrl {
+ enum sibling_ctrl ctrl;
+ enum ucode_state result;
+ unsigned int ctrl_cpu;
+ bool nmi_enabled;
+};
-static int check_online_cpus(void)
+DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
+static atomic_t late_cpus_in, offline_in_nmi;
+static unsigned int loops_per_usec;
+static cpumask_t cpu_offline_mask;
+
+static noinstr bool wait_for_cpus(atomic_t *cnt)
{
- unsigned int cpu;
+ unsigned int timeout, loops;
- /*
- * Make sure all CPUs are online. It's fine for SMT to be disabled if
- * all the primary threads are still online.
- */
- for_each_present_cpu(cpu) {
- if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
- pr_err("Not all CPUs online, aborting microcode update.\n");
- return -EINVAL;
+ WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);
+
+ for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+ if (!raw_atomic_read(cnt))
+ return true;
+
+ for (loops = 0; loops < loops_per_usec; loops++)
+ cpu_relax();
+
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
}
}
-
- return 0;
+ /* Prevent the late comers from making progress and let them time out */
+ raw_atomic_inc(cnt);
+ return false;
}
-static atomic_t late_cpus_in;
-static atomic_t late_cpus_out;
-
-static int __wait_for_cpus(atomic_t *t, long long timeout)
+static noinstr bool wait_for_ctrl(void)
{
- int all_cpus = num_online_cpus();
-
- atomic_inc(t);
+ unsigned int timeout, loops;
- while (atomic_read(t) < all_cpus) {
- if (timeout < SPINUNIT) {
- pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
- all_cpus - atomic_read(t));
- return 1;
- }
+ for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+ if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
+ return true;
- ndelay(SPINUNIT);
- timeout -= SPINUNIT;
+ for (loops = 0; loops < loops_per_usec; loops++)
+ cpu_relax();
- touch_nmi_watchdog();
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
+ }
}
- return 0;
+ return false;
}
/*
- * Returns:
- * < 0 - on error
- * 0 - success (no update done or microcode was updated)
+ * Protected against instrumentation up to the point where the primary
+ * thread completed the update. See microcode_nmi_handler() for details.
*/
-static int __reload_late(void *info)
+static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
- int cpu = smp_processor_id();
- enum ucode_state err;
- int ret = 0;
+ /* Initial rendezvous to ensure that all CPUs have arrived */
+ if (!wait_for_cpus(&late_cpus_in)) {
+ raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+ return false;
+ }
/*
- * Wait for all CPUs to arrive. A load will not be attempted unless all
- * CPUs show up.
- * */
- if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
- return -1;
+ * Wait for primary threads to complete. If one of them hangs due
+ * to the update, there is no way out. This is non-recoverable
+ * because the CPU might hold locks or resources and confuse the
+ * scheduler, watchdogs etc. There is no way to safely evacuate the
+ * machine.
+ */
+ if (wait_for_ctrl())
+ return true;
+ instrumentation_begin();
+ panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
+ instrumentation_end();
+}
+
+/*
+ * Protected against instrumentation up to the point where the primary
+ * thread completed the update. See microcode_nmi_handler() for details.
+ */
+static noinstr void load_secondary(unsigned int cpu)
+{
+ unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
+ enum ucode_state ret;
+
+ if (!load_secondary_wait(ctrl_cpu)) {
+ instrumentation_begin();
+ pr_err_once("load: %d CPUs timed out\n",
+ atomic_read(&late_cpus_in) - 1);
+ instrumentation_end();
+ return;
+ }
+
+ /* Primary thread completed. Allow to invoke instrumentable code */
+ instrumentation_begin();
/*
- * On an SMT system, it suffices to load the microcode on one sibling of
- * the core because the microcode engine is shared between the threads.
- * Synchronization still needs to take place so that no concurrent
- * loading attempts happen on multiple threads of an SMT core. See
- * below.
+ * If the primary succeeded then invoke the apply() callback,
+ * otherwise copy the state from the primary thread.
*/
- if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
- err = microcode_ops->apply_microcode(cpu);
+ if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
+ ret = microcode_ops->apply_microcode(cpu);
else
- goto wait_for_siblings;
+ ret = per_cpu(ucode_ctrl.result, ctrl_cpu);
- if (err >= UCODE_NFOUND) {
- if (err == UCODE_ERROR) {
- pr_warn("Error reloading microcode on CPU %d\n", cpu);
- ret = -1;
- }
+ this_cpu_write(ucode_ctrl.result, ret);
+ this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
+ instrumentation_end();
+}
+
+static void __load_primary(unsigned int cpu)
+{
+ struct cpumask *secondaries = topology_sibling_cpumask(cpu);
+ enum sibling_ctrl ctrl;
+ enum ucode_state ret;
+ unsigned int sibling;
+
+ /* Initial rendezvous to ensure that all CPUs have arrived */
+ if (!wait_for_cpus(&late_cpus_in)) {
+ this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+ pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
+ return;
}
-wait_for_siblings:
- if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
- panic("Timeout during microcode update!\n");
+ ret = microcode_ops->apply_microcode(cpu);
+ this_cpu_write(ucode_ctrl.result, ret);
+ this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
/*
- * At least one thread has completed update on each core.
- * For others, simply call the update to make sure the
- * per-cpu cpuinfo can be updated with right microcode
- * revision.
+ * If the update was successful, let the siblings run the apply()
+ * callback. If not, tell them it's done. This also covers the
+ * case where the CPU has uniform loading at package or system
+ * scope implemented but does not advertise it.
*/
- if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
- err = microcode_ops->apply_microcode(cpu);
+ if (ret == UCODE_UPDATED || ret == UCODE_OK)
+ ctrl = SCTRL_APPLY;
+ else
+ ctrl = SCTRL_DONE;
+
+ for_each_cpu(sibling, secondaries) {
+ if (sibling != cpu)
+ per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
+ }
+}
+
+static bool kick_offline_cpus(unsigned int nr_offl)
+{
+ unsigned int cpu, timeout;
+
+ for_each_cpu(cpu, &cpu_offline_mask) {
+ /* Enable the rendezvous handler and send NMI */
+ per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
+ apic_send_nmi_to_offline_cpu(cpu);
+ }
+
+ /* Wait for them to arrive */
+ for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) {
+ if (atomic_read(&offline_in_nmi) == nr_offl)
+ return true;
+ udelay(1);
+ }
+ /* Let the others time out */
+ return false;
+}
+
+static void release_offline_cpus(void)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, &cpu_offline_mask)
+ per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
+}
+
+static void load_primary(unsigned int cpu)
+{
+ unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);
+ bool proceed = true;
+
+ /* Kick soft-offlined SMT siblings if required */
+ if (!cpu && nr_offl)
+ proceed = kick_offline_cpus(nr_offl);
+
+ /* If the soft-offlined CPUs did not respond, abort */
+ if (proceed)
+ __load_primary(cpu);
- return ret;
+ /* Unconditionally release soft-offlined SMT siblings if required */
+ if (!cpu && nr_offl)
+ release_offline_cpus();
}
/*
- * Reload microcode late on all CPUs. Wait for a sec until they
- * all gather together.
+ * Minimal stub rendezvous handler for soft-offlined CPUs which participate
+ * in the NMI rendezvous to protect against a concurrent NMI on affected
+ * CPUs.
*/
-static int microcode_reload_late(void)
+void noinstr microcode_offline_nmi_handler(void)
{
- int old = boot_cpu_data.microcode, ret;
+ if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
+ return;
+ raw_cpu_write(ucode_ctrl.nmi_enabled, false);
+ raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
+ raw_atomic_inc(&offline_in_nmi);
+ wait_for_ctrl();
+}
+
+static noinstr bool microcode_update_handler(void)
+{
+ unsigned int cpu = raw_smp_processor_id();
+
+ if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
+ instrumentation_begin();
+ load_primary(cpu);
+ instrumentation_end();
+ } else {
+ load_secondary(cpu);
+ }
+
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
+
+ return true;
+}
+
+/*
+ * Protection against instrumentation is required for CPUs which are not
+ * safe against an NMI which is delivered to the secondary SMT sibling
+ * while the primary thread updates the microcode. Instrumentation can end
+ * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
+ * which is the opposite of what the NMI rendezvous is trying to achieve.
+ *
+ * The primary thread is safe versus instrumentation as the actual
+ * microcode update handles this correctly. It's only the sibling code
+ * path which must be NMI safe until the primary thread completed the
+ * update.
+ */
+bool noinstr microcode_nmi_handler(void)
+{
+ if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
+ return false;
+
+ raw_cpu_write(ucode_ctrl.nmi_enabled, false);
+ return microcode_update_handler();
+}
+
+static int load_cpus_stopped(void *unused)
+{
+ if (microcode_ops->use_nmi) {
+ /* Enable the NMI handler and raise NMI */
+ this_cpu_write(ucode_ctrl.nmi_enabled, true);
+ apic->send_IPI(smp_processor_id(), NMI_VECTOR);
+ } else {
+ /* Just invoke the handler directly */
+ microcode_update_handler();
+ }
+ return 0;
+}
+
+static int load_late_stop_cpus(bool is_safe)
+{
+ unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
+ unsigned int nr_offl, offline = 0;
+ int old_rev = boot_cpu_data.microcode;
struct cpuinfo_x86 prev_info;
- pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
- pr_err("You should switch to early loading, if possible.\n");
+ if (!is_safe) {
+ pr_err("Late microcode loading without minimal revision check.\n");
+ pr_err("You should switch to early loading, if possible.\n");
+ }
- atomic_set(&late_cpus_in, 0);
- atomic_set(&late_cpus_out, 0);
+ atomic_set(&late_cpus_in, num_online_cpus());
+ atomic_set(&offline_in_nmi, 0);
+ loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000);
/*
* Take a snapshot before the microcode update in order to compare and
@@ -448,52 +556,162 @@ static int microcode_reload_late(void)
*/
store_cpu_caps(&prev_info);
- ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
- if (!ret) {
- pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
- old, boot_cpu_data.microcode);
- microcode_check(&prev_info);
- } else {
- pr_info("Reload failed, current microcode revision: 0x%x\n",
- boot_cpu_data.microcode);
+ if (microcode_ops->use_nmi)
+ static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);
+
+ stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);
+
+ if (microcode_ops->use_nmi)
+ static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);
+
+ /* Analyze the results */
+ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
+ switch (per_cpu(ucode_ctrl.result, cpu)) {
+ case UCODE_UPDATED: updated++; break;
+ case UCODE_TIMEOUT: timedout++; break;
+ case UCODE_OK: siblings++; break;
+ case UCODE_OFFLINE: offline++; break;
+ default: failed++; break;
+ }
+ }
+
+ if (microcode_ops->finalize_late_load)
+ microcode_ops->finalize_late_load(!updated);
+
+ if (!updated) {
+ /* Nothing changed. */
+ if (!failed && !timedout)
+ return 0;
+
+ nr_offl = cpumask_weight(&cpu_offline_mask);
+ if (offline < nr_offl) {
+ pr_warn("%u offline siblings did not respond.\n",
+ nr_offl - atomic_read(&offline_in_nmi));
+ return -EIO;
+ }
+ pr_err("update failed: %u CPUs failed %u CPUs timed out\n",
+ failed, timedout);
+ return -EIO;
+ }
+
+ if (!is_safe || failed || timedout)
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
+ pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
+ if (failed || timedout) {
+ pr_err("load incomplete. %u CPUs timed out or failed\n",
+ num_online_cpus() - (updated + siblings));
+ }
+ pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode);
+ microcode_check(&prev_info);
+
+ return updated + siblings == num_online_cpus() ? 0 : -EIO;
+}
+
+/*
+ * This function does two things:
+ *
+ * 1) Ensure that all required CPUs which are present and have been booted
+ * once are online.
+ *
+ * To pass this check, all primary threads must be online.
+ *
+ * If the microcode load is not safe against NMI then all SMT threads
+ * must be online as well because they still react to NMIs when they are
+ * soft-offlined and parked in one of the play_dead() variants. So if a
+ * NMI hits while the primary thread updates the microcode the resulting
+ * behaviour is undefined. The default play_dead() implementation on
+ * modern CPUs uses MWAIT, which is also not guaranteed to be safe
+ * against a microcode update which affects MWAIT.
+ *
+ * As soft-offlined CPUs still react on NMIs, the SMT sibling
+ * restriction can be lifted when the vendor driver signals to use NMI
+ * for rendezvous and the APIC provides a mechanism to send an NMI to a
+ * soft-offlined CPU. The soft-offlined CPUs are then able to
+ * participate in the rendezvous in a trivial stub handler.
+ *
+ * 2) Initialize the per CPU control structure and create a cpumask
+ * which contains "offline"; secondary threads, so they can be handled
+ * correctly by a control CPU.
+ */
+static bool setup_cpus(void)
+{
+ struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
+ bool allow_smt_offline;
+ unsigned int cpu;
+
+ allow_smt_offline = microcode_ops->nmi_safe ||
+ (microcode_ops->use_nmi && apic->nmi_to_offline_cpu);
+
+ cpumask_clear(&cpu_offline_mask);
+
+ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
+ /*
+ * Offline CPUs sit in one of the play_dead() functions
+ * with interrupts disabled, but they still react on NMIs
+ * and execute arbitrary code. Also MWAIT being updated
+ * while the offline CPU sits there is not necessarily safe
+ * on all CPU variants.
+ *
+ * Mark them in the offline_cpus mask which will be handled
+ * by CPU0 later in the update process.
+ *
+ * Ensure that the primary thread is online so that it is
+ * guaranteed that all cores are updated.
+ */
+ if (!cpu_online(cpu)) {
+ if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
+ pr_err("CPU %u not online, loading aborted\n", cpu);
+ return false;
+ }
+ cpumask_set_cpu(cpu, &cpu_offline_mask);
+ per_cpu(ucode_ctrl, cpu) = ctrl;
+ continue;
+ }
+
+ /*
+ * Initialize the per CPU state. This is core scope for now,
+ * but prepared to take package or system scope into account.
+ */
+ ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
+ per_cpu(ucode_ctrl, cpu) = ctrl;
}
+ return true;
+}
- return ret;
+static int load_late_locked(void)
+{
+ if (!setup_cpus())
+ return -EBUSY;
+
+ switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
+ case UCODE_NEW:
+ return load_late_stop_cpus(false);
+ case UCODE_NEW_SAFE:
+ return load_late_stop_cpus(true);
+ case UCODE_NFOUND:
+ return -ENOENT;
+ default:
+ return -EBADFD;
+ }
}
static ssize_t reload_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- enum ucode_state tmp_ret = UCODE_OK;
- int bsp = boot_cpu_data.cpu_index;
unsigned long val;
- ssize_t ret = 0;
+ ssize_t ret;
ret = kstrtoul(buf, 0, &val);
if (ret || val != 1)
return -EINVAL;
cpus_read_lock();
-
- ret = check_online_cpus();
- if (ret)
- goto put;
-
- tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev);
- if (tmp_ret != UCODE_NEW)
- goto put;
-
- ret = microcode_reload_late();
-put:
+ ret = load_late_locked();
cpus_read_unlock();
- if (ret == 0)
- ret = size;
-
- add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
-
- return ret;
+ return ret ? : size;
}
static DEVICE_ATTR_WO(reload);
@@ -535,17 +753,6 @@ static void microcode_fini_cpu(int cpu)
microcode_ops->microcode_fini_cpu(cpu);
}
-static enum ucode_state microcode_init_cpu(int cpu)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- memset(uci, 0, sizeof(*uci));
-
- microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
-
- return microcode_ops->apply_microcode(cpu);
-}
-
/**
* microcode_bsp_resume - Update boot CPU microcode during resume.
*/
@@ -564,19 +771,18 @@ static struct syscore_ops mc_syscore_ops = {
.resume = microcode_bsp_resume,
};
-static int mc_cpu_starting(unsigned int cpu)
-{
- enum ucode_state err = microcode_ops->apply_microcode(cpu);
-
- pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err);
-
- return err == UCODE_ERROR;
-}
-
static int mc_cpu_online(unsigned int cpu)
{
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct device *dev = get_cpu_device(cpu);
+ memset(uci, 0, sizeof(*uci));
+
+ microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
+ cpu_data(cpu).microcode = uci->cpu_sig.rev;
+ if (!cpu)
+ boot_cpu_data.microcode = uci->cpu_sig.rev;
+
if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
return 0;
@@ -584,33 +790,13 @@ static int mc_cpu_online(unsigned int cpu)
static int mc_cpu_down_prep(unsigned int cpu)
{
- struct device *dev;
-
- dev = get_cpu_device(cpu);
+ struct device *dev = get_cpu_device(cpu);
microcode_fini_cpu(cpu);
-
- /* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
- pr_debug("%s: CPU%d\n", __func__, cpu);
-
return 0;
}
-static void setup_online_cpu(struct work_struct *work)
-{
- int cpu = smp_processor_id();
- enum ucode_state err;
-
- err = microcode_init_cpu(cpu);
- if (err == UCODE_ERROR) {
- pr_err("Error applying microcode on CPU%d\n", cpu);
- return;
- }
-
- mc_cpu_online(cpu);
-}
-
static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
&dev_attr_reload.attr,
@@ -656,14 +842,9 @@ static int __init microcode_init(void)
}
}
- /* Do per-CPU setup */
- schedule_on_each_cpu(setup_online_cpu);
-
register_syscore_ops(&mc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
- mc_cpu_starting, NULL);
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
- mc_cpu_online, mc_cpu_down_prep);
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+ mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
@@ -674,5 +855,4 @@ static int __init microcode_init(void)
return error;
}
-fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);
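The rendezvous helper wait_for_cpus() added above relies on a counter trick:
every participant decrements a shared counter, spins until it reaches zero,
and a participant that times out re-increments the counter so the remaining
waiters time out as well instead of spinning forever. Below is a small
standalone model of that pattern using C11 atomics and pthreads; it is
illustrative only (thread count and spin budget are made up), not kernel code.

/* Standalone model of the wait_for_cpus() rendezvous-with-timeout pattern. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

#define NCPUS		4
#define MAX_SPINS	(1000 * 1000)

static atomic_int late_cpus_in = NCPUS;

static bool wait_for_cpus_model(atomic_int *cnt)
{
	int spins;

	atomic_fetch_sub(cnt, 1);

	for (spins = 0; spins < MAX_SPINS; spins++) {
		if (!atomic_load(cnt))
			return true;
	}

	/* Timed out: push the counter back up so late comers fail too. */
	atomic_fetch_add(cnt, 1);
	return false;
}

static void *cpu_thread(void *arg)
{
	printf("cpu %ld rendezvous %s\n", (long)arg,
	       wait_for_cpus_model(&late_cpus_in) ? "ok" : "timed out");
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	long i;

	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)i);
	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Compile with "cc -pthread"; with all four threads participating the counter
reaches zero and every thread reports "ok", mirroring a successful rendezvous.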
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 94dd6af9c963..6024feb98d29 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -14,7 +14,6 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -32,11 +31,14 @@
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
+#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL)
+
/* Current microcode patch used in early patching on the APs. */
-static struct microcode_intel *intel_ucode_patch;
+static struct microcode_intel *ucode_patch_va __read_mostly;
+static struct microcode_intel *ucode_patch_late __read_mostly;
/* last level cache size per core */
-static int llc_size_per_core;
+static unsigned int llc_size_per_core __ro_after_init;
/* microcode format is extended from prescott processors */
struct extended_signature {
@@ -66,60 +68,52 @@ static inline unsigned int exttable_size(struct extended_sigtable *et)
return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
}
-int intel_cpu_collect_info(struct ucode_cpu_info *uci)
+void intel_collect_cpu_info(struct cpu_signature *sig)
{
- unsigned int val[2];
- unsigned int family, model;
- struct cpu_signature csig = { 0 };
- unsigned int eax, ebx, ecx, edx;
-
- memset(uci, 0, sizeof(*uci));
-
- eax = 0x00000001;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- csig.sig = eax;
+ sig->sig = cpuid_eax(1);
+ sig->pf = 0;
+ sig->rev = intel_get_microcode_revision();
- family = x86_family(eax);
- model = x86_model(eax);
+ if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) {
+ unsigned int val[2];
- if (model >= 5 || family > 6) {
/* get processor flags from MSR 0x17 */
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig.pf = 1 << ((val[1] >> 18) & 7);
+ sig->pf = 1 << ((val[1] >> 18) & 7);
}
+}
+EXPORT_SYMBOL_GPL(intel_collect_cpu_info);
- csig.rev = intel_get_microcode_revision();
-
- uci->cpu_sig = csig;
+static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
+ unsigned int pf2)
+{
+ if (s1->sig != sig2)
+ return false;
- return 0;
+ /* Processor flags are either both 0 or they intersect. */
+ return ((!s1->pf && !pf2) || (s1->pf & pf2));
}
-EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int intel_find_matching_signature(void *mc, unsigned int csig, int cpf)
+bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
{
struct microcode_header_intel *mc_hdr = mc;
- struct extended_sigtable *ext_hdr;
struct extended_signature *ext_sig;
+ struct extended_sigtable *ext_hdr;
int i;
- if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
- return 1;
+ if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
+ return true;
/* Look for ext. headers: */
if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
- return 0;
+ return false;
ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
for (i = 0; i < ext_hdr->count; i++) {
- if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
- return 1;
+ if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
+ return true;
ext_sig++;
}
return 0;
@@ -240,264 +234,91 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
+static void update_ucode_pointer(struct microcode_intel *mc)
{
- struct microcode_header_intel *mc_hdr = mc;
-
- if (mc_hdr->rev <= new_rev)
- return 0;
-
- return intel_find_matching_signature(mc, csig, cpf);
-}
-
-static struct ucode_patch *memdup_patch(void *data, unsigned int size)
-{
- struct ucode_patch *p;
-
- p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
- if (!p)
- return NULL;
-
- p->data = kmemdup(data, size, GFP_KERNEL);
- if (!p->data) {
- kfree(p);
- return NULL;
- }
-
- return p;
-}
-
-static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
-{
- struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
- struct ucode_patch *iter, *tmp, *p = NULL;
- bool prev_found = false;
- unsigned int sig, pf;
-
- mc_hdr = (struct microcode_header_intel *)data;
-
- list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
- mc_saved_hdr = (struct microcode_header_intel *)iter->data;
- sig = mc_saved_hdr->sig;
- pf = mc_saved_hdr->pf;
-
- if (intel_find_matching_signature(data, sig, pf)) {
- prev_found = true;
-
- if (mc_hdr->rev <= mc_saved_hdr->rev)
- continue;
-
- p = memdup_patch(data, size);
- if (!p)
- pr_err("Error allocating buffer %p\n", data);
- else {
- list_replace(&iter->plist, &p->plist);
- kfree(iter->data);
- kfree(iter);
- }
- }
- }
+ kvfree(ucode_patch_va);
/*
- * There weren't any previous patches found in the list cache; save the
- * newly found.
+ * Save the virtual address for early loading and for eventual free
+ * on late loading.
*/
- if (!prev_found) {
- p = memdup_patch(data, size);
- if (!p)
- pr_err("Error allocating buffer for %p\n", data);
- else
- list_add_tail(&p->plist, &microcode_cache);
- }
-
- if (!p)
- return;
+ ucode_patch_va = mc;
+}
- if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
- return;
+static void save_microcode_patch(struct microcode_intel *patch)
+{
+ unsigned int size = get_totalsize(&patch->hdr);
+ struct microcode_intel *mc;
- /*
- * Save for early loading. On 32-bit, that needs to be a physical
- * address as the APs are running from physical addresses, before
- * paging has been enabled.
- */
- if (IS_ENABLED(CONFIG_X86_32))
- intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+ mc = kvmemdup(patch, size, GFP_KERNEL);
+ if (mc)
+ update_ucode_pointer(mc);
else
- intel_ucode_patch = p->data;
+ pr_err("Unable to allocate microcode memory size: %u\n", size);
}
-/*
- * Get microcode matching with BSP's model. Only CPUs with the same model as
- * BSP can stay in the platform.
- */
-static struct microcode_intel *
-scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
+/* Scan blob for microcode matching the boot CPUs family, model, stepping */
+static __init struct microcode_intel *scan_microcode(void *data, size_t size,
+ struct ucode_cpu_info *uci,
+ bool save)
{
struct microcode_header_intel *mc_header;
struct microcode_intel *patch = NULL;
+ u32 cur_rev = uci->cpu_sig.rev;
unsigned int mc_size;
- while (size) {
- if (size < sizeof(struct microcode_header_intel))
- break;
-
+ for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
mc_header = (struct microcode_header_intel *)data;
mc_size = get_totalsize(mc_header);
- if (!mc_size ||
- mc_size > size ||
+ if (!mc_size || mc_size > size ||
intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
break;
- size -= mc_size;
-
- if (!intel_find_matching_signature(data, uci->cpu_sig.sig,
- uci->cpu_sig.pf)) {
- data += mc_size;
+ if (!intel_find_matching_signature(data, &uci->cpu_sig))
continue;
- }
+ /*
+ * For saving the early microcode, find the matching revision which
+ * was loaded on the BSP.
+ *
+	 * On the BSP during early boot, find a revision newer than the
+	 * one actually loaded in the CPU.
+ */
if (save) {
- save_microcode_patch(uci, data, mc_size);
- goto next;
- }
-
-
- if (!patch) {
- if (!has_newer_microcode(data,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf,
- uci->cpu_sig.rev))
- goto next;
-
- } else {
- struct microcode_header_intel *phdr = &patch->hdr;
-
- if (!has_newer_microcode(data,
- phdr->sig,
- phdr->pf,
- phdr->rev))
- goto next;
+ if (cur_rev != mc_header->rev)
+ continue;
+ } else if (cur_rev >= mc_header->rev) {
+ continue;
}
- /* We have a newer patch, save it. */
patch = data;
-
-next:
- data += mc_size;
- }
-
- if (size)
- return NULL;
-
- return patch;
-}
-
-static bool load_builtin_intel_microcode(struct cpio_data *cp)
-{
- unsigned int eax = 1, ebx, ecx = 0, edx;
- struct firmware fw;
- char name[30];
-
- if (IS_ENABLED(CONFIG_X86_32))
- return false;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- sprintf(name, "intel-ucode/%02x-%02x-%02x",
- x86_family(eax), x86_model(eax), x86_stepping(eax));
-
- if (firmware_request_builtin(&fw, name)) {
- cp->size = fw.size;
- cp->data = (void *)fw.data;
- return true;
- }
-
- return false;
-}
-
-static void print_ucode_info(int old_rev, int new_rev, unsigned int date)
-{
- pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
- old_rev,
- new_rev,
- date & 0xffff,
- date >> 24,
- (date >> 16) & 0xff);
-}
-
-#ifdef CONFIG_X86_32
-
-static int delay_ucode_info;
-static int current_mc_date;
-static int early_old_rev;
-
-/*
- * Print early updated ucode info after printk works. This is delayed info dump.
- */
-void show_ucode_info_early(void)
-{
- struct ucode_cpu_info uci;
-
- if (delay_ucode_info) {
- intel_cpu_collect_info(&uci);
- print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date);
- delay_ucode_info = 0;
+ cur_rev = mc_header->rev;
}
-}
-
-/*
- * At this point, we can not call printk() yet. Delay printing microcode info in
- * show_ucode_info_early() until printk() works.
- */
-static void print_ucode(int old_rev, int new_rev, int date)
-{
- int *delay_ucode_info_p;
- int *current_mc_date_p;
- int *early_old_rev_p;
-
- delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
- current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
- early_old_rev_p = (int *)__pa_nodebug(&early_old_rev);
-
- *delay_ucode_info_p = 1;
- *current_mc_date_p = date;
- *early_old_rev_p = old_rev;
-}
-#else
-static inline void print_ucode(int old_rev, int new_rev, int date)
-{
- print_ucode_info(old_rev, new_rev, date);
+ return size ? NULL : patch;
}
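As a worked illustration of the selection rule in the comment above (the revision numbers and the pick_rev() helper are invented, not part of the patch): with blobs 0x10, 0x20 and 0x30 in the container and the CPU running 0x20, the save pass keeps 0x20 while the early-boot pass keeps 0x30.

/* Illustrative sketch only -- mirrors the save/!save selection above. */
static unsigned int pick_rev(const unsigned int *revs, int n,
			     unsigned int cur_rev, int save)
{
	unsigned int best = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (save) {
			/* Early initcall: cache exactly what the BSP runs */
			if (revs[i] == cur_rev)
				best = revs[i];
		} else if (revs[i] > cur_rev && revs[i] > best) {
			/* Early boot: only a strictly newer revision */
			best = revs[i];
		}
	}
	return best;	/* 0 means "nothing suitable found" */
}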
-#endif
-static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
+static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
+ struct microcode_intel *mc,
+ u32 *cur_rev)
{
- struct microcode_intel *mc;
- u32 rev, old_rev;
+ u32 rev;
- mc = uci->mc;
if (!mc)
- return 0;
+ return UCODE_NFOUND;
/*
 	 * Save us the MSR write below - which is a particularly expensive
* operation - when the other hyperthread has updated the microcode
* already.
*/
- rev = intel_get_microcode_revision();
- if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
+ *cur_rev = intel_get_microcode_revision();
+ if (*cur_rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = *cur_rev;
return UCODE_OK;
}
- old_rev = rev;
-
/*
* Writeback and invalidate caches before updating microcode to avoid
* internal issues depending on what the microcode is updating.
@@ -509,247 +330,182 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
rev = intel_get_microcode_revision();
if (rev != mc->hdr.rev)
- return -1;
+ return UCODE_ERROR;
uci->cpu_sig.rev = rev;
+ return UCODE_UPDATED;
+}
- if (early)
- print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date);
- else
- print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date);
+static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
+{
+ struct microcode_intel *mc = uci->mc;
+ enum ucode_state ret;
+ u32 cur_rev, date;
- return 0;
+ ret = __apply_microcode(uci, mc, &cur_rev);
+ if (ret == UCODE_UPDATED) {
+ date = mc->hdr.date;
+ pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
+ cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff);
+ }
+ return ret;
}
-int __init save_microcode_in_initrd_intel(void)
+static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
{
- struct ucode_cpu_info uci;
- struct cpio_data cp;
-
- /*
- * initrd is going away, clear patch ptr. We will scan the microcode one
- * last time before jettisoning and save a patch, if found. Then we will
- * update that pointer too, with a stable patch address to use when
- * resuming the cores.
- */
- intel_ucode_patch = NULL;
+ unsigned int eax = 1, ebx, ecx = 0, edx;
+ struct firmware fw;
+ char name[30];
- if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(ucode_path, false);
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
- if (!(cp.data && cp.size))
- return 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
- intel_cpu_collect_info(&uci);
+ sprintf(name, "intel-ucode/%02x-%02x-%02x",
+ x86_family(eax), x86_model(eax), x86_stepping(eax));
- scan_microcode(cp.data, cp.size, &uci, true);
- return 0;
+ if (firmware_request_builtin(&fw, name)) {
+ cp->size = fw.size;
+ cp->data = (void *)fw.data;
+ return true;
+ }
+ return false;
}
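Not part of the patch, just an illustration of how the builtin blob name above is formed: the helpers below re-implement the x86_family()/x86_model()/x86_stepping() decoding for a raw CPUID(1) signature; 0x000906ea is an arbitrary example value.

#include <stdio.h>

/* Decode family/model the way x86_family()/x86_model() do (sketch). */
static unsigned int sig_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	if (fam == 0xf)
		fam += (sig >> 20) & 0xff;	/* extended family */
	return fam;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int model = (sig >> 4) & 0xf;

	if (sig_family(sig) >= 0x6)
		model += ((sig >> 16) & 0xf) << 4;	/* extended model */
	return model;
}

int main(void)
{
	unsigned int sig = 0x000906ea;	/* example signature only */
	char name[30];

	snprintf(name, sizeof(name), "intel-ucode/%02x-%02x-%02x",
		 sig_family(sig), sig_model(sig), sig & 0xf);
	printf("%s\n", name);		/* prints intel-ucode/06-9e-0a */
	return 0;
}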
-/*
- * @res_patch, output: a pointer to the patch we found.
- */
-static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
+static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
{
- static const char *path;
struct cpio_data cp;
- bool use_pa;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- path = (const char *)__pa_nodebug(ucode_path);
- use_pa = true;
- } else {
- path = ucode_path;
- use_pa = false;
- }
- /* try built-in microcode first */
if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(path, use_pa);
+ cp = find_microcode_in_initrd(ucode_path);
if (!(cp.data && cp.size))
return NULL;
- intel_cpu_collect_info(uci);
+ intel_collect_cpu_info(&uci->cpu_sig);
- return scan_microcode(cp.data, cp.size, uci, false);
+ return scan_microcode(cp.data, cp.size, uci, save);
}
-void __init load_ucode_intel_bsp(void)
+/*
+ * Invoked from an early init call to save the microcode blob which was
+ * selected during early boot when mm was not usable. The microcode must be
+ * saved because initrd is going away. It's an early init call so the APs
+ * can just use the pointer and do not have to scan initrd/builtin firmware
+ * again.
+ */
+static int __init save_builtin_microcode(void)
{
- struct microcode_intel *patch;
struct ucode_cpu_info uci;
- patch = __load_ucode_intel(&uci);
- if (!patch)
- return;
+ if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
+ return 0;
- uci.mc = patch;
+ if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
- apply_microcode_early(&uci, true);
+ uci.mc = get_microcode_blob(&uci, true);
+ if (uci.mc)
+ save_microcode_patch(uci.mc);
+ return 0;
}
+early_initcall(save_builtin_microcode);
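A rough userspace sketch of the ucode_patch_va handoff described in the comment above; BSP_LOADED, boot_side(), initcall_side() and the malloc()-based copy are invented for the illustration and only stand in for UCODE_BSP_LOADED, load_ucode_intel_bsp() and save_microcode_patch().

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

/* Sentinel: "the BSP applied microcode early, no stable copy cached yet". */
#define BSP_LOADED	((void *)-1UL)

static _Atomic(void *) patch_va;

/* Early boot path: only record that an update happened. */
static void boot_side(void)
{
	atomic_store(&patch_va, BSP_LOADED);
}

/* Early initcall: replace the sentinel with a stable copy of the blob. */
static void initcall_side(const void *blob, size_t size)
{
	void *copy;

	/* Bail out unless the BSP really loaded something early. */
	if (atomic_exchange(&patch_va, NULL) != BSP_LOADED)
		return;

	copy = malloc(size);
	if (!copy)
		return;
	memcpy(copy, blob, size);
	atomic_store(&patch_va, copy);	/* APs and resume use this copy */
}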
-void load_ucode_intel_ap(void)
+/* Load microcode on BSP from initrd or builtin blobs */
+void __init load_ucode_intel_bsp(void)
{
- struct microcode_intel *patch, **iup;
struct ucode_cpu_info uci;
- if (IS_ENABLED(CONFIG_X86_32))
- iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
- else
- iup = &intel_ucode_patch;
-
- if (!*iup) {
- patch = __load_ucode_intel(&uci);
- if (!patch)
- return;
-
- *iup = patch;
- }
-
- uci.mc = *iup;
-
- apply_microcode_early(&uci, true);
+ uci.mc = get_microcode_blob(&uci, false);
+ if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED)
+ ucode_patch_va = UCODE_BSP_LOADED;
}
-static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
+void load_ucode_intel_ap(void)
{
- struct microcode_header_intel *phdr;
- struct ucode_patch *iter, *tmp;
-
- list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
-
- phdr = (struct microcode_header_intel *)iter->data;
-
- if (phdr->rev <= uci->cpu_sig.rev)
- continue;
-
- if (!intel_find_matching_signature(phdr,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf))
- continue;
+ struct ucode_cpu_info uci;
- return iter->data;
- }
- return NULL;
+ uci.mc = ucode_patch_va;
+ if (uci.mc)
+ apply_microcode_early(&uci);
}
+/* Reload microcode on resume */
void reload_ucode_intel(void)
{
- struct microcode_intel *p;
- struct ucode_cpu_info uci;
-
- intel_cpu_collect_info(&uci);
-
- p = find_patch(&uci);
- if (!p)
- return;
-
- uci.mc = p;
+ struct ucode_cpu_info uci = { .mc = ucode_patch_va, };
- apply_microcode_early(&uci, false);
+ if (uci.mc)
+ apply_microcode_early(&uci);
}
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu_num);
- unsigned int val[2];
-
- memset(csig, 0, sizeof(*csig));
-
- csig->sig = cpuid_eax(0x00000001);
-
- if ((c->x86_model >= 5) || (c->x86 > 6)) {
- /* get processor flags from MSR 0x17 */
- rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig->pf = 1 << ((val[1] >> 18) & 7);
- }
-
- csig->rev = c->microcode;
-
+ intel_collect_cpu_info(csig);
return 0;
}
-static enum ucode_state apply_microcode_intel(int cpu)
+static enum ucode_state apply_microcode_late(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
- struct microcode_intel *mc;
+ struct microcode_intel *mc = ucode_patch_late;
enum ucode_state ret;
- static int prev_rev;
- u32 rev;
+ u32 cur_rev;
- /* We should bind the task to the CPU */
- if (WARN_ON(raw_smp_processor_id() != cpu))
+ if (WARN_ON_ONCE(smp_processor_id() != cpu))
return UCODE_ERROR;
- /* Look for a newer patch in our cache: */
- mc = find_patch(uci);
- if (!mc) {
- mc = uci->mc;
- if (!mc)
- return UCODE_NFOUND;
- }
+ ret = __apply_microcode(uci, mc, &cur_rev);
+ if (ret != UCODE_UPDATED && ret != UCODE_OK)
+ return ret;
- /*
- * Save us the MSR write below - which is a particular expensive
- * operation - when the other hyperthread has updated the microcode
- * already.
- */
- rev = intel_get_microcode_revision();
- if (rev >= mc->hdr.rev) {
- ret = UCODE_OK;
- goto out;
+ if (!cpu && uci->cpu_sig.rev != cur_rev) {
+ pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n",
+ uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24,
+ (mc->hdr.date >> 16) & 0xff);
}
- /*
- * Writeback and invalidate caches before updating microcode to avoid
- * internal issues depending on what the microcode is updating.
- */
- native_wbinvd();
+ cpu_data(cpu).microcode = uci->cpu_sig.rev;
+ if (!cpu)
+ boot_cpu_data.microcode = uci->cpu_sig.rev;
- /* write microcode via MSR 0x79 */
- wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+ return ret;
+}
- rev = intel_get_microcode_revision();
+static bool ucode_validate_minrev(struct microcode_header_intel *mc_header)
+{
+ int cur_rev = boot_cpu_data.microcode;
- if (rev != mc->hdr.rev) {
- pr_err("CPU%d update to revision 0x%x failed\n",
- cpu, mc->hdr.rev);
- return UCODE_ERROR;
+ /*
+ * When late-loading, ensure the header declares a minimum revision
+ * required to perform a late-load. The previously reserved field
+ * is 0 in older microcode blobs.
+ */
+ if (!mc_header->min_req_ver) {
+ pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n");
+ return false;
}
- if (bsp && rev != prev_rev) {
- pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
- rev,
- mc->hdr.date & 0xffff,
- mc->hdr.date >> 24,
- (mc->hdr.date >> 16) & 0xff);
- prev_rev = rev;
+ /*
+	 * Check whether the current revision is greater than or equal to
+	 * the minimum revision specified in the header.
+ */
+ if (cur_rev < mc_header->min_req_ver) {
+ pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev);
+ pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver);
+ return false;
}
-
- ret = UCODE_UPDATED;
-
-out:
- uci->cpu_sig.rev = rev;
- c->microcode = rev;
-
- /* Update boot_cpu_data's revision too, if we're on the BSP: */
- if (bsp)
- boot_cpu_data.microcode = rev;
-
- return ret;
+ return true;
}
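Just to make the two checks above concrete (the structure and revision numbers below are invented for the example, not the kernel implementation):

/* Illustrative sketch only. */
struct minrev_hdr { unsigned int rev, min_req_ver; };

static int late_load_allowed(unsigned int cur_rev, const struct minrev_hdr *h)
{
	if (!h->min_req_ver)		/* old blob: field still reserved/zero */
		return 0;
	return cur_rev >= h->min_req_ver;
}

/*
 * With a blob { .rev = 0x100, .min_req_ver = 0x90 }:
 *   current revision 0x90 or newer -> late load allowed
 *   current revision 0x80          -> refused, early loading required
 */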
-static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
+static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- unsigned int curr_mc_size = 0, new_mc_size = 0;
- enum ucode_state ret = UCODE_OK;
- int new_rev = uci->cpu_sig.rev;
+ bool is_safe, new_is_safe = false;
+ int cur_rev = uci->cpu_sig.rev;
+ unsigned int curr_mc_size = 0;
u8 *new_mc = NULL, *mc = NULL;
- unsigned int csig, cpf;
while (iov_iter_count(iter)) {
struct microcode_header_intel mc_header;
@@ -758,68 +514,66 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
pr_err("error! Truncated or inaccessible header in microcode data file\n");
- break;
+ goto fail;
}
mc_size = get_totalsize(&mc_header);
if (mc_size < sizeof(mc_header)) {
pr_err("error! Bad data in microcode data file (totalsize too small)\n");
- break;
+ goto fail;
}
data_size = mc_size - sizeof(mc_header);
if (data_size > iov_iter_count(iter)) {
pr_err("error! Bad data in microcode data file (truncated file?)\n");
- break;
+ goto fail;
}
/* For performance reasons, reuse mc area when possible */
if (!mc || mc_size > curr_mc_size) {
- vfree(mc);
- mc = vmalloc(mc_size);
+ kvfree(mc);
+ mc = kvmalloc(mc_size, GFP_KERNEL);
if (!mc)
- break;
+ goto fail;
curr_mc_size = mc_size;
}
memcpy(mc, &mc_header, sizeof(mc_header));
data = mc + sizeof(mc_header);
if (!copy_from_iter_full(data, data_size, iter) ||
- intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
- break;
- }
+ intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
+ goto fail;
- csig = uci->cpu_sig.sig;
- cpf = uci->cpu_sig.pf;
- if (has_newer_microcode(mc, csig, cpf, new_rev)) {
- vfree(new_mc);
- new_rev = mc_header.rev;
- new_mc = mc;
- new_mc_size = mc_size;
- mc = NULL; /* trigger new vmalloc */
- ret = UCODE_NEW;
- }
- }
+ if (cur_rev >= mc_header.rev)
+ continue;
- vfree(mc);
+ if (!intel_find_matching_signature(mc, &uci->cpu_sig))
+ continue;
- if (iov_iter_count(iter)) {
- vfree(new_mc);
- return UCODE_ERROR;
+ is_safe = ucode_validate_minrev(&mc_header);
+ if (force_minrev && !is_safe)
+ continue;
+
+ kvfree(new_mc);
+ cur_rev = mc_header.rev;
+ new_mc = mc;
+ new_is_safe = is_safe;
+ mc = NULL;
}
+ if (iov_iter_count(iter))
+ goto fail;
+
+ kvfree(mc);
if (!new_mc)
return UCODE_NFOUND;
- vfree(uci->mc);
- uci->mc = (struct microcode_intel *)new_mc;
-
- /* Save for CPU hotplug */
- save_microcode_patch(uci, new_mc, new_mc_size);
+ ucode_patch_late = (struct microcode_intel *)new_mc;
+ return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW;
- pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
- cpu, new_rev, uci->cpu_sig.rev);
-
- return ret;
+fail:
+ kvfree(mc);
+ kvfree(new_mc);
+ return UCODE_ERROR;
}
static bool is_blacklisted(unsigned int cpu)
@@ -868,26 +622,36 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
kvec.iov_base = (void *)firmware->data;
kvec.iov_len = firmware->size;
iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
- ret = generic_load_microcode(cpu, &iter);
+ ret = parse_microcode_blobs(cpu, &iter);
release_firmware(firmware);
return ret;
}
+static void finalize_late_load(int result)
+{
+ if (!result)
+ update_ucode_pointer(ucode_patch_late);
+ else
+ kvfree(ucode_patch_late);
+ ucode_patch_late = NULL;
+}
+
static struct microcode_ops microcode_intel_ops = {
- .request_microcode_fw = request_microcode_fw,
- .collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode_intel,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info,
+ .apply_microcode = apply_microcode_late,
+ .finalize_late_load = finalize_late_load,
+ .use_nmi = IS_ENABLED(CONFIG_X86_64),
};
-static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
u64 llc_size = c->x86_cache_size * 1024ULL;
do_div(llc_size, c->x86_max_cores);
-
- return (int)llc_size;
+ llc_size_per_core = (unsigned int)llc_size;
}
struct microcode_ops * __init init_intel_microcode(void)
@@ -900,7 +664,7 @@ struct microcode_ops * __init init_intel_microcode(void)
return NULL;
}
- llc_size_per_core = calc_llc_size_per_core(c);
+ calc_llc_size_per_core(c);
return &microcode_intel_ops;
}
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index bf883aa71233..f8047b12329a 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -8,43 +8,37 @@
#include <asm/cpu.h>
#include <asm/microcode.h>
-struct ucode_patch {
- struct list_head plist;
- void *data; /* Intel uses only this one */
- unsigned int size;
- u32 patch_id;
- u16 equiv_cpu;
-};
-
-extern struct list_head microcode_cache;
-
struct device;
enum ucode_state {
UCODE_OK = 0,
UCODE_NEW,
+ UCODE_NEW_SAFE,
UCODE_UPDATED,
UCODE_NFOUND,
UCODE_ERROR,
+ UCODE_TIMEOUT,
+ UCODE_OFFLINE,
};
struct microcode_ops {
enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
-
void (*microcode_fini_cpu)(int cpu);
/*
- * The generic 'microcode_core' part guarantees that
- * the callbacks below run on a target cpu when they
- * are being called.
+ * The generic 'microcode_core' part guarantees that the callbacks
+ * below run on a target CPU when they are being called.
* See also the "Synchronization" section in microcode_core.c.
*/
- enum ucode_state (*apply_microcode)(int cpu);
- int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+ enum ucode_state (*apply_microcode)(int cpu);
+ int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+ void (*finalize_late_load)(int result);
+ unsigned int nmi_safe : 1,
+ use_nmi : 1;
};
extern struct ucode_cpu_info ucode_cpu_info[];
-struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
+struct cpio_data find_microcode_in_initrd(const char *path);
#define MAX_UCODE_COUNT 128
@@ -94,12 +88,12 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax);
}
-extern bool initrd_gone;
+extern bool dis_ucode_ldr;
+extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(unsigned int family);
void load_ucode_amd_ap(unsigned int family);
-void load_ucode_amd_early(unsigned int cpuid_1_eax);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
@@ -107,7 +101,6 @@ void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
-static inline void load_ucode_amd_early(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
@@ -117,13 +110,11 @@ static inline void exit_amd_microcode(void) { }
#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(void);
void load_ucode_intel_ap(void);
-int save_microcode_in_initrd_intel(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(void) { }
static inline void load_ucode_intel_ap(void) { }
-static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 246a609f889b..de001b2146ab 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -19,6 +19,7 @@
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
+#include <asm/microcode.h>
#include <asm/tlbflush.h>
#include <asm/bootparam_utils.h>
@@ -29,11 +30,33 @@ static void __init i386_default_early_setup(void)
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
}
+#ifdef CONFIG_MICROCODE_INITRD32
+unsigned long __initdata initrd_start_early;
+static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end;
+
+static void zap_early_initrd_mapping(void)
+{
+ pte_t *pl2p = initrd_pl2p_start;
+
+ for (; pl2p < initrd_pl2p_end; pl2p++) {
+ *pl2p = (pte_t){ .pte = 0 };
+
+ if (!IS_ENABLED(CONFIG_X86_PAE))
+ *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0};
+ }
+}
+#else
+static inline void zap_early_initrd_mapping(void) { }
+#endif
+
asmlinkage __visible void __init __noreturn i386_start_kernel(void)
{
/* Make sure IDT is set up before any exception happens */
idt_setup_early_handler();
+ load_ucode_bsp();
+ zap_early_initrd_mapping();
+
cr4_init_shadow();
sanitize_boot_params(&boot_params);
@@ -69,52 +92,83 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void)
 * to the first kernel PMD. Note the upper half of each PMD or PTE is
* always zero at this stage.
*/
-void __init mk_early_pgtbl_32(void);
-void __init mk_early_pgtbl_32(void)
-{
-#ifdef __pa
-#undef __pa
-#endif
-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
- pte_t pte, *ptep;
- int i;
- unsigned long *ptr;
- /* Enough space to fit pagetables for the low memory linear map */
- const unsigned long limit = __pa(_end) +
- (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
#ifdef CONFIG_X86_PAE
- pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
-#define SET_PL2(pl2, val) { (pl2).pmd = (val); }
+typedef pmd_t pl2_t;
+#define pl2_base initial_pg_pmd
+#define SET_PL2(val) { .pmd = (val), }
#else
- pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
-#define SET_PL2(pl2, val) { (pl2).pgd = (val); }
+typedef pgd_t pl2_t;
+#define pl2_base initial_page_table
+#define SET_PL2(val) { .pgd = (val), }
#endif
- ptep = (pte_t *)__pa(__brk_base);
- pte.pte = PTE_IDENT_ATTR;
-
+static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p,
+ const unsigned long limit)
+{
while ((pte.pte & PTE_PFN_MASK) < limit) {
+ pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR);
+ int i;
+
+ **pl2p = pl2;
+ if (!IS_ENABLED(CONFIG_X86_PAE)) {
+ /* Kernel PDE entry */
+ *(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
+ }
- SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
- *pl2p = pl2;
-#ifndef CONFIG_X86_PAE
- /* Kernel PDE entry */
- *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
-#endif
for (i = 0; i < PTRS_PER_PTE; i++) {
- *ptep = pte;
+ **ptep = pte;
pte.pte += PAGE_SIZE;
- ptep++;
+ (*ptep)++;
}
-
- pl2p++;
+ (*pl2p)++;
}
+ return pte;
+}
+
+void __init __no_stack_protector mk_early_pgtbl_32(void)
+{
+ /* Enough space to fit pagetables for the low memory linear map */
+ unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+ pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
+ struct boot_params __maybe_unused *params;
+ pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
+ unsigned long *ptr;
+
+ pte.pte = PTE_IDENT_ATTR;
+ pte = init_map(pte, &ptep, &pl2p, limit);
- ptr = (unsigned long *)__pa(&max_pfn_mapped);
+ ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped);
/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
- ptr = (unsigned long *)__pa(&_brk_end);
+ ptr = (unsigned long *)__pa_nodebug(&_brk_end);
*ptr = (unsigned long)ptep + PAGE_OFFSET;
-}
+#ifdef CONFIG_MICROCODE_INITRD32
+ /* Running on a hypervisor? */
+ if (native_cpuid_ecx(1) & BIT(31))
+ return;
+
+ params = (struct boot_params *)__pa_nodebug(&boot_params);
+ if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
+ return;
+
+ /* Save the virtual start address */
+ ptr = (unsigned long *)__pa_nodebug(&initrd_start_early);
+ *ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET;
+ *ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK;
+
+	/* Save pl2p for cleanup */
+ ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start);
+ *ptr = (unsigned long)pl2p + PAGE_OFFSET;
+
+ limit = (unsigned long)params->hdr.ramdisk_image;
+ pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit);
+ limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size;
+
+ init_map(pte, &ptep, &pl2p, limit);
+
+ ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end);
+ *ptr = (unsigned long)pl2p + PAGE_OFFSET;
+#endif
+}
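Outside the patch, a back-of-the-envelope sketch of how much __brk_base space the extra initrd mapping costs; the constants assume 32-bit non-PAE (4K pages, 1024 PTEs per table covering 4M each), while PAE halves that to 512 entries/2M per table.

/* Rough estimate only -- constants match 32-bit non-PAE assumptions. */
#define EX_PAGE_SIZE	4096UL
#define EX_PTRS_PER_PTE	1024UL

static unsigned long pte_pages_for(unsigned long bytes)
{
	unsigned long pages = (bytes + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;

	return (pages + EX_PTRS_PER_PTE - 1) / EX_PTRS_PER_PTE;
}

/*
 * A 48M initrd needs pte_pages_for(48 << 20) == 12 page table pages,
 * i.e. roughly 48K taken from the brk area, on top of the lowmem map.
 */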
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6554212b7c7..487ac57e2c81 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -118,11 +118,6 @@ SYM_CODE_START(startup_32)
movl %eax, pa(olpc_ofw_pgd)
#endif
-#ifdef CONFIG_MICROCODE
- /* Early load ucode on BSP. */
- call load_ucode_bsp
-#endif
-
/* Create early pagetables. */
call mk_early_pgtbl_32
@@ -157,11 +152,6 @@ SYM_FUNC_START(startup_32_smp)
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
-#ifdef CONFIG_MICROCODE
- /* Early load ucode on AP. */
- call load_ucode_ap
-#endif
-
.Ldefault_entry:
movl $(CR0_STATE & ~X86_CR0_PG),%eax
movl %eax,%cr0
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 4766b6bed443..17e955ab69fe 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
+#include <asm/microcode.h>
#include <asm/sev.h>
#define CREATE_TRACE_POINTS
@@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
instrumentation_begin();
+ if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
+ goto out;
+
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {
@@ -498,8 +502,11 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
raw_atomic_long_inc(&nsp->idt_calls);
- if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
+ if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) {
+ if (microcode_nmi_handler_enabled())
+ microcode_offline_nmi_handler();
return;
+ }
if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
this_cpu_write(nmi_state, NMI_LATCHED);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c4aca66f0902..2cc2aa120b4b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -272,12 +272,9 @@ static void notrace start_secondary(void *unused)
cpu_init_exception_handling();
/*
- * 32-bit systems load the microcode from the ASM startup code for
- * historical reasons.
- *
- * On 64-bit systems load it before reaching the AP alive
- * synchronization point below so it is not part of the full per
- * CPU serialized bringup part when "parallel" bringup is enabled.
+ * Load the microcode before reaching the AP alive synchronization
+ * point below so it is not part of the full per CPU serialized
+ * bringup part when "parallel" bringup is enabled.
*
* That's even safe when hyperthreading is enabled in the CPU as
* the core code starts the primary threads first and leaves the
@@ -290,8 +287,7 @@ static void notrace start_secondary(void *unused)
* CPUID, MSRs etc. must be strictly serialized to maintain
* software state correctness.
*/
- if (IS_ENABLED(CONFIG_X86_64))
- load_ucode_ap();
+ load_ucode_ap();
/*
* Synchronization point with the hotplug core. Sets this CPUs