path: root/arch/x86/kernel/smp.c
author     Uros Bizjak <ubizjak@gmail.com>    2023-11-23 23:34:22 +0300
committer  Ingo Molnar <mingo@kernel.org>     2023-11-30 22:25:09 +0300
commit     9d1c8f21533729b6ead531b676fa7d327cf00819 (patch)
tree       0881d03fa28d8c2555adcf30a0e6bf0ed50d3deb /arch/x86/kernel/smp.c
parent     4604c052b84d66407f5e68045a1939685eac401e (diff)
download   linux-9d1c8f21533729b6ead531b676fa7d327cf00819.tar.xz
x86/smp: Move the call to smp_processor_id() after the early exit in native_stop_other_cpus()
Improve code generation in native_stop_other_cpus() a tiny bit: smp_processor_id() accesses a per-CPU variable, so the compiler is not able to move the call after the early exit on its own.

Also rename the 'cpu' variable to a more descriptive 'this_cpu', and use 'cpu' as a separate iterator variable later in the function.

No functional change intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20231123203605.3474745-1-ubizjak@gmail.com
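To illustrate the code-generation point in userspace terms, here is a minimal, hypothetical C sketch (not kernel code: my_processor_id(), percpu_id, and the reboot_force/stopping_cpu variables below are simplified stand-ins invented for this example) of the pattern the patch applies, reading the CPU ID only after the early exit instead of unconditionally at function entry:

#include <stdatomic.h>
#include <stdio.h>

/* Per-CPU variable analogue; volatile mimics the kernel's per-CPU accessors,
 * whose accesses the compiler may not move around on its own. */
static volatile _Thread_local unsigned int percpu_id;
static int reboot_force;		/* early-exit condition */
static atomic_int stopping_cpu = -1;	/* first caller to reach the stop path */

/* Analogue of smp_processor_id(): an access the compiler must perform where written. */
static unsigned int my_processor_id(void)
{
	return percpu_id;
}

static void stop_other_cpus(void)
{
	unsigned int this_cpu;

	if (reboot_force)
		return;		/* fast path: no per-CPU access happens at all */

	/* Read the ID only once it is actually needed, then claim the stop path,
	 * mirroring the atomic_cmpxchg(&stopping_cpu, -1, this_cpu) in the patch. */
	this_cpu = my_processor_id();
	int expected = -1;
	if (!atomic_compare_exchange_strong(&stopping_cpu, &expected, (int)this_cpu))
		return;

	printf("CPU %u proceeds to stop the others\n", this_cpu);
}

int main(void)
{
	percpu_id = 0;
	stop_other_cpus();
	return 0;
}

With the read placed after the reboot_force check, the early-exit path performs no per-CPU access, which is the small code-generation improvement the commit message describes.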
Diffstat (limited to 'arch/x86/kernel/smp.c')
-rw-r--r--  arch/x86/kernel/smp.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 6eb06d001bcc..65dd44e3fc1c 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -148,14 +148,15 @@ static int register_stop_handler(void)
 
 static void native_stop_other_cpus(int wait)
 {
-	unsigned int cpu = smp_processor_id();
+	unsigned int this_cpu;
 	unsigned long flags, timeout;
 
 	if (reboot_force)
 		return;
 
 	/* Only proceed if this is the first CPU to reach this code */
-	if (atomic_cmpxchg(&stopping_cpu, -1, cpu) != -1)
+	this_cpu = smp_processor_id();
+	if (atomic_cmpxchg(&stopping_cpu, -1, this_cpu) != -1)
 		return;
 
 	/* For kexec, ensure that offline CPUs are out of MWAIT and in HLT */
@@ -190,7 +191,7 @@ static void native_stop_other_cpus(int wait)
 	 * NMIs.
 	 */
 	cpumask_copy(&cpus_stop_mask, cpu_online_mask);
-	cpumask_clear_cpu(cpu, &cpus_stop_mask);
+	cpumask_clear_cpu(this_cpu, &cpus_stop_mask);
 
 	if (!cpumask_empty(&cpus_stop_mask)) {
 		apic_send_IPI_allbutself(REBOOT_VECTOR);
@@ -234,6 +235,8 @@ static void native_stop_other_cpus(int wait)
 		 * CPUs to stop.
 		 */
 		if (!smp_no_nmi_ipi && !register_stop_handler()) {
+			unsigned int cpu;
+
 			pr_emerg("Shutting down cpus with NMI\n");
 
 			for_each_cpu(cpu, &cpus_stop_mask)