Diffstat (limited to 'arch/loongarch/kernel/smp.c')
 arch/loongarch/kernel/smp.c | 52 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 99ba7a56edf9..b8c53b755a25 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -25,6 +25,7 @@
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
+#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>
@@ -222,6 +223,9 @@ void loongson3_init_secondary(void)
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+#ifdef CONFIG_NUMA
+ numa_add_cpu(cpu);
+#endif
per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu_data[cpu].core =
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
@@ -254,6 +258,9 @@ int loongson3_cpu_disable(void)
if (io_master(cpu))
return -EBUSY;
+#ifdef CONFIG_NUMA
+ numa_remove_cpu(cpu);
+#endif
set_cpu_online(cpu, false);
calculate_cpu_foreign_map();
local_irq_save(flags);
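
The two hunks above keep the per-node CPU masks in step with CPU bring-up and tear-down. As a rough, hypothetical sketch (not copied from the kernel's numa.c), such helpers usually amount to setting or clearing the CPU's bit in its node's cpumask:

/*
 * Illustrative sketch only -- not the kernel's implementation of
 * numa_add_cpu()/numa_remove_cpu(). "example_node_cpumask" is a
 * hypothetical array used for this example.
 */
#include <linux/cpumask.h>
#include <linux/numa.h>
#include <linux/topology.h>

static struct cpumask example_node_cpumask[MAX_NUMNODES];

static void example_numa_add_cpu(unsigned int cpu)
{
	/* mark the CPU as belonging to its node's online set */
	cpumask_set_cpu(cpu, &example_node_cpumask[cpu_to_node(cpu)]);
}

static void example_numa_remove_cpu(unsigned int cpu)
{
	/* drop the CPU from its node's online set on hot-unplug */
	cpumask_clear_cpu(cpu, &example_node_cpumask[cpu_to_node(cpu)]);
}
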
@@ -478,14 +485,36 @@ void calculate_cpu_foreign_map(void)
/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
- unsigned int cpu;
+ unsigned int cpu, node, rr_node;
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
- for_each_possible_cpu(cpu)
- set_cpu_numa_node(cpu, 0);
+ rr_node = first_node(node_online_map);
+ for_each_possible_cpu(cpu) {
+ node = early_cpu_to_node(cpu);
+
+ /*
+ * The mapping between present cpus and nodes has been
+ * built during MADT and SRAT parsing.
+ *
+ * If possible cpus == present cpus here, early_cpu_to_node()
+ * will return a valid node.
+ *
+ * If possible cpus > present cpus here (e.g. some possible
+ * cpus will be added by cpu-hotplug later), early_cpu_to_node()
+ * returns NUMA_NO_NODE for the possible-but-not-present cpus,
+ * and we just map them to the online nodes in a round-robin way.
+ * Once they are hotplugged, the correct mapping will be built for them.
+ */
+ if (node != NUMA_NO_NODE)
+ set_cpu_numa_node(cpu, node);
+ else {
+ set_cpu_numa_node(cpu, rr_node);
+ rr_node = next_node_in(rr_node, node_online_map);
+ }
+ }
}
/* called from main before smp_init() */
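
The round-robin fallback added to smp_prepare_boot_cpu() above can be illustrated with a small standalone program. This is only a sketch of the mapping policy, with made-up node and CPU numbers, not kernel code:

/* Userspace illustration of the round-robin node fallback (made-up data). */
#include <stdio.h>

#define NR_CPUS  8
#define NO_NODE  (-1)		/* stands in for NUMA_NO_NODE */

int main(void)
{
	int online_nodes[] = { 0, 1 };	/* assumed online nodes */
	int nr_online = 2;
	/* firmware-provided mapping; NO_NODE means possible but not present */
	int early_node[NR_CPUS] = { 0, 0, 1, 1, NO_NODE, NO_NODE, NO_NODE, NO_NODE };
	int rr = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int node = early_node[cpu];

		if (node == NO_NODE) {
			/* spread unknown CPUs across the online nodes */
			node = online_nodes[rr];
			rr = (rr + 1) % nr_online;
		}
		printf("cpu%d -> node%d\n", cpu, node);
	}
	return 0;
}

CPUs 4-7 end up alternating between nodes 0 and 1, which is the behaviour the comment in the hunk describes for possible-but-not-present CPUs.
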
@@ -651,17 +680,10 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
} else {
unsigned int cpu;
- int exec = vma->vm_flags & VM_EXEC;
for_each_online_cpu(cpu) {
- /*
- * flush_cache_range() will only fully flush icache if
- * the VMA is executable, otherwise we must invalidate
- * ASID without it appearing to has_valid_asid() as if
- * mm has been completely unused by that CPU.
- */
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = !exec;
+ cpu_context(cpu, mm) = 0;
}
local_flush_tlb_range(vma, start, end);
}
@@ -706,14 +728,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
unsigned int cpu;
for_each_online_cpu(cpu) {
- /*
- * flush_cache_page() only does partial flushes, so
- * invalidate ASID without it appearing to
- * has_valid_asid() as if mm has been completely unused
- * by that CPU.
- */
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
- cpu_context(cpu, vma->vm_mm) = 1;
+ cpu_context(cpu, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
}
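
The last two hunks drop the old !exec / 1 markers and simply clear the remote CPU's context to 0. Conceptually, the stored context carries an ASID-generation tag that must match the CPU's current generation; 0 can never match, so the next time the mm runs on that CPU a fresh ASID is allocated and stale TLB entries are never reused. The sketch below illustrates that idea with invented field widths; it is not the LoongArch mmu_context implementation:

/* Illustration of generation-tagged contexts (invented constants). */
#include <stdio.h>

#define ASID_BITS  8UL
#define ASID_MASK  ((1UL << ASID_BITS) - 1)
#define GEN_FIRST  (1UL << ASID_BITS)	/* generations start above zero */

/* A context needs a new ASID when its generation differs from the CPU's. */
static int needs_new_asid(unsigned long ctx, unsigned long asid_cache)
{
	return (ctx & ~ASID_MASK) != (asid_cache & ~ASID_MASK);
}

int main(void)
{
	unsigned long asid_cache = GEN_FIRST + 5;	/* CPU's current generation/ASID */
	unsigned long live_ctx   = GEN_FIRST + 3;	/* mm with a still-valid ASID */
	unsigned long flushed    = 0;			/* what the patch writes remotely */

	printf("live context needs new ASID:    %d\n", needs_new_asid(live_ctx, asid_cache));
	printf("flushed context needs new ASID: %d\n", needs_new_asid(flushed, asid_cache));
	return 0;
}
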