author    Gary Guo <gary@garyguo.net>    2019-03-27 03:41:25 +0300
committer Palmer Dabbelt <palmer@sifive.com>    2019-05-17 06:42:12 +0300
commit    58de77545e53b94cd6c816776197dade598632c5 (patch)
tree      2472a19173ceea0148de0e34c5de2fc2ecac6968 /arch/riscv
parent    f91253a3d005796404ae0e578b3394459b5f9b71 (diff)
download  linux-58de77545e53b94cd6c816776197dade598632c5.tar.xz
riscv: move flush_icache_{all,mm} to cacheflush.c
Currently, flush_icache_all is macro-expanded into an SBI call, yet no asm/sbi.h is included in asm/cacheflush.h.

It could be moved to mm/cacheflush.c instead (the SBI call will dominate performance-wise, so there is no concern about it not being inlined).

Currently, flush_icache_mm stays in kernel/smp.c, which looks like a hack to prevent it from being compiled when CONFIG_SMP=n. It should also live in mm/cacheflush.c.

Signed-off-by: Gary Guo <gary@garyguo.net>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
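To make the layering point above concrete, here is a minimal stand-alone C sketch (all names are hypothetical, not the kernel's) of why a macro in a header drags an extra include along to every user, while an out-of-line function keeps that dependency private to one .c file:

    /* layering_sketch.c - illustration only; build with: cc layering_sketch.c */
    #include <stdio.h>

    /* Stand-in for the SBI call that asm/sbi.h would declare. */
    static void sbi_fence_i_stub(const unsigned long *hart_mask)
    {
            printf("fence.i requested, mask=%p\n", (const void *)hart_mask);
    }

    /* "Before": a macro in the header. Every file expanding it must also
     * see the stub's declaration, i.e. must include the sbi header. */
    #define flush_icache_all_macro() sbi_fence_i_stub(NULL)

    /* "After": the header only carries a declaration; the definition (and
     * its include dependency) live in a single translation unit. */
    void flush_icache_all_func(void);
    void flush_icache_all_func(void)
    {
            sbi_fence_i_stub(NULL);
    }

    int main(void)
    {
            flush_icache_all_macro();  /* dependency leaks to every caller */
            flush_icache_all_func();   /* dependency stays behind the API */
            return 0;
    }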
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/include/asm/cacheflush.h |  2
-rw-r--r--  arch/riscv/kernel/smp.c             | 49
-rw-r--r--  arch/riscv/mm/cacheflush.c          | 61
3 files changed, 62 insertions(+), 50 deletions(-)
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
#else /* CONFIG_SMP */
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
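After this hunk, the relevant part of asm/cacheflush.h reads roughly as follows. This is a sketch for orientation only: the !SMP branch is not part of the hunk above and is reproduced from my reading of the kernel source of that era, so treat it as assumed context rather than part of the patch:

    #ifndef CONFIG_SMP
    #define flush_icache_all() local_flush_icache_all()
    #define flush_icache_mm(mm, local) flush_icache_all()
    #else /* CONFIG_SMP */
    void flush_icache_all(void);
    void flush_icache_mm(struct mm_struct *mm, bool local);
    #endif /* CONFIG_SMP */

The UP build keeps the cheap local macro; only the SMP build takes an out-of-line call, which is noise next to the cost of the SBI call itself.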
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 9253de5d91b6..b2537ffa855c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -205,52 +205,3 @@ void smp_send_reschedule(int cpu)
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
-/*
- * Performs an icache flush for the given MM context. RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
- unsigned int cpu;
- cpumask_t others, hmask, *mask;
-
- preempt_disable();
-
- /* Mark every hart's icache as needing a flush for this MM. */
- mask = &mm->context.icache_stale_mask;
- cpumask_setall(mask);
- /* Flush this hart's I$ now, and mark it as flushed. */
- cpu = smp_processor_id();
- cpumask_clear_cpu(cpu, mask);
- local_flush_icache_all();
-
- /*
- * Flush the I$ of other harts concurrently executing, and mark them as
- * flushed.
- */
- cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
- local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local) {
- cpumask_clear(&hmask);
- riscv_cpuid_to_hartid_mask(&others, &hmask);
- sbi_remote_fence_i(hmask.bits);
- } else {
- /*
- * It's assumed that at least one strongly ordered operation is
- * performed on this hart between setting a hart's cpumask bit
- * and scheduling this MM context on that hart. Sending an SBI
- * remote message will do this, but in the case where no
- * messages are sent we still need to order this hart's writes
- * with flush_icache_deferred().
- */
- smp_mb();
- }
-
- preempt_enable();
-}
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+ sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+ unsigned int cpu;
+ cpumask_t others, hmask, *mask;
+
+ preempt_disable();
+
+ /* Mark every hart's icache as needing a flush for this MM. */
+ mask = &mm->context.icache_stale_mask;
+ cpumask_setall(mask);
+ /* Flush this hart's I$ now, and mark it as flushed. */
+ cpu = smp_processor_id();
+ cpumask_clear_cpu(cpu, mask);
+ local_flush_icache_all();
+
+ /*
+ * Flush the I$ of other harts concurrently executing, and mark them as
+ * flushed.
+ */
+ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+ local |= cpumask_empty(&others);
+ if (mm != current->active_mm || !local) {
+ cpumask_clear(&hmask);
+ riscv_cpuid_to_hartid_mask(&others, &hmask);
+ sbi_remote_fence_i(hmask.bits);
+ } else {
+ /*
+ * It's assumed that at least one strongly ordered operation is
+ * performed on this hart between setting a hart's cpumask bit
+ * and scheduling this MM context on that hart. Sending an SBI
+ * remote message will do this, but in the case where no
+ * messages are sent we still need to order this hart's writes
+ * with flush_icache_deferred().
+ */
+ smp_mb();
+ }
+
+ preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
void flush_icache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
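The comment in flush_icache_mm above refers to flush_icache_deferred(), the receiving side of this protocol, which runs on the context-switch path: each hart checks whether flush_icache_mm marked its icache as stale, and if so clears its bit, orders its reads against the remote hart's writes, and performs a local fence.i. For context, here is a sketch of that logic, modeled on the kernel's helper of that era; it is illustrative and not part of this patch:

    /* Sketch of the deferred-flush counterpart (illustrative; the real
     * helper lived on the switch_mm() path, not in this file). */
    static inline void flush_icache_deferred(struct mm_struct *mm)
    {
    #ifdef CONFIG_SMP
            unsigned int cpu = smp_processor_id();
            cpumask_t *mask = &mm->context.icache_stale_mask;

            if (cpumask_test_cpu(cpu, mask)) {
                    cpumask_clear_cpu(cpu, mask);
                    /*
                     * Ensure the remote hart's writes are visible to this
                     * hart; pairs with the smp_mb() in flush_icache_mm().
                     */
                    smp_mb();
                    local_flush_icache_all();
            }
    #endif
    }

Together the two functions implement the optimization described in the block comment: harts not currently running the MM skip the IPI/SBI round-trip and instead flush lazily, just before they next execute code from that address space.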