author    Sergey Matyukevich <sergey.matyukevich@syntacore.com>  2022-08-29 23:52:19 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>        2023-01-07 13:11:53 +0300
commit    85292a29689a740b36ae2ff41b92bc51eb97e01f (patch)
tree      33ff13f84d696113fb153dd6c78ee3c36eaf1536 /arch/riscv/mm/tlbflush.c
parent    00777a099574ad99a07d3a6db2f62c95b420ff2e (diff)
download  linux-85292a29689a740b36ae2ff41b92bc51eb97e01f.tar.xz
riscv: mm: notify remote harts about mmu cache updates
commit 4bd1d80efb5af640f99157f39b50fb11326ce641 upstream.

The current implementation of the update_mmu_cache function performs a local
TLB flush. It does not take ASID information into account, nor does it account
for other harts currently running the same mm context or for possible
migration of the running context to other harts. Meanwhile, a TLB flush is not
performed on every context switch when ASID support is enabled.

Patch [1] proposed adding ASID support to update_mmu_cache to avoid flushing
the local TLB entirely. This patch additionally takes into account other harts
currently running the same mm context, as well as possible migration of this
context to other harts. For this purpose the approach from flush_icache_mm is
reused: remote harts currently running the same mm context are informed via
SBI calls that they need to flush their local TLBs, and all other harts are
marked as needing a deferred TLB flush the next time this mm context runs on
them.

[1] https://lore.kernel.org/linux-riscv/20220821013926.8968-1-tjytimi@163.com/

Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Fixes: 65d4b9c53017 ("RISC-V: Implement ASID allocator")
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/linux-riscv/20220829205219.283543-1-geomatsi@gmail.com/#t
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
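The consumer side of the deferred flush is not part of the tlbflush.c hunk
below; it lives on the context-switch path. A minimal sketch of how a hart
marked in tlb_stale_mask would act on the deferred flush, assuming it is
checked when the mm is switched in (the helper name flush_stale_tlb and its
call site are illustrative assumptions, not taken from this patch):

/*
 * Illustrative sketch only: a hart that was marked stale for this mm
 * flushes its local TLB for the mm's ASID and clears its own bit before
 * resuming execution in this context.
 */
static inline void flush_stale_tlb(struct mm_struct *mm, unsigned int cpu,
				   unsigned long asid)
{
	if (cpumask_test_cpu(cpu, &mm->context.tlb_stale_mask)) {
		cpumask_clear_cpu(cpu, &mm->context.tlb_stale_mask);
		local_flush_tlb_all_asid(asid);
	}
}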
Diffstat (limited to 'arch/riscv/mm/tlbflush.c')
-rw-r--r--  arch/riscv/mm/tlbflush.c | 28
 1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 37ed760d007c..ce7dfc81bb3f 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -5,23 +5,7 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
+#include <asm/tlbflush.h>
 
 void flush_tlb_all(void)
 {
@@ -31,6 +15,7 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
				  unsigned long size, unsigned long stride)
 {
+	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -44,6 +29,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
+		/*
+		 * TLB will be immediately flushed on harts concurrently
+		 * executing this MM context. TLB flush on other harts
+		 * is deferred until this MM context migrates there.
+		 */
+		cpumask_setall(pmask);
+		cpumask_clear_cpu(cpuid, pmask);
+		cpumask_andnot(pmask, pmask, cmask);
+
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {
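
The new #include <asm/tlbflush.h> together with the deletions above suggests
the two ASID-aware sfence.vma helpers were moved into the header rather than
dropped. A sketch of what the header versions presumably look like,
reconstructed from the deleted lines; their exact placement in asm/tlbflush.h
is an assumption not shown by this hunk:

/*
 * Presumed header-side versions of the helpers removed from tlbflush.c,
 * reconstructed verbatim from the deleted lines above.
 */
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma x0, %0"
			:
			: "r" (asid)
			: "memory");
}

static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma %0, %1"
			:
			: "r" (addr), "r" (asid)
			: "memory");
}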