author:    Peter Zijlstra <peterz@infradead.org>  2018-08-27 14:00:17 +0300
committer: Ingo Molnar <mingo@kernel.org>  2019-04-03 11:32:41 +0300
commit:    e7fd28a706bfaf9cd65dccf18140187f7ad04839 (patch)
tree:      23077332371304ff92fd2b9ac64286afd5c69e16 /include/asm-generic
parent:    ed6a79352cad00e9a49d6e438be40e45107207bf (diff)
download:  linux-e7fd28a706bfaf9cd65dccf18140187f7ad04839.tar.xz
asm-generic/tlb, arch: Provide generic VIPT cache flush
The one obvious thing SH and ARM want is a sensible default for
tlb_start_vma(). (also: https://lkml.org/lkml/2004/1/15/6 )

Avoid all VIPT architectures providing their own tlb_start_vma()
implementation and rely on architectures to provide a no-op
flush_cache_range() when it is not relevant.

This patch makes tlb_start_vma() default to flush_cache_range(), which
should be right and sufficient. The only exceptions that I found were
(oddly):

  - m68k-mmu
  - sparc64
  - unicore

Those architectures appear to have flush_cache_range(), but their
current tlb_start_vma() does not call it.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
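On architectures whose data caches are physically indexed (PIPT), flush_cache_range()
is expected to be a no-op, so the new default tlb_start_vma() compiles down to nothing.
A minimal sketch of what such a no-op definition can look like (loosely modelled on
asm-generic/cacheflush.h; the exact guard and form vary by architecture and kernel
version, so treat this as illustrative rather than verbatim):

        /*
         * Sketch only, not taken verbatim from any tree: with physically
         * indexed caches there is nothing to write back before the unmap,
         * so the generic tlb_start_vma() in the hunk below reduces to an
         * empty statement.
         */
        #ifndef flush_cache_range
        static inline void flush_cache_range(struct vm_area_struct *vma,
                                             unsigned long start,
                                             unsigned long end)
        {
        }
        #endif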
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/tlb.h | 19
 1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e75620e41ba4..f0aa53db5e60 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
#ifdef CONFIG_MMU
@@ -356,17 +357,19 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* the vmas are adjusted to only cover the region to be torn down.
*/
#ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_start_vma(tlb, vma)                                         \
+do {                                                                    \
+        if (!tlb->fullmm)                                               \
+                flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
+} while (0)
#endif
-#define __tlb_end_vma(tlb, vma)                                         \
-        do {                                                            \
-                if (!tlb->fullmm)                                       \
-                        tlb_flush_mmu_tlbonly(tlb);                     \
-        } while (0)
-
#ifndef tlb_end_vma
-#define tlb_end_vma __tlb_end_vma
+#define tlb_end_vma(tlb, vma)                                           \
+do {                                                                    \
+        if (!tlb->fullmm)                                               \
+                tlb_flush_mmu_tlbonly(tlb);                             \
+} while (0)
#endif
#ifndef __tlb_remove_tlb_entry
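For comparison, the kind of per-architecture definition this generic default makes
redundant on VIPT machines looks roughly like the following (a hedged sketch modelled
on the pre-patch ARM-style tlb_start_vma(); details differ between architectures):

        /*
         * Sketch of a pre-patch VIPT-architecture tlb_start_vma(): each such
         * architecture had to flush the caches over the VMA range itself
         * before the mappings were torn down.
         */
        static inline void
        tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
        {
                if (!tlb->fullmm)
                        flush_cache_range(vma, vma->vm_start, vma->vm_end);
        }

With the generic default in place, such architectures can drop their private
tlb_start_vma() and keep only a real flush_cache_range().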