From 06dfae39d20091f3c8aa995f00505e1e148b0b3f Mon Sep 17 00:00:00 2001
From: Baoquan He
Date: Thu, 6 Jul 2023 23:45:08 +0800
Subject: arc: mm: convert to GENERIC_IOREMAP

By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrappers ioremap_prot(), ioremap()
and iounmap() are all visible and available to the arch.  An arch only
needs to provide wrapper functions to override the generic versions if
it has arch-specific handling in its ioremap_prot(), ioremap() or
iounmap().

This change simplifies the implementation by removing code that
duplicated generic_ioremap_prot() and generic_iounmap(), while keeping
functionality equivalent to before.

Here, add wrapper functions ioremap_prot() and iounmap() for arc's
special handling in ioremap_prot() and iounmap().

Link: https://lkml.kernel.org/r/20230706154520.11257-8-bhe@redhat.com
Signed-off-by: Baoquan He
Reviewed-by: Christoph Hellwig
Reviewed-by: Mike Rapoport (IBM)
Cc: Vineet Gupta
Cc: Alexander Gordeev
Cc: Arnd Bergmann
Cc: Brian Cain
Cc: Catalin Marinas
Cc: Christian Borntraeger
Cc: Christophe Leroy
Cc: Chris Zankel
Cc: David Laight
Cc: Geert Uytterhoeven
Cc: Gerald Schaefer
Cc: Heiko Carstens
Cc: Helge Deller
Cc: "James E.J. Bottomley"
Cc: John Paul Adrian Glaubitz
Cc: Jonas Bonn
Cc: Kefeng Wang
Cc: Matthew Wilcox
Cc: Max Filippov
Cc: Michael Ellerman
Cc: Nathan Chancellor
Cc: Nicholas Piggin
Cc: Niklas Schnelle
Cc: Rich Felker
Cc: Stafford Horne
Cc: Stefan Kristiansson
Cc: Sven Schnelle
Cc: Vasily Gorbik
Cc: Will Deacon
Cc: Yoshinori Sato
Signed-off-by: Andrew Morton
---
 arch/arc/Kconfig          |  1 +
 arch/arc/include/asm/io.h |  7 +++----
 arch/arc/mm/ioremap.c     | 49 ++++-------------------------------------------
 3 files changed, 8 insertions(+), 49 deletions(-)

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 96cf8720bb93..6f4995ad9873 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -26,6 +26,7 @@ config ARC
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IOREMAP
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 80347382a380..4fdb7350636c 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -21,8 +21,9 @@
 #endif
 
 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
-				  unsigned long flags);
+#define ioremap ioremap
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
 	return (void __iomem *)port;
@@ -32,8 +33,6 @@ static inline void ioport_unmap(void __iomem *addr)
 {
 }
 
-extern void iounmap(const volatile void __iomem *addr);
-
 /*
  * io{read,write}{16,32}be() macros
  */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 712c2311daef..b07004d53267 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -8,7 +8,6 @@
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/mm.h>
-#include <linux/slab.h>
 #include <linux/cache.h>
 
 static inline bool arc_uncached_addr_space(phys_addr_t paddr)
@@ -25,13 +24,6 @@ static inline bool arc_uncached_addr_space(phys_addr_t paddr)
 
 void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
 {
-	phys_addr_t end;
-
-	/* Don't allow wraparound or zero size */
-	end = paddr + size - 1;
-	if (!size || (end < paddr))
-		return NULL;
-
 	/*
	 * If the region is h/w uncached, MMU mapping can be elided as optim
	 * The cast to u32 is fine as this region can only be inside 4GB
@@ -51,55 +43,22 @@ EXPORT_SYMBOL(ioremap);
 * ARC hardware uncached region, this one still goes thru the MMU as caller
 * might need finer access control (R/W/X)
 */
-void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
 			   unsigned long flags)
 {
-	unsigned int off;
-	unsigned long vaddr;
-	struct vm_struct *area;
-	phys_addr_t end;
 	pgprot_t prot = __pgprot(flags);
 
-	/* Don't allow wraparound, zero size */
-	end = paddr + size - 1;
-	if ((!size) || (end < paddr))
-		return NULL;
-
-	/* An early platform driver might end up here */
-	if (!slab_is_available())
-		return NULL;
-
 	/* force uncached */
-	prot = pgprot_noncached(prot);
-
-	/* Mappings have to be page-aligned */
-	off = paddr & ~PAGE_MASK;
-	paddr &= PAGE_MASK_PHYS;
-	size = PAGE_ALIGN(end + 1) - paddr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-	area->phys_addr = paddr;
-	vaddr = (unsigned long)area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
-		vunmap((void __force *)vaddr);
-		return NULL;
-	}
-	return (void __iomem *)(off + (char __iomem *)vaddr);
+	return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
 }
 EXPORT_SYMBOL(ioremap_prot);
 
-
-void iounmap(const volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	/* weird double cast to handle phys_addr_t > 32 bits */
 	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
 		return;
 
-	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+	generic_iounmap(addr);
 }
 EXPORT_SYMBOL(iounmap);

From ac4cfaccedac891d29560ddfb64cb5c1e710e1e1 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 2 Aug 2023 16:13:36 +0100
Subject: arc: implement the new page table range API

Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().  Change the PG_dc_clean flag from being per-page to
per-folio (which means it cannot always be set as we don't know that all
pages in this folio were cleaned).  Enhance the internal flush routines
to take the number of pages to flush.
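For context, PFN_PTE_SHIFT is what lets arc drop its private set_pte_at():
the generic set_ptes() uses it to step the PFN from one PTE to the next
across a folio.  A condensed sketch of that generic fallback loop in
<linux/pgtable.h> (page-table-check and lazy-MMU hooks elided here):

	static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte, unsigned int nr)
	{
		for (;;) {
			set_pte(ptep, pte);
			if (--nr == 0)
				break;
			ptep++;
			/* advance only the PFN field of the PTE */
			pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
		}
	}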
Link: https://lkml.kernel.org/r/20230802151406.3735276-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Mike Rapoport (IBM)
Cc: Vineet Gupta
Signed-off-by: Andrew Morton
---
 arch/arc/include/asm/cacheflush.h         |  7 +++-
 arch/arc/include/asm/pgtable-bits-arcv2.h | 12 +++---
 arch/arc/include/asm/pgtable-levels.h     |  1 +
 arch/arc/mm/cache.c                       | 61 +++++++++++++++++++------------
 arch/arc/mm/tlb.c                         | 18 +++++----
 5 files changed, 59 insertions(+), 40 deletions(-)

diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index e201b4b1655a..04f65f588510 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -25,17 +25,20 @@
 * in update_mmu_cache()
 */
 #define flush_icache_page(vma, page)
+#define flush_icache_pages(vma, page, nr)
 
 void flush_cache_all(void);
 
 void flush_icache_range(unsigned long kstart, unsigned long kend);
 void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
-void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
-void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
 void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
 
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
 void dma_cache_inv(phys_addr_t start, unsigned long sz);
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index 6e9f8ca6d6a1..ee78ab30958d 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -100,14 +100,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 }
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
-{
-	set_pte(ptep, pteval);
-}
+struct vm_fault;
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *ptep);
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 
 /*
  * Encode/decode swap entries and swap PTEs.  Swap PTEs are all PTEs that
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
index ef68758b69f7..fc417c75c24d 100644
--- a/arch/arc/include/asm/pgtable-levels.h
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -169,6 +169,7 @@
 #define pte_ERROR(e) \
 	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 
+#define PFN_PTE_SHIFT	PAGE_SHIFT
 #define pte_none(x)			(!pte_val(x))
 #define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
 #define pte_clear(mm,addr,ptep)	set_pte_at(mm, addr, ptep, __pte(0))
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 55c6de138eae..3c16ee942a5c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -752,17 +752,17 @@ static inline void arc_slc_enable(void)
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
 	struct address_space *mapping;
 
 	if (!cache_is_vipt_aliasing()) {
-		clear_bit(PG_dc_clean, &page->flags);
+		clear_bit(PG_dc_clean, &folio->flags);
 		return;
 	}
 
 	/* don't handle anon pages here */
-	mapping = page_mapping_file(page);
+	mapping = folio_flush_mapping(folio);
 	if (!mapping)
 		return;
@@ -771,17 +771,27 @@ void flush_dcache_page(struct page *page)
 	 * Make a note that K-mapping is dirty
 	 */
 	if (!mapping_mapped(mapping)) {
-		clear_bit(PG_dc_clean, &page->flags);
-	} else if (page_mapcount(page)) {
-
+		clear_bit(PG_dc_clean, &folio->flags);
+	} else if (folio_mapped(folio)) {
 		/* kernel reading from page with U-mapping */
-		phys_addr_t paddr = (unsigned long)page_address(page);
-		unsigned long vaddr = page->index << PAGE_SHIFT;
+		phys_addr_t paddr = (unsigned long)folio_address(folio);
+		unsigned long vaddr = folio_pos(folio);
 
+		/*
+		 * vaddr is not actually the virtual address, but is
+		 * congruent to every user mapping.
+		 */
 		if (addr_not_cache_congruent(paddr, vaddr))
-			__flush_dcache_page(paddr, vaddr);
+			__flush_dcache_pages(paddr, vaddr,
+						folio_nr_pages(folio));
 	}
 }
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+	return flush_dcache_folio(page_folio(page));
+}
 EXPORT_SYMBOL(flush_dcache_page);
 
 /*
@@ -921,18 +931,18 @@ void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
 {
-	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
+	__ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
 }
 
 /*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
-void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
 {
-	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
+	__dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
 }
 
 noinline void flush_cache_all(void)
@@ -962,10 +972,10 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
 
 	u_vaddr &= PAGE_MASK;
 
-	__flush_dcache_page(paddr, u_vaddr);
+	__flush_dcache_pages(paddr, u_vaddr, 1);
 
 	if (vma->vm_flags & VM_EXEC)
-		__inv_icache_page(paddr, u_vaddr);
+		__inv_icache_pages(paddr, u_vaddr, 1);
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -978,9 +988,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 	unsigned long u_vaddr)
 {
 	/* TBD: do we really need to clear the kernel mapping */
-	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
-	__flush_dcache_page((phys_addr_t)page_address(page),
-			    (phys_addr_t)page_address(page));
+	__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
+	__flush_dcache_pages((phys_addr_t)page_address(page),
+			    (phys_addr_t)page_address(page), 1);
 
 }
@@ -989,6 +999,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 void copy_user_highpage(struct page *to, struct page *from,
 	unsigned long u_vaddr, struct vm_area_struct *vma)
 {
+	struct folio *src = page_folio(from);
+	struct folio *dst = page_folio(to);
 	void *kfrom = kmap_atomic(from);
 	void *kto = kmap_atomic(to);
 	int clean_src_k_mappings = 0;
@@ -1005,7 +1017,7 @@ void copy_user_highpage(struct page *to, struct page *from,
	 * addr_not_cache_congruent() is 0
	 */
 	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
-		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
+		__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
 		clean_src_k_mappings = 1;
 	}
@@ -1019,17 +1031,17 @@ void copy_user_highpage(struct page *to, struct page *from,
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
-	clear_bit(PG_dc_clean, &to->flags);
+	clear_bit(PG_dc_clean, &dst->flags);
 
 	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
 	if (clean_src_k_mappings) {
-		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
-		set_bit(PG_dc_clean, &from->flags);
+		__flush_dcache_pages((unsigned long)kfrom,
+					(unsigned long)kfrom, 1);
 	} else {
-		clear_bit(PG_dc_clean, &from->flags);
+		clear_bit(PG_dc_clean, &src->flags);
 	}
 
 	kunmap_atomic(kto);
@@ -1038,8 +1050,9 @@ void copy_user_highpage(struct page *to, struct page *from,
 
 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	clear_page(to);
-	clear_bit(PG_dc_clean, &page->flags);
+	clear_bit(PG_dc_clean, &folio->flags);
 }
 EXPORT_SYMBOL(clear_user_page);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 5f71445f26bd..6f40f37e6550 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -467,8 +467,8 @@ void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
-		      pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
 	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
@@ -491,15 +491,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
	 */
 	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {
-
-		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+		struct folio *folio = page_folio(page);
+		int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
 		if (dirty) {
+			unsigned long offset = offset_in_folio(folio, paddr);
+			nr = folio_nr_pages(folio);
+			paddr -= offset;
+			vaddr -= offset;
 			/* wback + inv dcache lines (K-mapping) */
-			__flush_dcache_page(paddr, paddr);
+			__flush_dcache_pages(paddr, paddr, nr);
 
 			/* invalidate any existing icache lines (U-mapping) */
 			if (vma->vm_flags & VM_EXEC)
-				__inv_icache_page(paddr, vaddr);
+				__inv_icache_pages(paddr, vaddr, nr);
 		}
 	}
 }
@@ -531,7 +535,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 			  pmd_t *pmd)
 {
 	pte_t pte = __pte(pmd_val(*pmd));
-	update_mmu_cache(vma, addr, &pte);
+	update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
 }
 
 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,

From 203b7b6aad6769a43987deb81c35456de8bb16c7 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 2 Aug 2023 16:13:59 +0100
Subject: mm: rationalise flush_icache_pages() and flush_icache_page()

Move the default (no-op) implementation of flush_icache_pages() to
<linux/cacheflush.h> from <asm-generic/cacheflush.h>.  Remove the
flush_icache_page() wrapper from each architecture and move it into
<linux/cacheflush.h>.
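The resulting convention, roughly sketched (arch/foo is a made-up
stand-in): an architecture with a real implementation declares
flush_icache_pages() and defines a macro of the same name to suppress
the common no-op; <linux/cacheflush.h> supplies the fallback and the
single-page wrapper for everyone:

	/* arch/foo/include/asm/cacheflush.h: arch has a real implementation */
	void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
			unsigned int nr);
	#define flush_icache_pages flush_icache_pages

	/* include/linux/cacheflush.h: common fallback plus wrapper */
	#ifndef flush_icache_pages
	static inline void flush_icache_pages(struct vm_area_struct *vma,
					      struct page *page, unsigned int nr)
	{
	}
	#endif

	#define flush_icache_page(vma, page)	flush_icache_pages(vma, page, 1)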
Link: https://lkml.kernel.org/r/20230802151406.3735276-32-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Signed-off-by: Andrew Morton
---
 arch/alpha/include/asm/cacheflush.h     |  5 +----
 arch/arc/include/asm/cacheflush.h       |  9 ---------
 arch/arm/include/asm/cacheflush.h       |  7 -------
 arch/csky/abiv1/inc/abi/cacheflush.h    |  1 -
 arch/csky/abiv2/inc/abi/cacheflush.h    |  1 -
 arch/hexagon/include/asm/cacheflush.h   |  2 +-
 arch/loongarch/include/asm/cacheflush.h |  2 --
 arch/m68k/include/asm/cacheflush_mm.h   |  1 -
 arch/mips/include/asm/cacheflush.h      |  6 ------
 arch/nios2/include/asm/cacheflush.h     |  2 +-
 arch/parisc/include/asm/cacheflush.h    |  2 +-
 arch/sh/include/asm/cacheflush.h        |  2 +-
 arch/sparc/include/asm/cacheflush_32.h  |  2 --
 arch/sparc/include/asm/cacheflush_64.h  |  3 ---
 arch/xtensa/include/asm/cacheflush.h    |  4 ----
 include/asm-generic/cacheflush.h        | 12 ------------
 include/linux/cacheflush.h              |  9 +++++++++
 17 files changed, 14 insertions(+), 56 deletions(-)

diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 3956460e69e2..36a7e924c3b9 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -53,10 +53,6 @@ extern void flush_icache_user_page(struct vm_area_struct *vma,
 #define flush_icache_user_page flush_icache_user_page
 #endif /* CONFIG_SMP */
 
-/* This is used only in __do_fault and do_swap_page. */
-#define flush_icache_page(vma, page) \
-	flush_icache_user_page((vma), (page), 0, 0)
-
 /*
 * Both implementations of flush_icache_user_page flush the entire
 * address space, so one call, no matter how many pages.
@@ -66,6 +62,7 @@ static inline void flush_icache_pages(struct vm_area_struct *vma,
 {
 	flush_icache_user_page(vma, page, 0, 0);
 }
+#define flush_icache_pages flush_icache_pages
 
 #include <asm-generic/cacheflush.h>
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 04f65f588510..bd5b1a9a0544 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -18,15 +18,6 @@
 #include <linux/mm.h>
 #include <asm/shmparam.h>
 
-/*
- * Semantically we need this because icache doesn't snoop dcache/dma.
- * However ARC Cache flush requires paddr as well as vaddr, latter not available
- * in the flush_icache_page() API.  So we no-op it but do the equivalent work
- * in update_mmu_cache()
- */
-#define flush_icache_page(vma, page)
-#define flush_icache_pages(vma, page, nr)
-
 void flush_cache_all(void);
 
 void flush_icache_range(unsigned long kstart, unsigned long kend);
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 841e268d2374..f6181f69577f 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -321,13 +321,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
 
-/*
- * We don't appear to need to do anything here.  In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page)	do { } while (0)
-#define flush_icache_pages(vma, page, nr)	do { } while (0)
-
 /*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index 0d6cb65624c4..908d8b0bc4fd 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -45,7 +45,6 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
 #define flush_cache_vmap(start, end)		cache_wbinv_all()
 #define flush_cache_vunmap(start, end)		cache_wbinv_all()
 
-#define flush_icache_page(vma, page)		do {} while (0);
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 #define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
 #define flush_icache_deferred(mm)		do {} while (0);
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index 9c728933a776..40be16907267 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -33,7 +33,6 @@ static inline void flush_dcache_page(struct page *page)
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_page(vma, page)		do { } while (0)
 
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index dc3f500a5a01..bfff514a81c8 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -18,7 +18,7 @@
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flush a range of instructions
 *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
- *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
+ *  - flush_icache_pages(vma, pg, nr) flushes(invalidates) nr pages for icache
 *
 * Need to doublecheck which one is really needed for ptrace stuff to work.
 */
diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h
index 88a44da50a3b..80bd74106985 100644
--- a/arch/loongarch/include/asm/cacheflush.h
+++ b/arch/loongarch/include/asm/cacheflush.h
@@ -46,8 +46,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
 #define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
 #define flush_cache_vmap(start, end)			do { } while (0)
 #define flush_cache_vunmap(start, end)			do { } while (0)
-#define flush_icache_page(vma, page)			do { } while (0)
-#define flush_icache_pages(vma, page)			do { } while (0)
 #define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
 #define flush_dcache_page(page)				do { } while (0)
 #define flush_dcache_mmap_lock(mapping)			do { } while (0)
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 88eb85e81ef6..ed12358c4783 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -261,7 +261,6 @@ static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_pages(vma, page, nr)	\
 	__flush_pages_to_ram(page_address(page), nr)
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
 
 extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 				   unsigned long addr, int len);
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 0f389bc7cb90..f36c2519ed97 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -82,12 +82,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 		__flush_anon_page(page, vmaddr);
 }
 
-static inline void flush_icache_pages(struct vm_area_struct *vma,
-	struct page *page, unsigned int nr)
-{
-}
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
-
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
 extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
 extern void (*__flush_icache_user_range)(unsigned long start,
diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index 8624ca83cffe..7c48c5213fb7 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -35,7 +35,7 @@ void flush_dcache_folio(struct folio *folio);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 		unsigned int nr);
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1);
+#define flush_icache_pages flush_icache_pages
 
 #define flush_cache_vmap(start, end)		flush_dcache_range(start, end)
 #define flush_cache_vunmap(start, end)		flush_dcache_range(start, end)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index b77c3e0c37d3..b4006f2a9705 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -60,7 +60,7 @@ static inline void flush_dcache_page(struct page *page)
 
 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 		unsigned int nr);
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+#define flush_icache_pages flush_icache_pages
 
 #define flush_icache_range(s,e)		do { \
 	flush_kernel_dcache_range_asm(s,e); \
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 9fceef6f3e00..878b6b551bd2 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -53,7 +53,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range flush_icache_range
 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 		unsigned int nr);
-#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+#define flush_icache_pages flush_icache_pages
 extern void flush_cache_sigtramp(unsigned long address);
 
 struct flusher_data {
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index c8dd971f0e88..f3b7270bf71b 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -16,8 +16,6 @@
 #define flush_cache_page(vma,addr,pfn) \
 	sparc32_cachetlb_ops->cache_page(vma, addr)
 #define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma, pg)		do { } while (0)
-#define flush_icache_pages(vma, pg, nr)	do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index a9a719f04d06..0e879004efff 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -53,9 +53,6 @@ static inline void flush_dcache_page(struct page *page)
 	flush_dcache_folio(page_folio(page));
 }
 
-#define flush_icache_page(vma, pg)	do { } while(0)
-#define flush_icache_pages(vma, pg, nr)	do { } while(0)
-
 void flush_ptrace_access(struct vm_area_struct *, struct page *,
 			 unsigned long uaddr, void *kaddr,
 			 unsigned long len, int write);
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 35153f6725e4..785a00ce83c1 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -160,10 +160,6 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 		__invalidate_icache_range(start,(end) - (start));	\
 	} while (0)
 
-/* This is not required, see Documentation/core-api/cachetlb.rst */
-#define	flush_icache_page(vma,page)	do { } while (0)
-#define	flush_icache_pages(vma, page, nr)	do { } while (0)
-
 #define flush_dcache_mmap_lock(mapping)			do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)		do { } while (0)
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 09d51a680765..84ec53ccc450 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -77,18 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
 #define flush_icache_user_range flush_icache_range
 #endif
 
-#ifndef flush_icache_page
-static inline void flush_icache_pages(struct vm_area_struct *vma,
-				      struct page *page, unsigned int nr)
-{
-}
-
-static inline void flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page)
-{
-}
-#endif
-
 #ifndef flush_icache_user_page
 static inline void flush_icache_user_page(struct vm_area_struct *vma,
 					  struct page *page,
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
index 82136f3fcf54..55f297b2c23f 100644
--- a/include/linux/cacheflush.h
+++ b/include/linux/cacheflush.h
@@ -17,4 +17,13 @@ static inline void flush_dcache_folio(struct folio *folio)
 #define flush_dcache_folio flush_dcache_folio
 #endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
 
+#ifndef flush_icache_pages
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+				     struct page *page, unsigned int nr)
+{
+}
+#endif
+
+#define flush_icache_page(vma, page)	flush_icache_pages(vma, page, 1)
+
 #endif /* _LINUX_CACHEFLUSH_H */
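As a closing note, the GENERIC_IOREMAP conversion in the first patch above
relies on the same opt-out convention: declaring the function and defining
a macro of the same name in asm/io.h keeps <asm-generic/io.h> from emitting
its generic version, and the arch wrapper only adds its own twist before
calling the common helper in mm/ioremap.c.  A rough sketch, not the literal
arc code:

	/* asm/io.h: the macro suppresses the <asm-generic/io.h> fallback */
	void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
				   unsigned long flags);
	#define ioremap_prot ioremap_prot

	/* arch wrapper: apply the arch-specific fixup (here: force the
	 * mapping uncached), then take the common mapping path. */
	void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
				   unsigned long flags)
	{
		return generic_ioremap_prot(paddr, size,
				pgprot_noncached(__pgprot(flags)));
	}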