From d3b9f659fac6e5f74f857364c5ed08b94e7f94ae Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Aug 2019 16:03:47 +0200 Subject: microblaze/nommu: use the generic uncached segment support Stop providing our own arch alloc/free hooks for nommu platforms and just expose the segment offset and use the generic dma-direct allocator. Signed-off-by: Christoph Hellwig Signed-off-by: Michal Simek --- arch/microblaze/mm/consistent.c | 93 ++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 52 deletions(-) (limited to 'arch/microblaze/mm') diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index bc7042209c57..1a859e8b58c2 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -42,21 +42,48 @@ #include #include -#ifndef CONFIG_MMU -/* I have to use dcache values because I can't relate on ram size */ -# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) -#endif +void arch_dma_prep_coherent(struct page *page, size_t size) +{ + phys_addr_t paddr = page_to_phys(page); + + flush_dcache_range(paddr, paddr + size); +} +#ifndef CONFIG_MMU /* - * Consistent memory allocators. Used for DMA devices that want to - * share uncached memory with the processor core. - * My crufty no-MMU approach is simple. In the HW platform we can optionally - * mirror the DDR up above the processor cacheable region. So, memory accessed - * in this mirror region will not be cached. It's alloced from the same - * pool as normal memory, but the handle we return is shifted up into the - * uncached region. This will no doubt cause big problems if memory allocated - * here is not also freed properly. -- JW + * Consistent memory allocators. Used for DMA devices that want to share + * uncached memory with the processor core. My crufty no-MMU approach is + * simple. In the HW platform we can optionally mirror the DDR up above the + * processor cacheable region. So, memory accessed in this mirror region will + * not be cached. It's alloced from the same pool as normal memory, but the + * handle we return is shifted up into the uncached region. This will no doubt + * cause big problems if memory allocated here is not also freed properly. -- JW + * + * I have to use dcache values because I can't relate on ram size: */ +#ifdef CONFIG_XILINX_UNCACHED_SHADOW +#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) +#else +#define UNCACHED_SHADOW_MASK 0 +#endif /* CONFIG_XILINX_UNCACHED_SHADOW */ + +void *uncached_kernel_address(void *ptr) +{ + unsigned long addr = (unsigned long)ptr; + + addr |= UNCACHED_SHADOW_MASK; + if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high) + pr_warn("ERROR: Your cache coherent area is CACHED!!!\n"); + return (void *)addr; +} + +void *cached_kernel_address(void *ptr) +{ + unsigned long addr = (unsigned long)ptr; + + return (void *)(addr & ~UNCACHED_SHADOW_MASK); +} +#else /* CONFIG_MMU */ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { @@ -64,12 +91,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, void *ret; unsigned int i, err = 0; struct page *page, *end; - -#ifdef CONFIG_MMU phys_addr_t pa; struct vm_struct *area; unsigned long va; -#endif if (in_interrupt()) BUG(); @@ -86,26 +110,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, * we need to ensure that there are no cachelines in use, * or worse dirty in this area. 
*/ - flush_dcache_range(virt_to_phys((void *)vaddr), - virt_to_phys((void *)vaddr) + size); - -#ifndef CONFIG_MMU - ret = (void *)vaddr; - /* - * Here's the magic! Note if the uncached shadow is not implemented, - * it's up to the calling code to also test that condition and make - * other arranegments, such as manually flushing the cache and so on. - */ -# ifdef CONFIG_XILINX_UNCACHED_SHADOW - ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); -# endif - if ((unsigned int)ret > cpuinfo.dcache_base && - (unsigned int)ret < cpuinfo.dcache_high) - pr_warn("ERROR: Your cache coherent area is CACHED!!!\n"); + arch_dma_prep_coherent(virt_to_page((unsigned long)vaddr), size); - /* dma_handle is same as physical (shadowed) address */ - *dma_handle = (dma_addr_t)ret; -#else /* Allocate some common virtual space to map the new pages. */ area = get_vm_area(size, VM_ALLOC); if (!area) { @@ -117,7 +123,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, /* This gives us the real physical address of the first page. */ *dma_handle = pa = __virt_to_phys(vaddr); -#endif /* * free wasted pages. We skip the first page since we know @@ -131,10 +136,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, split_page(page, order); for (i = 0; i < size && err == 0; i += PAGE_SIZE) { -#ifdef CONFIG_MMU /* MS: This is the whole magic - use cache inhibit pages */ err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE); -#endif SetPageReserved(page); page++; @@ -154,7 +157,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, return ret; } -#ifdef CONFIG_MMU static pte_t *consistent_virt_to_pte(void *vaddr) { unsigned long addr = (unsigned long)vaddr; @@ -172,7 +174,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr, return pte_pfn(*ptep); } -#endif /* * free page(s) as defined by the above mapping. @@ -187,18 +188,6 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, size = PAGE_ALIGN(size); -#ifndef CONFIG_MMU - /* Clear SHADOW_MASK bit in address, and free as per usual */ -# ifdef CONFIG_XILINX_UNCACHED_SHADOW - vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); -# endif - page = virt_to_page(vaddr); - - do { - __free_reserved_page(page); - page++; - } while (size -= PAGE_SIZE); -#else do { pte_t *ptep = consistent_virt_to_pte(vaddr); unsigned long pfn; @@ -216,5 +205,5 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, /* flush tlb */ flush_tlb_all(); -#endif } +#endif /* CONFIG_MMU */ -- cgit v1.2.3 From 04e3543e228fcbb000de922ec5d366f398bdd9ae Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Aug 2019 16:03:48 +0200 Subject: microblaze: use the generic dma coherent remap allocator This switches to using common code for the DMA allocations, including potential use of the CMA allocator if configured. Switching to the generic code enables DMA allocations from atomic context, which is required by the DMA API documentation, and also adds various other minor features drivers start relying upon. It also makes sure we have on tested code base for all architectures that require uncached pte bits for coherent DMA allocations. 
Signed-off-by: Christoph Hellwig Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 1 + arch/microblaze/mm/consistent.c | 152 ++-------------------------------------- 2 files changed, 5 insertions(+), 148 deletions(-) (limited to 'arch/microblaze/mm') diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index a0d749c309f3..e477896fbae6 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -17,6 +17,7 @@ config MICROBLAZE select TIMER_OF select CLONE_BACKWARDS3 select COMMON_CLK + select DMA_DIRECT_REMAP if MMU select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index 1a859e8b58c2..0e0f733eb846 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -4,43 +4,16 @@ * Copyright (C) 2010 Michal Simek * Copyright (C) 2010 PetaLogix * Copyright (C) 2005 John Williams - * - * Based on PowerPC version derived from arch/arm/mm/consistent.c - * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) - * Copyright (C) 2000 Russell King */ -#include -#include -#include #include -#include #include #include -#include -#include #include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include #include - -#include -#include -#include -#include -#include -#include -#include #include -#include +#include void arch_dma_prep_coherent(struct page *page, size_t size) { @@ -84,126 +57,9 @@ void *cached_kernel_address(void *ptr) return (void *)(addr & ~UNCACHED_SHADOW_MASK); } #else /* CONFIG_MMU */ -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - unsigned long order, vaddr; - void *ret; - unsigned int i, err = 0; - struct page *page, *end; - phys_addr_t pa; - struct vm_struct *area; - unsigned long va; - - if (in_interrupt()) - BUG(); - - /* Only allocate page size areas. */ - size = PAGE_ALIGN(size); - order = get_order(size); - - vaddr = __get_free_pages(gfp | __GFP_ZERO, order); - if (!vaddr) - return NULL; - - /* - * we need to ensure that there are no cachelines in use, - * or worse dirty in this area. - */ - arch_dma_prep_coherent(virt_to_page((unsigned long)vaddr), size); - - /* Allocate some common virtual space to map the new pages. */ - area = get_vm_area(size, VM_ALLOC); - if (!area) { - free_pages(vaddr, order); - return NULL; - } - va = (unsigned long) area->addr; - ret = (void *)va; - - /* This gives us the real physical address of the first page. */ - *dma_handle = pa = __virt_to_phys(vaddr); - - /* - * free wasted pages. We skip the first page since we know - * that it will have count = 1 and won't require freeing. - * We also mark the pages in use as reserved so that - * remap_page_range works. - */ - page = virt_to_page(vaddr); - end = page + (1 << order); - - split_page(page, order); - - for (i = 0; i < size && err == 0; i += PAGE_SIZE) { - /* MS: This is the whole magic - use cache inhibit pages */ - err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE); - - SetPageReserved(page); - page++; - } - - /* Free the otherwise unused pages. 
*/ - while (page < end) { - __free_page(page); - page++; - } - - if (err) { - free_pages(vaddr, order); - return NULL; - } - - return ret; -} - -static pte_t *consistent_virt_to_pte(void *vaddr) -{ - unsigned long addr = (unsigned long)vaddr; - - return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr); -} - -long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr, - dma_addr_t dma_addr) +static int __init atomic_pool_init(void) { - pte_t *ptep = consistent_virt_to_pte(vaddr); - - if (pte_none(*ptep) || !pte_present(*ptep)) - return 0; - - return pte_pfn(*ptep); -} - -/* - * free page(s) as defined by the above mapping. - */ -void arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, unsigned long attrs) -{ - struct page *page; - - if (in_interrupt()) - BUG(); - - size = PAGE_ALIGN(size); - - do { - pte_t *ptep = consistent_virt_to_pte(vaddr); - unsigned long pfn; - - if (!pte_none(*ptep) && pte_present(*ptep)) { - pfn = pte_pfn(*ptep); - pte_clear(&init_mm, (unsigned int)vaddr, ptep); - if (pfn_valid(pfn)) { - page = pfn_to_page(pfn); - __free_reserved_page(page); - } - } - vaddr += PAGE_SIZE; - } while (size -= PAGE_SIZE); - - /* flush tlb */ - flush_tlb_all(); + return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL)); } +postcore_initcall(atomic_pool_init); #endif /* CONFIG_MMU */ -- cgit v1.2.3
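
For readers unfamiliar with the generic uncached-segment support the first patch switches to: on !CONFIG_MMU Microblaze the common dma-direct allocator now drives the two hooks the patch exports, arch_dma_prep_coherent() and uncached_kernel_address(). The sketch below only approximates that flow; it is not a copy of kernel/dma/direct.c, the wrapper function name is made up, and only the hook names and page helpers are the real interfaces used in the patch.

#include <linux/dma-noncoherent.h>	/* arch_dma_prep_coherent(), uncached_kernel_address() */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/page.h>

/* Illustrative sketch of the generic nommu coherent-allocation flow. */
static void *coherent_alloc_sketch(size_t size, dma_addr_t *dma_handle)
{
	struct page *page;

	/* Backing memory comes from the normal page allocator. */
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	/* Arch hook: flush any (dirty) cachelines covering the buffer. */
	arch_dma_prep_coherent(page, size);

	/* With dma-direct, the DMA handle is the physical address of the pages. */
	*dma_handle = page_to_phys(page);

	/*
	 * Arch hook: return the alias in the uncached shadow region so
	 * CPU accesses bypass the data cache.
	 */
	return uncached_kernel_address(page_address(page));
}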
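
From a driver's point of view the second patch changes nothing in the API: consumers keep calling the standard dma_alloc_coherent()/dma_free_coherent(); only the implementation behind them moves from the removed arch_dma_alloc()/arch_dma_free() to the common remap allocator (plus CMA, if configured). A minimal, hypothetical usage sketch follows — the device pointer, function name and buffer size are placeholders, not part of either commit.

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int mydrv_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Served by the generic DMA_DIRECT_REMAP allocator on MMU kernels. */
	ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/*
	 * GFP_ATOMIC callers (e.g. from an IRQ handler) are now satisfied
	 * from the pool created by atomic_pool_init() above; the old
	 * arch_dma_alloc() simply hit BUG() when called with in_interrupt().
	 */

	dma_free_coherent(dev, 4096, ring, ring_dma);
	return 0;
}

The atomic-context support is the user-visible gain: the DMA API documentation requires dma_alloc_coherent() to work with GFP_ATOMIC, which the old get_vm_area()/map_page() based implementation could not provide.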