From 8e23c82c68635a8859820bc6feb0fb3798aed943 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 2 Sep 2019 10:44:19 +0200
Subject: xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintenance

Copy the arm64 code that uses the dma-direct/swiotlb helpers for DMA
non-coherent devices.

Signed-off-by: Christoph Hellwig
Reviewed-by: Stefano Stabellini
---
 arch/arm/include/asm/device.h            |  3 --
 arch/arm/include/asm/xen/page-coherent.h | 72 ++++++++++++--------------------
 arch/arm/mm/dma-mapping.c                |  8 +---
 3 files changed, 28 insertions(+), 55 deletions(-)

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index f6955b55c544..c675bc0d5aa8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -14,9 +14,6 @@ struct dev_archdata {
 #endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping	*mapping;
-#endif
-#ifdef CONFIG_XEN
-	const struct dma_map_ops *dev_dma_ops;
 #endif
 	unsigned int dma_coherent:1;
 	unsigned int dma_ops_setup:1;
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 2c403e7c782d..602ac02f154c 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -6,23 +6,37 @@
 #include <linux/dma-mapping.h>
 #include <asm/page.h>
 
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-	if (dev && dev->archdata.dev_dma_ops)
-		return dev->archdata.dev_dma_ops;
-	return get_arch_dma_ops(NULL);
-}
-
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
-	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
 }
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
-	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+
+	if (pfn_valid(pfn))
+		dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+	else
+		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	if (pfn_valid(pfn))
+		dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+	else
+		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
@@ -36,17 +50,8 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	bool local = (page_pfn <= dev_pfn) &&
 		(dev_pfn - page_pfn < compound_pages);
 
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
 	if (local)
-		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+		dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
 	else
 		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 }
@@ -63,33 +68,10 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 	 * safely call the native dma_ops function, otherwise we call the xen
 	 * specific function.
 	 */
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->unmap_page)
-			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-	} else
+	if (pfn_valid(pfn))
+		dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+	else
 		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
 }
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
-			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index d98898893140..143d7c79b4cb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1105,10 +1105,6 @@ static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 	 * 32-bit DMA.
 	 * Use the generic dma-direct / swiotlb ops code in that case, as that
 	 * handles bounce buffering for us.
-	 *
-	 * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
-	 * latter is also selected by the Xen code, but that code for now relies
-	 * on non-NULL dev_dma_ops. To be cleaned up later.
 	 */
 	if (IS_ENABLED(CONFIG_ARM_LPAE))
 		return NULL;
@@ -2318,10 +2314,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	set_dma_ops(dev, dma_ops);
 
 #ifdef CONFIG_XEN
-	if (xen_initial_domain()) {
-		dev->archdata.dev_dma_ops = dev->dma_ops;
+	if (xen_initial_domain())
 		dev->dma_ops = xen_dma_ops;
-	}
 #endif
 	dev->archdata.dma_ops_setup = true;
 }
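
The conversion repeats one dispatch pattern: if the pfn of the DMA handle is
pfn_valid(), the page is local RAM mapped 1:1 in dom0 and the generic
dma-direct cache maintenance applies; otherwise it is a foreign page
grant-mapped into dom0 and the Xen-specific __xen_dma_* helper must do the
work. Below is a minimal userspace C sketch of that dispatch, not kernel code:
pfn_valid() is mocked with a fixed RAM range, and both sync helpers are
hypothetical stand-ins rather than kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(addr)	((addr) >> PAGE_SHIFT)

/* Mock: pretend local RAM covers pfns [0x80000, 0x90000). */
static bool pfn_valid(unsigned long pfn)
{
	return pfn >= 0x80000 && pfn < 0x90000;
}

/* Stand-in for the generic dma-direct cache maintenance path. */
static void direct_sync_for_cpu(uint64_t handle, size_t size)
{
	printf("dma-direct maintenance for %#llx (+%zu)\n",
	       (unsigned long long)handle, size);
}

/* Stand-in for the Xen-specific path used for foreign pages. */
static void xen_foreign_sync_for_cpu(uint64_t handle, size_t size)
{
	printf("grant-mapped page: Xen sync for %#llx (+%zu)\n",
	       (unsigned long long)handle, size);
}

/* The dispatch the patch introduces: local pages take the generic
 * dma-direct path, foreign pages the Xen-specific one. */
static void sync_single_for_cpu(uint64_t handle, size_t size)
{
	if (pfn_valid(PFN_DOWN(handle)))
		direct_sync_for_cpu(handle, size);
	else
		xen_foreign_sync_for_cpu(handle, size);
}

int main(void)
{
	sync_single_for_cpu(0x80001000ULL, 4096);	/* local RAM page */
	sync_single_for_cpu(0x200000000ULL, 4096);	/* foreign page */
	return 0;
}

The kernel versions also take the struct device and an enum
dma_data_direction; the sketch drops both to keep the dispatch itself visible.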