summaryrefslogtreecommitdiff
path: root/drivers/xen/swiotlb-xen.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--  drivers/xen/swiotlb-xen.c | 99
1 file changed, 34 insertions(+), 65 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index df8085b50df1..67aa74d20162 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -36,7 +36,6 @@
#include <xen/hvc-console.h>
#include <asm/dma-mapping.h>
-#include <asm/xen/page-coherent.h>
#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
@@ -104,6 +103,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+#ifdef CONFIG_X86
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
int rc;
@@ -131,94 +131,58 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
}
static void *
-xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
+xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
- void *ret;
+ u64 dma_mask = dev->coherent_dma_mask;
int order = get_order(size);
- u64 dma_mask = DMA_BIT_MASK(32);
phys_addr_t phys;
- dma_addr_t dev_addr;
-
- /*
- * Ignore region specifiers - the kernel's ideas of
- * pseudo-phys memory layout has nothing to do with the
- * machine physical layout. We can't allocate highmem
- * because we can't return a pointer to it.
- */
- flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ void *ret;
- /* Convert the size to actually allocated. */
+ /* Align the allocation to the Xen page size */
size = 1UL << (order + XEN_PAGE_SHIFT);
- /* On ARM this function returns an ioremap'ped virtual address for
- * which virt_to_phys doesn't return the corresponding physical
- * address. In fact on ARM virt_to_phys only works for kernel direct
- * mapped RAM memory. Also see comment below.
- */
- ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
-
+ ret = (void *)__get_free_pages(flags, get_order(size));
if (!ret)
return ret;
-
- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
-
- /* At this point dma_handle is the dma address, next we are
- * going to set it to the machine address.
- * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
- * to *dma_handle. */
- phys = dma_to_phys(hwdev, *dma_handle);
- dev_addr = xen_phys_to_dma(hwdev, phys);
- if (((dev_addr + size - 1 <= dma_mask)) &&
- !range_straddles_page_boundary(phys, size))
- *dma_handle = dev_addr;
- else {
- if (xen_create_contiguous_region(phys, order,
- fls64(dma_mask), dma_handle) != 0) {
- xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
- return NULL;
- }
- *dma_handle = phys_to_dma(hwdev, *dma_handle);
+ phys = virt_to_phys(ret);
+
+ *dma_handle = xen_phys_to_dma(dev, phys);
+ if (*dma_handle + size - 1 > dma_mask ||
+ range_straddles_page_boundary(phys, size)) {
+ if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
+ dma_handle) != 0)
+ goto out_free_pages;
SetPageXenRemapped(virt_to_page(ret));
}
+
memset(ret, 0, size);
return ret;
+
+out_free_pages:
+ free_pages((unsigned long)ret, get_order(size));
+ return NULL;
}
static void
-xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr, unsigned long attrs)
+xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
+ phys_addr_t phys = virt_to_phys(vaddr);
int order = get_order(size);
- phys_addr_t phys;
- u64 dma_mask = DMA_BIT_MASK(32);
- struct page *page;
-
- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
-
- /* do not use virt_to_phys because on ARM it doesn't return you the
- * physical address */
- phys = xen_dma_to_phys(hwdev, dev_addr);
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (is_vmalloc_addr(vaddr))
- page = vmalloc_to_page(vaddr);
- else
- page = virt_to_page(vaddr);
+ if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
+ WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+ return;
- if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
- range_straddles_page_boundary(phys, size)) &&
- TestClearPageXenRemapped(page))
+ if (TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order);
-
- xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
- attrs);
+ free_pages((unsigned long)vaddr, get_order(size));
}
+#endif /* CONFIG_X86 */
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
@@ -421,8 +385,13 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
+#ifdef CONFIG_X86
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
+#else
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
+#endif
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
.sync_single_for_device = xen_swiotlb_sync_single_for_device,
.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,