author		Stefano Stabellini <stefano.stabellini@xilinx.com>	2020-07-11 01:34:19 +0300
committer	Juergen Gross <jgross@suse.com>	2020-08-04 08:44:19 +0300
commit		2cf6a91347b94528c646cbdd9d295770ea7d447d (patch)
tree		d8a65b1e5ac2c53100808418715522df0f160bf2	/drivers/xen/swiotlb-xen.c
parent		ae4f0a17ee591c15efbee87746290d22aea81617 (diff)
download	linux-2cf6a91347b94528c646cbdd9d295770ea7d447d.tar.xz
swiotlb-xen: add struct device * parameter to xen_phys_to_bus

No functional changes. The parameter is unused in this patch but will be
used by next patches.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Roman Shaposhnik <roman@zededa.com>
Link: https://lore.kernel.org/r/20200710223427.6897-3-sstabellini@kernel.org
Signed-off-by: Juergen Gross <jgross@suse.com>
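The change itself is mechanical: the physical-to-bus translation helpers gain a struct device * argument that every caller now passes through, even though nothing reads it yet. A stand-alone, user-space sketch of that calling pattern (mock types, with pfn_to_bfn() and virt_to_phys() as stand-ins rather than the kernel implementations) might look like this:

/*
 * Mock illustration only: the types, pfn_to_bfn() and virt_to_phys()
 * below are simplified stand-ins, not the kernel's versions.
 */
#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SHIFT	12

struct device { const char *name; };
typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* Identity translation here; the real pfn_to_bfn() consults the P2M table. */
static unsigned long pfn_to_bfn(unsigned long pfn)
{
	return pfn;
}

static phys_addr_t virt_to_phys(void *address)
{
	return (phys_addr_t)(uintptr_t)address;	/* mock */
}

/* After this patch the device pointer is threaded through, still unused. */
static inline dma_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(paddr >> XEN_PAGE_SHIFT);
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	return dma | (paddr & ((1UL << XEN_PAGE_SHIFT) - 1));
}

static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
{
	return xen_phys_to_bus(dev, virt_to_phys(address));
}

int main(void)
{
	struct device dev = { .name = "mock-dev" };
	static char buffer[64];

	printf("bus address: %#llx\n",
	       (unsigned long long)xen_virt_to_bus(&dev, buffer));
	return 0;
}

With the device pointer already plumbed through, the follow-up patches in the series can start consulting dev inside these helpers without having to touch every caller again.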
Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--	drivers/xen/swiotlb-xen.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 89a775948a02..dbe710a59bf2 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -57,7 +57,7 @@ static unsigned long xen_io_tlb_nslabs;
  * can be 32bit when dma_addr_t is 64bit leading to a loss in
  * information if the shift is done before casting to 64bit.
  */
-static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
 {
 	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
 	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;
@@ -78,9 +78,9 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 	return paddr;
 }
 
-static inline dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
 {
-	return xen_phys_to_bus(virt_to_phys(address));
+	return xen_phys_to_bus(dev, virt_to_phys(address));
 }
 
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
@@ -309,7 +309,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
 	 * to *dma_handle. */
 	phys = *dma_handle;
-	dev_addr = xen_phys_to_bus(phys);
+	dev_addr = xen_phys_to_bus(hwdev, phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
@@ -370,7 +370,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				unsigned long attrs)
 {
 	phys_addr_t map, phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = xen_phys_to_bus(phys);
+	dma_addr_t dev_addr = xen_phys_to_bus(dev, phys);
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -395,7 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	phys = map;
-	dev_addr = xen_phys_to_bus(map);
+	dev_addr = xen_phys_to_bus(dev, map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -539,7 +539,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
+	return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {