From 2e727bffbe93750a13d2414f3ce43de2f21600d2 Mon Sep 17 00:00:00 2001
From: David Stevens
Date: Wed, 29 Sep 2021 11:32:58 +0900
Subject: iommu/dma: Check CONFIG_SWIOTLB more broadly

Introduce a new dev_use_swiotlb function to guard swiotlb code, instead
of overloading dev_is_untrusted. This allows CONFIG_SWIOTLB to be
checked more broadly, so the swiotlb related code can be removed more
aggressively.

Signed-off-by: David Stevens
Reviewed-by: Robin Murphy
Reviewed-by: Christoph Hellwig
Link: https://lore.kernel.org/r/20210929023300.335969-6-stevensd@google.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/dma-iommu.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4f77c44eaf14..85a005b268f6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -317,6 +317,11 @@ static bool dev_is_untrusted(struct device *dev)
 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
+static bool dev_use_swiotlb(struct device *dev)
+{
+	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
+}
+
 /* sysfs updates are serialised by the mutex of the group owning @domain */
 int iommu_dma_init_fq(struct iommu_domain *domain)
 {
@@ -731,7 +736,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -747,7 +752,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -765,7 +770,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		for_each_sg(sgl, sg, nelems, i)
 			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
 						      sg->length, dir);
@@ -781,7 +786,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		for_each_sg(sgl, sg, nelems, i)
 			iommu_dma_sync_single_for_device(dev,
 							 sg_dma_address(sg),
@@ -808,8 +813,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	 * If both the physical buffer start address and size are
 	 * page aligned, we don't need to use a bounce page.
 	 */
-	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
-	    iova_offset(iovad, phys | size)) {
+	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
 		void *padding_start;
 		size_t padding_size;
 
@@ -994,7 +998,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		goto out;
 	}
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1072,7 +1076,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *tmp;
 	int i;
 
-	if (dev_is_untrusted(dev)) {
+	if (dev_use_swiotlb(dev)) {
 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
 		return;
 	}
-- 
cgit v1.2.3