Diffstat (limited to 'drivers/iommu/dma-iommu.c')
 drivers/iommu/dma-iommu.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 09f6e1c0f9c0..f90251572a5d 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,6 +20,7 @@
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
+#include <linux/list_sort.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -414,6 +415,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
return 0;
}
+static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
+ struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
+
+ return res_a->res->start > res_b->res->start;
+}
+
static int iova_reserve_pci_windows(struct pci_dev *dev,
struct iova_domain *iovad)
{
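
The comparator added above relies on list_sort()'s contract: the callback only needs to return a value greater than zero when its first argument should sort after the second, so a boolean "start_a > start_b" yields a stable ascending sort by window start address. The userspace sketch below mirrors that convention with a minimal merge sort over simplified stand-in structs; none of these names are kernel API, it only illustrates why the one-line comparator is sufficient.

/*
 * Userspace illustration (not part of the patch). struct range and
 * range_cmp() are simplified stand-ins for resource_entry and
 * iommu_dma_ranges_sort().
 */
#include <stdio.h>

struct range {
	unsigned long start;
	struct range *next;
};

/* Same convention as list_sort(): return > 0 iff @a should sort after @b. */
static int range_cmp(const struct range *a, const struct range *b)
{
	return a->start > b->start;
}

/* Merge two sorted lists; "<= 0 keeps @a first" preserves stability. */
static struct range *merge(struct range *a, struct range *b)
{
	struct range head, *tail = &head;

	while (a && b) {
		if (range_cmp(a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? a : b;
	return head.next;
}

/* Minimal recursive merge sort over a singly linked list. */
static struct range *msort(struct range *list)
{
	struct range *slow = list, *fast, *mid;

	if (!list || !list->next)
		return list;

	for (fast = list->next; fast && fast->next; fast = fast->next->next)
		slow = slow->next;
	mid = slow->next;
	slow->next = NULL;

	return merge(msort(list), msort(mid));
}

int main(void)
{
	struct range r[3] = { { 0x8000 }, { 0x1000 }, { 0x4000 } }, *p;

	r[0].next = &r[1];
	r[1].next = &r[2];
	r[2].next = NULL;

	for (p = msort(&r[0]); p; p = p->next)
		printf("0x%lx\n", p->start);	/* 0x1000 0x4000 0x8000 */
	return 0;
}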
@@ -432,6 +442,7 @@ static int iova_reserve_pci_windows(struct pci_dev *dev,
}
/* Get reserved DMA windows from host bridge */
+ list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
resource_list_for_each_entry(window, &bridge->dma_ranges) {
end = window->res->start - window->offset;
resv_iova:
@@ -440,7 +451,7 @@ resv_iova:
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
} else if (end < start) {
- /* dma_ranges list should be sorted */
+ /* DMA ranges should be non-overlapping */
dev_err(&dev->dev,
"Failed to reserve IOVA [%pa-%pa]\n",
&start, &end);
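
For context on why the new list_sort() call matters: the reservation loop reserves the IOVA gap below each window's start and then advances start past that window's end, so it only produces sane gaps when the windows arrive in ascending order; with firmware-provided ranges in arbitrary order the computed gap can come out with end < start, which is also what genuinely overlapping ranges produce, hence the reworded comment. A simplified userspace sketch of that walk (byte rather than PFN granularity, made-up example addresses, no kernel API):

#include <stdio.h>

struct window { unsigned long start, end; };	/* stand-in for a bridge DMA window */

/*
 * Walk the windows in the order given, reserving the gap [start, win.start)
 * below each one and then stepping past its end -- the same shape as the
 * loop in iova_reserve_pci_windows().
 */
static void reserve_gaps(const struct window *w, int n)
{
	unsigned long start = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned long end = w[i].start;

		if (end > start)
			printf("reserve [0x%lx-0x%lx)\n", start, end);
		else if (end < start)
			printf("window %d out of order or overlapping\n", i);

		start = w[i].end + 1;
	}
}

int main(void)
{
	/* Made-up example windows, sorted by start address... */
	const struct window sorted[] = {
		{ 0x00001000, 0x0000ffff },
		{ 0x80000000, 0x8fffffff },
	};
	/* ...and the same two windows out of order. */
	const struct window unsorted[] = {
		{ 0x80000000, 0x8fffffff },
		{ 0x00001000, 0x0000ffff },
	};

	reserve_gaps(sorted, 2);	/* two clean gaps */
	reserve_gaps(unsorted, 2);	/* trips the end < start error path */
	return 0;
}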
@@ -776,6 +787,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
dma_addr_t iova;
+ ssize_t ret;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -813,8 +825,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
- < size)
+ ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
+ if (ret < 0 || ret < size)
goto out_free_sg;
sgt->sgl->dma_address = iova;
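
The reworked check matters because iommu_map_sg_atomic() returns a signed ssize_t that can carry a negative errno, while size is an unsigned size_t: in a bare "ret < size" comparison the negative value is converted to unsigned and becomes huge, so the failure would be treated as success. A tiny userspace demonstration of that conversion (not kernel code; the errno value is just an example):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>	/* ssize_t */

int main(void)
{
	ssize_t ret = -ENOMEM;	/* pretend the map call failed */
	size_t size = 4096;

	/*
	 * "ret < size" converts the signed ret to size_t (this is what
	 * -Wsign-compare warns about), so the negative errno becomes a huge
	 * unsigned value and the comparison is false: the error is missed.
	 */
	printf("bare \"ret < size\" catches the error: %s\n",
	       ret < size ? "yes" : "no");			/* no  */
	printf("\"ret < 0 || ret < size\" catches it:  %s\n",
	       (ret < 0 || (size_t)ret < size) ? "yes" : "no");	/* yes */
	return 0;
}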
@@ -971,6 +983,11 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
void *padding_start;
size_t padding_size, aligned_size;
+ if (!is_swiotlb_active(dev)) {
+ dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+ return DMA_MAPPING_ERROR;
+ }
+
aligned_size = iova_align(iovad, size);
phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
iova_mask(iovad), dir, attrs);
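
The added guard is a fail-early pattern: if the swiotlb bounce pool was never set up, there is no way to satisfy an unaligned mapping that needs bouncing, so the function returns the error sentinel up front instead of calling into swiotlb. A rough userspace sketch of that shape, with stand-in names for DMA_MAPPING_ERROR and is_swiotlb_active():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAPPING_ERROR	(~0UL)		/* stand-in for DMA_MAPPING_ERROR */

static bool bounce_pool_active;		/* would come from is_swiotlb_active(dev) */

static unsigned long map_unaligned(unsigned long phys, size_t size)
{
	/* Fail early: with no bounce pool there is nowhere to copy the
	 * unaligned buffer, so return the error sentinel rather than
	 * proceeding into the (absent) swiotlb path. */
	if (!bounce_pool_active) {
		fprintf(stderr, "bounce buffers inactive, cannot map unaligned %zu-byte transaction\n",
			size);
		return MAPPING_ERROR;
	}

	/* ... bounce copy and IOMMU mapping would happen here ... */
	return phys;	/* placeholder for the real DMA address */
}

int main(void)
{
	if (map_unaligned(0x1003, 8) == MAPPING_ERROR)
		puts("mapping refused, as intended when swiotlb is absent");
	return 0;
}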
@@ -1209,7 +1226,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* implementation - it knows better than we do.
*/
ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
- if (ret < iova_len)
+ if (ret < 0 || ret < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);