Diffstat (limited to 'drivers/iommu/dma-iommu.c')
 drivers/iommu/dma-iommu.c | 45 +++++++++++++++++++++++++++------------------
 1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d991d40f797f..f321279baf9e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -303,13 +303,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn;
+ struct iova_domain *iovad;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
+ iovad = &cookie->iovad;
+
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
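
This first hunk is an ordering fix: the old code computed &cookie->iovad before validating cookie, so a NULL cookie would evaluate &NULL->iovad, which is undefined behavior even though nothing is dereferenced yet. A minimal sketch of the resulting prologue, reconstructed from the hunk:

	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;	/* derived only once the cookie is known valid */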
@@ -444,13 +446,18 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, dma_addr);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t unmapped;
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
- WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+ iommu_iotlb_gather_init(&iotlb_gather);
+
+ unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
+ WARN_ON(unmapped != size);
if (!cookie->fq_domain)
- iommu_tlb_sync(domain);
+ iommu_tlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size);
}
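
__iommu_dma_unmap() is converted to the iommu_iotlb_gather API: the range to invalidate is accumulated in an on-stack gather structure during the unmap and flushed once afterwards, instead of being passed implicitly through the old two-argument iommu_unmap_fast()/iommu_tlb_sync() pair. The calling pattern, as a minimal sketch:

	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&iotlb_gather);	/* start with an empty range */
	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);		/* a partial unmap indicates a bug */
	iommu_tlb_sync(domain, &iotlb_gather);	/* one flush for the whole range */

Flush-queue domains defer invalidation, which is why the patched function only calls iommu_tlb_sync() when cookie->fq_domain is not set.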
@@ -541,15 +548,6 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
return pages;
}
-static struct page **__iommu_dma_get_pages(void *cpu_addr)
-{
- struct vm_struct *area = find_vm_area(cpu_addr);
-
- if (!area || !area->pages)
- return NULL;
- return area->pages;
-}
-
/**
* iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
* @dev: Device to allocate memory for. Must be a real device
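
The open-coded __iommu_dma_get_pages() helper is deleted because the same lookup now lives in the common DMA code as dma_common_find_pages(), used at the call sites further down. Assuming it matches the kernel/dma/remap.c helper of this kernel generation, the replacement adds a flags check on top of the old find_vm_area() lookup, roughly:

	struct page **dma_common_find_pages(void *cpu_addr)
	{
		struct vm_struct *area = find_vm_area(cpu_addr);

		/* only areas created by the DMA remapping code qualify */
		if (!area || area->flags != VM_DMA_COHERENT)
			return NULL;
		return area->pages;
	}

The VM_USERMAP arguments dropped from the dma_common_pages_remap()/dma_common_free_remap() calls below belong to the same cleanup: the common code now tags its vmalloc areas with VM_DMA_COHERENT internally.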
@@ -617,7 +615,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
< size)
goto out_free_sg;
- vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+ vaddr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!vaddr)
goto out_unmap;
@@ -938,10 +936,10 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
* If the address is remapped, then it's either non-coherent
* or highmem CMA, or an iommu_dma_alloc_remap() construction.
*/
- pages = __iommu_dma_get_pages(cpu_addr);
+ pages = dma_common_find_pages(cpu_addr);
if (!pages)
page = vmalloc_to_page(cpu_addr);
- dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
+ dma_common_free_remap(cpu_addr, alloc_size);
} else {
/* Lowmem means a coherent atomic or CMA allocation */
page = virt_to_page(cpu_addr);
@@ -965,18 +963,21 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
{
bool coherent = dev_is_dma_coherent(dev);
size_t alloc_size = PAGE_ALIGN(size);
+ int node = dev_to_node(dev);
struct page *page = NULL;
void *cpu_addr;
page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (!page)
+ page = alloc_pages_node(node, gfp, get_order(alloc_size));
+ if (!page)
return NULL;
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
- VM_USERMAP, prot, __builtin_return_address(0));
+ prot, __builtin_return_address(0));
if (!cpu_addr)
goto out_free_pages;
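
iommu_dma_alloc_pages() gains a fallback here: when the contiguous (CMA) allocator cannot satisfy the request, it now tries a plain buddy allocation on the device's NUMA node rather than failing outright. No special bookkeeping is needed on the free side, assuming dma_free_contiguous() behaves as in this era's kernel/dma/contiguous.c, where a page that did not come from the CMA area simply falls through to __free_pages():

	void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
	{
		/* cma_release() returns false for pages outside the CMA area */
		if (!cma_release(dev_get_cma_area(dev), page,
				 PAGE_ALIGN(size) >> PAGE_SHIFT))
			__free_pages(page, get_order(size));
	}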
@@ -1042,7 +1043,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
return -ENXIO;
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
- struct page **pages = __iommu_dma_get_pages(cpu_addr);
+ struct page **pages = dma_common_find_pages(cpu_addr);
if (pages)
return __iommu_dma_mmap(pages, size, vma);
@@ -1064,7 +1065,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
int ret;
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
- struct page **pages = __iommu_dma_get_pages(cpu_addr);
+ struct page **pages = dma_common_find_pages(cpu_addr);
if (pages) {
return sg_alloc_table_from_pages(sgt, pages,
@@ -1083,6 +1084,13 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
return ret;
}
+static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+
+ return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
+}
+
static const struct dma_map_ops iommu_dma_ops = {
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
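
The new get_merge_boundary callback reports the finest granularity at which the IOMMU can stitch scatterlist segments together: the smallest supported IOMMU page size minus one. A worked example, assuming a typical pgsize_bitmap of SZ_4K | SZ_2M | SZ_1G:

	order = __ffs(SZ_4K | SZ_2M | SZ_1G);	/* lowest set bit: 12 */
	boundary = (1UL << order) - 1;		/* 0xfff, i.e. 4 KiB - 1 */

Segments whose ends and starts line up on this boundary can be merged into one DMA segment, because the IOMMU can remap the pieces into a contiguous IOVA range.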
@@ -1098,6 +1106,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.sync_sg_for_device = iommu_dma_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
+ .get_merge_boundary = iommu_dma_get_merge_boundary,
};
/*
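
Callers reach the new op through the generic dma_get_merge_boundary() wrapper rather than through iommu_dma_ops directly. A sketch of that wrapper, assuming the kernel/dma/mapping.c implementation that accompanied this op:

	unsigned long dma_get_merge_boundary(struct device *dev)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (!ops || !ops->get_merge_boundary)
			return 0;	/* zero means: cannot merge */

		return ops->get_merge_boundary(dev);
	}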