summaryrefslogtreecommitdiff
path: root/drivers/iommu/iommu.c
diff options
context:
space:
mode:
authorOlav Haugan <ohaugan@codeaurora.org>2014-10-25 20:55:16 +0400
committerJoerg Roedel <jroedel@suse.de>2014-11-04 16:53:36 +0300
commit315786ebbf4ad6552b6fd8e0e7b2ea220fcbfdbd (patch)
treed07fec2670951b73bdacd8479deaac09bd48f48b /drivers/iommu/iommu.c
parent0df1f2487d2f0d04703f142813d53615d62a1da4 (diff)
downloadlinux-315786ebbf4ad6552b6fd8e0e7b2ea220fcbfdbd.tar.xz
iommu: Add iommu_map_sg() function
Mapping and unmapping are more often than not in the critical path. map_sg allows IOMMU driver implementations to optimize the process of mapping buffers into the IOMMU page tables. Instead of mapping a buffer one page at a time and requiring potentially expensive TLB operations for each page, this function allows the driver to map all pages in one go and defer TLB maintenance until after all pages have been mapped. Additionally, the mapping operation would be faster in general since clients do not have to keep calling the map API over and over again for each physically contiguous chunk of memory that needs to be mapped to a virtually contiguous region. Signed-off-by: Olav Haugan <ohaugan@codeaurora.org> Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--drivers/iommu/iommu.c25
1 files changed, 25 insertions, 0 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ed8b04867b1f..46727ce9280d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1124,6 +1124,31 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
EXPORT_SYMBOL_GPL(iommu_unmap);
+/*
+ * default_iommu_map_sg() - generic scatterlist mapping helper
+ *
+ * Maps each scatterlist entry into @domain with an individual
+ * iommu_map() call, building one virtually contiguous region
+ * starting at @iova.
+ *
+ * @domain: IOMMU domain to map into
+ * @iova:   start address of the virtually contiguous target region
+ * @sg:     scatterlist describing the physical buffers
+ * @nents:  number of entries in @sg
+ * @prot:   IOMMU_* protection flags, passed through to iommu_map()
+ *
+ * Returns the total number of bytes mapped, or 0 on failure (any
+ * partial mappings are torn down before returning, so a 0 return
+ * leaves the domain unchanged).
+ */
+size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ int ret;
+ size_t mapped = 0;
+ unsigned int i;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i) {
+ /*
+ * Map from the start of the page backing this entry; the
+ * mapping length covers the in-page offset plus the data
+ * length.
+ * NOTE(review): no alignment check is done here — this
+ * assumes s->offset + s->length is an acceptable mapping
+ * size for the domain's supported page sizes; confirm
+ * callers only pass suitably aligned scatterlists.
+ */
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ size_t page_len = s->offset + s->length;
+
+ ret = iommu_map(domain, iova + mapped, phys, page_len, prot);
+ if (ret) {
+ /* undo mappings already done */
+ iommu_unmap(domain, iova, mapped);
+ mapped = 0;
+ break;
+ }
+ /* advance within the virtually contiguous region */
+ mapped += page_len;
+ }
+
+ return mapped;
+}
+EXPORT_SYMBOL_GPL(default_iommu_map_sg);
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)