path: root/drivers/iommu/sprd-iommu.c
author	Robin Murphy <robin.murphy@arm.com>	2022-11-15 18:26:36 +0300
committer	Joerg Roedel <jroedel@suse.de>	2022-11-19 12:44:13 +0300
commit	a05d5857cec3efef02af557dcb5ed257364356e6 (patch)
tree	b44f68e6c7ee95c773f42dbe24d06f1306adad3b /drivers/iommu/sprd-iommu.c
parent	b577f7e679b763b706032e7a65c7b3a05c5f2184 (diff)
download	linux-a05d5857cec3efef02af557dcb5ed257364356e6.tar.xz
iommu/sprd: Update to {map,unmap}_pages
Now that the core API has a proper notion of multi-page mappings, clean up the old pgsize_bitmap hack by implementing the new interfaces instead. This time we'll get the return values for unmaps correct too.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/9026464e8380b92d10d09103e215eb4306a5df7c.1668100209.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
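For reference, the shape of the driver callbacks before and after the conversion, paraphrased from the hunks below (the enclosing struct iommu_domain_ops layout depends on the kernel version in question):

	/* old single-page interface: one call per "size" region */
	static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
				  phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				       size_t size, struct iommu_iotlb_gather *iotlb_gather);

	/* new multi-page interface: pgsize * pgcount bytes per call; bytes mapped are
	 * reported through *mapped, bytes unmapped are returned (0 on failure) */
	static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
				  phys_addr_t paddr, size_t pgsize, size_t pgcount,
				  int prot, gfp_t gfp, size_t *mapped);
	static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				       size_t pgsize, size_t pgcount,
				       struct iommu_iotlb_gather *iotlb_gather);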
Diffstat (limited to 'drivers/iommu/sprd-iommu.c')
-rw-r--r--	drivers/iommu/sprd-iommu.c	25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index e02793375598..219bfa11f7f4 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -271,10 +271,11 @@ static void sprd_iommu_detach_device(struct iommu_domain *domain,
}
static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
- unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE;
unsigned long flags;
unsigned int i;
u32 *pgt_base_iova;
@@ -296,35 +297,37 @@ static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
spin_lock_irqsave(&dom->pgtlock, flags);
- for (i = 0; i < page_num; i++) {
+ for (i = 0; i < pgcount; i++) {
pgt_base_iova[i] = pabase >> SPRD_IOMMU_PAGE_SHIFT;
pabase += SPRD_IOMMU_PAGE_SIZE;
}
spin_unlock_irqrestore(&dom->pgtlock, flags);
+ *mapped = size;
return 0;
}
static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *iotlb_gather)
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
unsigned long flags;
u32 *pgt_base_iova;
- unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE;
unsigned long start = domain->geometry.aperture_start;
unsigned long end = domain->geometry.aperture_end;
if (iova < start || (iova + size) > (end + 1))
- return -EINVAL;
+ return 0;
pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
spin_lock_irqsave(&dom->pgtlock, flags);
- memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ memset(pgt_base_iova, 0, pgcount * sizeof(u32));
spin_unlock_irqrestore(&dom->pgtlock, flags);
- return 0;
+ return size;
}
static void sprd_iommu_sync_map(struct iommu_domain *domain,
@@ -407,13 +410,13 @@ static const struct iommu_ops sprd_iommu_ops = {
.probe_device = sprd_iommu_probe_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
- .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
+ .pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sprd_iommu_attach_device,
.detach_dev = sprd_iommu_detach_device,
- .map = sprd_iommu_map,
- .unmap = sprd_iommu_unmap,
+ .map_pages = sprd_iommu_map,
+ .unmap_pages = sprd_iommu_unmap,
.iotlb_sync_map = sprd_iommu_sync_map,
.iotlb_sync = sprd_iommu_sync,
.iova_to_phys = sprd_iommu_iova_to_phys,
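A minimal userspace model of the convention the patch adopts, assuming hypothetical names (demo_map_pages, demo_unmap_pages, a plain array standing in for the driver's flat page table); it only illustrates the pgcount loop and the *mapped / return-value semantics, not the real driver or the IOMMU core:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define DEMO_PAGE_SHIFT	12
	#define DEMO_PAGE_SIZE	(1u << DEMO_PAGE_SHIFT)
	#define DEMO_NR_PTES	16

	static uint32_t demo_pgt[DEMO_NR_PTES];	/* stand-in for dom->pgt_va */

	/* map_pages-style callback: fill pgcount entries, report bytes mapped via *mapped */
	static int demo_map_pages(unsigned long iova, uint64_t paddr,
				  size_t pgcount, size_t *mapped)
	{
		size_t i, idx = iova >> DEMO_PAGE_SHIFT;

		if (idx + pgcount > DEMO_NR_PTES)
			return -1;		/* out of range: nothing mapped */
		for (i = 0; i < pgcount; i++) {
			demo_pgt[idx + i] = (uint32_t)(paddr >> DEMO_PAGE_SHIFT);
			paddr += DEMO_PAGE_SIZE;
		}
		*mapped = pgcount * DEMO_PAGE_SIZE;
		return 0;
	}

	/* unmap_pages-style callback: return bytes unmapped, 0 on failure (not -EINVAL) */
	static size_t demo_unmap_pages(unsigned long iova, size_t pgcount)
	{
		size_t idx = iova >> DEMO_PAGE_SHIFT;

		if (idx + pgcount > DEMO_NR_PTES)
			return 0;
		memset(&demo_pgt[idx], 0, pgcount * sizeof(uint32_t));
		return pgcount * DEMO_PAGE_SIZE;
	}

	int main(void)
	{
		size_t mapped = 0;

		demo_map_pages(0x3000, 0x80000000ull, 4, &mapped);
		printf("mapped %zu bytes, unmapped %zu bytes\n",
		       mapped, demo_unmap_pages(0x3000, 4));
		return 0;
	}

Under these assumptions the demo prints "mapped 16384 bytes, unmapped 16384 bytes", mirroring how the converted driver writes pgcount page-table entries per call and reports sizes instead of overloading the return value with an error code.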