author     Lu Baolu <baolu.lu@linux.intel.com>  2022-02-16 05:52:49 +0300
committer  Joerg Roedel <jroedel@suse.de>  2022-02-28 15:25:49 +0300
commit     9a630a4b41a2639b65d024a5d2d88ed3ecca130a (patch)
tree       b681f454d619e835b0e7ea122a788c603b95474a /drivers/iommu/msm_iommu.c
parent     41bb23e70b50b89b6137cbecd37009d782454860 (diff)
download   linux-9a630a4b41a2639b65d024a5d2d88ed3ecca130a.tar.xz
iommu: Split struct iommu_ops
Move the domain specific operations out of struct iommu_ops into a new
structure that only has domain specific operations. This solves the
problem of needing to know if the method vector for a given operation
needs to be retrieved from the device or the domain. Logically the domain
ops are the ones that make sense for external subsystems and endpoint
drivers to use, while device ops, with the sole exception of domain_alloc,
are IOMMU API internals.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20220216025249.3459465-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
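To illustrate the shape of the change, below is a minimal, stand-alone C
sketch of the pattern the patch applies: domain-specific methods move into
their own struct iommu_domain_ops, and the driver's struct iommu_ops points
at that vector through a compound-literal default_domain_ops, exactly as
msm_iommu_ops does in the hunk further down. The struct layouts, member
signatures, and all demo_* names here are simplified stand-ins for
illustration, not the real kernel definitions.

/* sketch.c -- simplified model of the iommu_ops / iommu_domain_ops split */
#include <stddef.h>
#include <stdio.h>

struct iommu_domain;                    /* opaque in this sketch */

struct iommu_domain_ops {               /* domain-specific methods */
	int  (*attach_dev)(struct iommu_domain *domain, void *dev);
	int  (*map)(struct iommu_domain *domain, unsigned long iova,
		    unsigned long paddr, size_t size);
	void (*free)(struct iommu_domain *domain);
};

struct iommu_ops {                      /* device-level methods */
	struct iommu_domain *(*domain_alloc)(unsigned int type);
	const struct iommu_domain_ops *default_domain_ops;
};

static struct iommu_domain *demo_domain_alloc(unsigned int type)
{
	(void)type;
	return NULL;                    /* a real driver allocates its domain here */
}

static int demo_map(struct iommu_domain *domain, unsigned long iova,
		    unsigned long paddr, size_t size)
{
	(void)domain;
	printf("map iova=%#lx -> paddr=%#lx size=%zu\n", iova, paddr, size);
	return 0;
}

static const struct iommu_ops demo_ops = {
	.domain_alloc       = demo_domain_alloc,
	/* same compound-literal pattern as msm_iommu_ops in the diff below */
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.map = demo_map,
	},
};

int main(void)
{
	/* callers dispatch domain operations through the domain op vector */
	const struct iommu_domain_ops *dops = demo_ops.default_domain_ops;

	if (dops->map)
		dops->map(NULL, 0x1000, 0x2000, 4096);
	return 0;
}

With the split, code acting on a domain only needs the domain op vector; it
no longer has to know whether a given method lives on the device or on the
domain, which is the problem the commit message describes.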
Diffstat (limited to 'drivers/iommu/msm_iommu.c')
-rw-r--r--  drivers/iommu/msm_iommu.c  30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a2e511edb85..4f441f1b750d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -668,25 +668,27 @@ fail:
static struct iommu_ops msm_iommu_ops = {
.domain_alloc = msm_iommu_domain_alloc,
- .domain_free = msm_iommu_domain_free,
- .attach_dev = msm_iommu_attach_dev,
- .detach_dev = msm_iommu_detach_dev,
- .map = msm_iommu_map,
- .unmap = msm_iommu_unmap,
- /*
- * Nothing is needed here, the barrier to guarantee
- * completion of the tlb sync operation is implicitly
- * taken care when the iommu client does a writel before
- * kick starting the other master.
- */
- .iotlb_sync = NULL,
- .iotlb_sync_map = msm_iommu_sync_map,
- .iova_to_phys = msm_iommu_iova_to_phys,
.probe_device = msm_iommu_probe_device,
.release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ /*
+ * Nothing is needed here, the barrier to guarantee
+ * completion of the tlb sync operation is implicitly
+ * taken care when the iommu client does a writel before
+ * kick starting the other master.
+ */
+ .iotlb_sync = NULL,
+ .iotlb_sync_map = msm_iommu_sync_map,
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .free = msm_iommu_domain_free,
+ }
};
static int msm_iommu_probe(struct platform_device *pdev)