author		Robin Murphy <robin.murphy@arm.com>	2021-12-17 18:30:57 +0300
committer	Joerg Roedel <jroedel@suse.de>	2021-12-20 11:03:05 +0300
commit		649ad9835a3783bcb6c69368fa939e0010abb2c6 (patch)
tree		cd15efbd451a968cfabde7aad79c78d2e40df006 /drivers/iommu
parent		d5c383f2c98ac58c210b266cdaf7b86bc32d1ad1 (diff)
iommu/iova: Squash flush_cb abstraction
Once again, with iommu-dma now being the only flush queue user, we no longer need the extra level of indirection through flush_cb. Squash that and let the flush queue code call the domain method directly. This does mean temporarily having to carry an additional copy of the IOMMU domain pointer around instead, but only until a later patch untangles it again.

Reviewed-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/e3f9b4acdd6640012ef4fbc819ac868d727b64a9.1639753638.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/dma-iommu.c	13
-rw-r--r--	drivers/iommu/iova.c	11
2 files changed, 6 insertions, 18 deletions
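For context, a minimal sketch of the header-side change implied by the hunks below. The exact layout is an assumption, since include/linux/iova.h lies outside this diffstat (which is limited to drivers/iommu): the flush_cb callback member of struct iova_domain gives way to a plain struct iommu_domain pointer, and the init_iova_flush_queue() prototype takes the domain directly.

/* Sketch of the implied include/linux/iova.h change; surrounding fields
 * and exact ordering are assumptions, not part of this diff.
 */
struct iova_domain {
	/* ... rbroot, granule, start_pfn, fq_timer, ... */
	struct iommu_domain *fq_domain;	/* replaces: iova_flush_cb flush_cb; */
	struct iova_fq __percpu *fq;	/* per-CPU flush queues (unchanged) */
	/* ... */
};

/* With no callback left to store, the iova_flush_cb typedef
 * (void (*)(struct iova_domain *iovad)) has no remaining user.
 */

int init_iova_flush_queue(struct iova_domain *iovad,
			  struct iommu_domain *fq_domain);

This is what lets iova_domain_flush() reach the IOMMU driver via iovad->fq_domain->ops->flush_iotlb_all() instead of bouncing through iommu_dma_flush_iotlb_all(), at the cost of duplicating the domain pointer already held in cookie->fq_domain, as the commit message notes.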
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 6691f3cd768f..c63d93581a4e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -282,17 +282,6 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
-static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
-{
- struct iommu_dma_cookie *cookie;
- struct iommu_domain *domain;
-
- cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
- domain = cookie->fq_domain;
-
- domain->ops->flush_iotlb_all(domain);
-}
-
static bool dev_is_untrusted(struct device *dev)
{
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
@@ -312,7 +301,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
if (cookie->fq_domain)
return 0;
- ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all);
+ ret = init_iova_flush_queue(&cookie->iovad, domain);
if (ret) {
pr_warn("iova flush queue initialization failed\n");
return ret;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 541857ca4fd5..bbf642940988 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -63,7 +63,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
- iovad->flush_cb = NULL;
+ iovad->fq_domain = NULL;
iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
@@ -90,10 +90,10 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
free_percpu(iovad->fq);
iovad->fq = NULL;
- iovad->flush_cb = NULL;
+ iovad->fq_domain = NULL;
}
-int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
+int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
{
struct iova_fq __percpu *queue;
int cpu;
@@ -105,8 +105,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
if (!queue)
return -ENOMEM;
- iovad->flush_cb = flush_cb;
-
for_each_possible_cpu(cpu) {
struct iova_fq *fq;
@@ -117,6 +115,7 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
spin_lock_init(&fq->lock);
}
+ iovad->fq_domain = fq_domain;
iovad->fq = queue;
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
@@ -598,7 +597,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
static void iova_domain_flush(struct iova_domain *iovad)
{
atomic64_inc(&iovad->fq_flush_start_cnt);
- iovad->flush_cb(iovad);
+ iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);
atomic64_inc(&iovad->fq_flush_finish_cnt);
}