Diffstat (limited to 'drivers/iommu/mtk_iommu.c')
-rw-r--r--  drivers/iommu/mtk_iommu.c  96
1 file changed, 67 insertions, 29 deletions
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index a2ec9003826c..6fd75a60abd6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -210,33 +210,27 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
- for_each_m4u(data) {
- if (pm_runtime_get_if_in_use(data->dev) <= 0)
- continue;
-
- writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + data->plat_data->inv_sel_reg);
- writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
- wmb(); /* Make sure the tlb flush all done */
+ unsigned long flags;
- pm_runtime_put(data->dev);
- }
+ spin_lock_irqsave(&data->tlb_lock, flags);
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+ data->base + data->plat_data->inv_sel_reg);
+ writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+ wmb(); /* Make sure the tlb flush all done */
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
}
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
size_t granule,
struct mtk_iommu_data *data)
{
- bool has_pm = !!data->dev->pm_domain;
unsigned long flags;
int ret;
u32 tmp;
for_each_m4u(data) {
- if (has_pm) {
- if (pm_runtime_get_if_in_use(data->dev) <= 0)
- continue;
- }
+ if (pm_runtime_get_if_in_use(data->dev) <= 0)
+ continue;
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
@@ -252,17 +246,18 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
/* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
tmp, tmp != 0, 10, 1000);
+
+ /* Clear the CPE status */
+ writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
+
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
mtk_iommu_tlb_flush_all(data);
}
- /* Clear the CPE status */
- writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- spin_unlock_irqrestore(&data->tlb_lock, flags);
- if (has_pm)
- pm_runtime_put(data->dev);
+ pm_runtime_put(data->dev);
}
}
@@ -562,22 +557,52 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
+ struct device_link *link;
+ struct device *larbdev;
+ unsigned int larbid, larbidx, i;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return ERR_PTR(-ENODEV); /* Not an iommu client device */
data = dev_iommu_priv_get(dev);
+ /*
+ * Link the consumer device with the smi-larb device (supplier).
+ * The device that connects with each larb is an independent HW.
+ * All the ports in each device should be in the same larb.
+ */
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ for (i = 1; i < fwspec->num_ids; i++) {
+ larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
+ if (larbid != larbidx) {
+ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
+ larbid, larbidx);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ larbdev = data->larb_imu[larbid].dev;
+ link = device_link_add(dev, larbdev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link)
+ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
return &data->iommu;
}
static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct mtk_iommu_data *data;
+ struct device *larbdev;
+ unsigned int larbid;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev_iommu_priv_get(dev);
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ larbdev = data->larb_imu[larbid].dev;
+ device_link_remove(dev, larbdev);
+
iommu_fwspec_free(dev);
}
@@ -658,15 +683,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
- .domain_free = mtk_iommu_domain_free,
- .attach_dev = mtk_iommu_attach_device,
- .detach_dev = mtk_iommu_detach_device,
- .map = mtk_iommu_map,
- .unmap = mtk_iommu_unmap,
- .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
- .iotlb_sync = mtk_iommu_iotlb_sync,
- .iotlb_sync_map = mtk_iommu_sync_map,
- .iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
@@ -675,6 +691,17 @@ static const struct iommu_ops mtk_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
+ .iotlb_sync = mtk_iommu_iotlb_sync,
+ .iotlb_sync_map = mtk_iommu_sync_map,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .free = mtk_iommu_domain_free,
+ }
};
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
@@ -848,6 +875,10 @@ static int mtk_iommu_probe(struct platform_device *pdev)
plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
of_node_put(larbnode);
+ return -ENODEV;
+ }
+ if (!plarbdev->dev.driver) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
}
data->larb_imu[id].dev = &plarbdev->dev;
@@ -980,6 +1011,13 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
+
+ /*
+ * Users may allocate a dma buffer before they call pm_runtime_get,
+ * in which case it will lack the necessary tlb flush.
+ * Thus, make sure to update the tlb after each PM resume.
+ */
+ mtk_iommu_tlb_flush_all(data);
return 0;
}
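
For reference, a sketch of how the flush-all path reads after this change, reconstructed from the added lines in the first hunk above (tabs and the unchanged surrounding definitions are assumed from context):

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	unsigned long flags;

	/* Serialize with the range flush; the per-M4U runtime-PM get/put is gone. */
	spin_lock_irqsave(&data->tlb_lock, flags);
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		       data->base + data->plat_data->inv_sel_reg);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
	spin_unlock_irqrestore(&data->tlb_lock, flags);
}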