From 657135f3108122556c3cf60a78c6f0e76aeb60e6 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 17 Aug 2018 23:42:22 +0800 Subject: iommu/arm-smmu-v3: Fix a couple of minor comment typos Fix some comment typos spotted. Signed-off-by: John Garry Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 5059d09f3202..feef1226f705 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -708,7 +708,7 @@ static void queue_inc_prod(struct arm_smmu_queue *q) } /* - * Wait for the SMMU to consume items. If drain is true, wait until the queue + * Wait for the SMMU to consume items. If sync is true, wait until the queue * is empty. Otherwise, wait until there is at least one free slot. */ static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe) @@ -2353,8 +2353,8 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) irq = smmu->combined_irq; if (irq) { /* - * Cavium ThunderX2 implementation doesn't not support unique - * irq lines. Use single irq line for all the SMMUv3 interrupts. + * Cavium ThunderX2 implementation doesn't support unique irq + * lines. Use a single irq line for all the SMMUv3 interrupts. */ ret = devm_request_threaded_irq(smmu->dev, irq, arm_smmu_combined_irq_handler, -- cgit v1.2.3 From 85c7a0f1ef624ef58173ef52ea77780257bdfe04 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 6 Sep 2018 17:59:50 +0100 Subject: iommu/io-pgtable-arm: Fix race handling in split_blk_unmap() In removing the pagetable-wide lock, we gained the possibility of the vanishingly unlikely case where we have a race between two concurrent unmappers splitting the same block entry. The logic to handle this is fairly straightforward - whoever loses the race frees their partial next-level table and instead dereferences the winner's newly-installed entry in order to fall back to a regular unmap, which intentionally echoes the pre-existing case of recursively splitting a 1GB block down to 4KB pages by installing a full table of 2MB blocks first. Unfortunately, the chump who implemented that logic failed to update the condition check for that fallback, meaning that if said race occurs at the last level (where the loser's unmap_idx is valid) then the unmap won't actually happen. Fix that to properly account for both the race and recursive cases. 
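For illustration only, the resulting control flow can be sketched in plain userspace C; deref_table(), flush_tlb_range() and unmap_next_level() below are hypothetical stand-ins for iopte_deref(), the TLB maintenance and the recursive __arm_lpae_unmap() call, not real kernel APIs:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for iopte_deref(), TLB maintenance and recursion */
	static uint64_t *deref_table(uint64_t pte)
	{
		return (uint64_t *)(uintptr_t)(pte & ~0xfffULL);
	}
	static void flush_tlb_range(size_t size) { (void)size; }
	static size_t unmap_next_level(uint64_t *tablep, size_t size)
	{
		(void)tablep;
		return size;
	}

	static size_t split_blk_unmap_sketch(_Atomic uint64_t *ptep, uint64_t blk_pte,
					     uint64_t table_pte, uint64_t *my_table,
					     int unmap_idx, size_t size)
	{
		uint64_t expected = blk_pte;
		uint64_t *tablep = my_table;

		/* Try to install our partial next-level table over the block entry */
		if (!atomic_compare_exchange_strong(ptep, &expected, table_pte)) {
			/* Lost the race: free ours and walk the winner's table instead */
			free(my_table);
			tablep = deref_table(expected);
		} else if (unmap_idx >= 0) {
			/* Won the race with the target entry left clear: just flush */
			flush_tlb_range(size);
			return size;
		}

		/* Both the loser and the recursive-split case do a regular unmap */
		return unmap_next_level(tablep, size);
	}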
Fixes: 2c3d273eabe8 ("iommu/io-pgtable-arm: Support lockless operation") Signed-off-by: Robin Murphy [will: re-jig control flow to avoid duplicate cmpxchg test] Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 88641b4560bc..2f79efd16a05 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -574,13 +574,12 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, return 0; tablep = iopte_deref(pte, data); + } else if (unmap_idx >= 0) { + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + return size; } - if (unmap_idx < 0) - return __arm_lpae_unmap(data, iova, size, lvl, tablep); - - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); - return size; + return __arm_lpae_unmap(data, iova, size, lvl, tablep); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, -- cgit v1.2.3 From 0f02477d16980938a84aba8688a4e3a303306116 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Sun, 19 Aug 2018 15:51:10 +0800 Subject: iommu/arm-smmu-v3: Fix unexpected CMD_SYNC timeout The break condition of: (int)(VAL - sync_idx) >= 0 in the __arm_smmu_sync_poll_msi() polling loop requires that sync_idx be increased monotonically according to the sequence of the CMDs in the cmdq. However, since the msidata is populated using atomic_inc_return_relaxed() before taking the command-queue spinlock, the following scenario can occur: CPU0 takes msidata=0 while CPU1 takes msidata=1; CPU1 inserts cmd1 before CPU0 inserts cmd0; the SMMU executes cmd1 and then cmd0, and CPU1's poll times out because its msidata=1 is overridden by cmd0, i.e. VAL=0 while sync_idx=1. This is not a functional problem, since the caller will eventually either time out or exit due to another CMD_SYNC; however, it's clearly not what the code is supposed to be doing. Fix it by incrementing the sequence count with the command-queue lock held, allowing us to drop the atomic operations altogether.
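As a rough userspace analogy of the fix (a pthread mutex standing in for the cmdq spinlock; illustrative only, not the driver code), the point is that the sequence number is assigned only while the queue lock is held, so values are monotonic in queue-insertion order and the wrap-safe poll comparison holds:

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t cmdq_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint32_t sync_nr;

	/* Returns the msidata value the caller should poll for */
	static uint32_t issue_sync_sketch(void)
	{
		uint32_t msidata;

		pthread_mutex_lock(&cmdq_lock);
		msidata = ++sync_nr;	/* sequence matches queue-insertion order */
		/* build the CMD_SYNC carrying msidata and insert it here */
		pthread_mutex_unlock(&cmdq_lock);

		return msidata;		/* poll until (int)(VAL - msidata) >= 0 */
	}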
Signed-off-by: Zhen Lei [will: dropped the specialised cmd building routine for now] Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index feef1226f705..48baffd07aef 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -567,7 +567,7 @@ struct arm_smmu_device { int gerr_irq; int combined_irq; - atomic_t sync_nr; + u32 sync_nr; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -948,14 +948,13 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu) struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC, .sync = { - .msidata = atomic_inc_return_relaxed(&smmu->sync_nr), .msiaddr = virt_to_phys(&smmu->sync_count), }, }; - arm_smmu_cmdq_build_cmd(cmd, &ent); - spin_lock_irqsave(&smmu->cmdq.lock, flags); + ent.sync.msidata = ++smmu->sync_nr; + arm_smmu_cmdq_build_cmd(cmd, &ent); arm_smmu_cmdq_insert_cmd(smmu, cmd); spin_unlock_irqrestore(&smmu->cmdq.lock, flags); @@ -2180,7 +2179,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu) { int ret; - atomic_set(&smmu->sync_nr, 0); ret = arm_smmu_init_queues(smmu); if (ret) return ret; -- cgit v1.2.3 From 901510ee32f7190902f6fe4affb463e5d86a804c Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Sun, 19 Aug 2018 15:51:11 +0800 Subject: iommu/arm-smmu-v3: Avoid back-to-back CMD_SYNC operations Putting adjacent CMD_SYNCs into the command queue is nonsensical, but can happen when multiple CPUs are inserting commands. Rather than leave the poor old hardware to chew through these operations, we can instead drop the subsequent SYNCs and poll for completion of the first. This has been shown to improve IO performance under pressure, where the number of SYNC operations reduces by about a third: CMD_SYNCs reduced: 19542181 CMD_SYNCs total: 58098548 (include reduced) CMDs total: 116197099 (TLBI:SYNC about 1:1) Signed-off-by: Zhen Lei Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 48baffd07aef..e395f1ff3f81 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -568,6 +568,7 @@ struct arm_smmu_device { int gerr_irq; int combined_irq; u32 sync_nr; + u8 prev_cmd_opcode; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -901,6 +902,8 @@ static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd) struct arm_smmu_queue *q = &smmu->cmdq.q; bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]); + while (queue_insert_raw(q, cmd) == -ENOSPC) { if (queue_poll_cons(q, false, wfe)) dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); @@ -953,9 +956,16 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu) }; spin_lock_irqsave(&smmu->cmdq.lock, flags); - ent.sync.msidata = ++smmu->sync_nr; - arm_smmu_cmdq_build_cmd(cmd, &ent); - arm_smmu_cmdq_insert_cmd(smmu, cmd); + + /* Piggy-back on the previous command if it's a SYNC */ + if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) { + ent.sync.msidata = smmu->sync_nr; + } else { + ent.sync.msidata = ++smmu->sync_nr; + arm_smmu_cmdq_build_cmd(cmd, &ent); + arm_smmu_cmdq_insert_cmd(smmu, cmd); + } + spin_unlock_irqrestore(&smmu->cmdq.lock, flags); return __arm_smmu_sync_poll_msi(smmu, 
ent.sync.msidata); -- cgit v1.2.3 From 07fdef34d2be6811f00c6f9e4e2a1483cf86696c Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Thu, 20 Sep 2018 17:10:21 +0100 Subject: iommu/arm-smmu-v3: Implement flush_iotlb_all hook .flush_iotlb_all is currently stubbed to arm_smmu_iotlb_sync() since the only time it would ever need to actually do anything is for callers doing their own explicit batching, e.g.: iommu_unmap_fast(domain, ...); iommu_unmap_fast(domain, ...); iommu_iotlb_flush_all(domain, ...); where since io-pgtable still issues the TLBI commands implicitly in the unmap instead of implementing .iotlb_range_add, the "flush" only needs to ensure completion of those already-in-flight invalidations. However, we're about to start using it in anger with flush queues, so let's get a proper implementation wired up. Signed-off-by: Zhen Lei Reviewed-by: Robin Murphy [rm: document why it wasn't a bug] Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index e395f1ff3f81..f10c852479fc 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1781,6 +1781,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) return ops->unmap(ops, iova, size); } +static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + if (smmu_domain->smmu) + arm_smmu_tlb_inv_context(smmu_domain); +} + static void arm_smmu_iotlb_sync(struct iommu_domain *domain) { struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; @@ -2008,7 +2016,7 @@ static struct iommu_ops arm_smmu_ops = { .attach_dev = arm_smmu_attach_dev, .map = arm_smmu_map, .unmap = arm_smmu_unmap, - .flush_iotlb_all = arm_smmu_iotlb_sync, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, .add_device = arm_smmu_add_device, -- cgit v1.2.3 From 7d321bd3542500caf125249f44dc37cb4e738013 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 1 Oct 2018 12:42:49 +0100 Subject: iommu/arm-smmu: Ensure that page-table updates are visible before TLBI The IO-pgtable code relies on the driver TLB invalidation callbacks to ensure that all page-table updates are visible to the IOMMU page-table walker. In the case that the page-table walker is cache-coherent, we cannot rely on an implicit DSB from the DMA-mapping code, so we must ensure that we execute a DSB in our tlb_add_flush() callback prior to triggering the invalidation. Cc: Cc: Robin Murphy Fixes: 2df7a25ce4a7 ("iommu/arm-smmu: Clean up DMA API usage") Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index fd1b80ef9490..e7cbf4fcf61d 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -469,6 +469,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + if (stage1) { reg += leaf ? 
ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; @@ -510,6 +513,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, struct arm_smmu_domain *smmu_domain = cookie; void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); } -- cgit v1.2.3 From 2da274cdf998a1c12afa6b5975db2df1df01edf1 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Thu, 20 Sep 2018 17:10:22 +0100 Subject: iommu/dma: Add support for non-strict mode With the flush queue infrastructure already abstracted into IOVA domains, hooking it up in iommu-dma is pretty simple. Since there is a degree of dependency on the IOMMU driver knowing what to do to play along, we key the whole thing off a domain attribute which will be set on default DMA ops domains to request non-strict invalidation. That way, drivers can indicate the appropriate support by acknowledging the attribute, and we can easily fall back to strict invalidation otherwise. The flush queue callback needs a handle on the iommu_domain which owns our cookie, so we have to add a pointer back to that, but neatly, that's also sufficient to indicate whether we're using a flush queue or not, and thus which way to release IOVAs. The only slight subtlety is switching __iommu_dma_unmap() from calling iommu_unmap() to explicit iommu_unmap_fast()/iommu_tlb_sync() so that we can elide the sync entirely in non-strict mode. Signed-off-by: Zhen Lei [rm: convert to domain attribute, tweak comments and commit message] Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/dma-iommu.c | 32 +++++++++++++++++++++++++++++++- include/linux/iommu.h | 1 + 2 files changed, 32 insertions(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 511ff9a1d6d9..cc1bf786cfac 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -55,6 +55,9 @@ struct iommu_dma_cookie { }; struct list_head msi_page_list; spinlock_t msi_lock; + + /* Domain for flush queue callback; NULL if flush queue not in use */ + struct iommu_domain *fq_domain; }; static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) @@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev, return ret; } +static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad) +{ + struct iommu_dma_cookie *cookie; + struct iommu_domain *domain; + + cookie = container_of(iovad, struct iommu_dma_cookie, iovad); + domain = cookie->fq_domain; + /* + * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE + * implies that ops->flush_iotlb_all must be non-NULL. 
+ */ + domain->ops->flush_iotlb_all(domain); +} + /** * iommu_dma_init_domain - Initialise a DMA mapping domain * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() @@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; unsigned long order, base_pfn, end_pfn; + int attr; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } init_iova_domain(iovad, 1UL << order, base_pfn); + + if (!cookie->fq_domain && !iommu_domain_get_attr(domain, + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) { + cookie->fq_domain = domain; + init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL); + } + if (!dev) return 0; @@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, /* The MSI case is only ever cleaning up its most recent allocation */ if (cookie->type == IOMMU_DMA_MSI_COOKIE) cookie->msi_iova -= size; + else if (cookie->fq_domain) /* non-strict mode */ + queue_iova(iovad, iova_pfn(iovad, iova), + size >> iova_shift(iovad), 0); else free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad)); @@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, dma_addr -= iova_off; size = iova_align(iovad, size + iova_off); - WARN_ON(iommu_unmap(domain, dma_addr, size) != size); + WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size); + if (!cookie->fq_domain) + iommu_tlb_sync(domain); iommu_dma_free_iova(cookie, dma_addr, size); } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 87994c265bf5..decabe8e8dbe 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -124,6 +124,7 @@ enum iommu_attr { DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_MAX, }; -- cgit v1.2.3 From 68a6efe86f6a16e25556a2aff40efad41097b486 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Thu, 20 Sep 2018 17:10:23 +0100 Subject: iommu: Add "iommu.strict" command line option Add a generic command line option to enable lazy unmapping via IOVA flush queues, which will initially be supported by iommu-dma. This echoes the semantics of "intel_iommu=strict" (albeit with the opposite default value), but in the driver-agnostic fashion of "iommu.passthrough". Signed-off-by: Zhen Lei [rm: move handling out of SMMUv3 driver, clean up documentation] Signed-off-by: Robin Murphy [will: dropped broken printk when parsing command-line option] Signed-off-by: Will Deacon --- Documentation/admin-guide/kernel-parameters.txt | 12 ++++++++++++ drivers/iommu/iommu.c | 14 ++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'drivers/iommu') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9871e649ffef..92ae12aeabf4 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1749,6 +1749,18 @@ nobypass [PPC/POWERNV] Disable IOMMU bypass, using IOMMU for PCI devices. + iommu.strict= [ARM64] Configure TLB invalidation behaviour Format: { "0" | "1" } 0 - Lazy mode. Request that DMA unmap operations use deferred invalidation of hardware TLBs, for increased throughput at the cost of reduced device isolation.
+ Will fall back to strict mode if not supported by + the relevant IOMMU driver. + 1 - Strict mode (default). + DMA unmap operations invalidate IOMMU hardware TLBs + synchronously. + iommu.passthrough= [ARM64] Configure DMA to bypass the IOMMU by default. Format: { "0" | "1" } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8c15c5980299..2b6dad2aa9f1 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -41,6 +41,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; #else static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; #endif +static bool iommu_dma_strict __read_mostly = true; struct iommu_callback_data { const struct iommu_ops *ops; @@ -131,6 +132,12 @@ static int __init iommu_set_def_domain_type(char *str) } early_param("iommu.passthrough", iommu_set_def_domain_type); +static int __init iommu_dma_setup(char *str) +{ + return kstrtobool(str, &iommu_dma_strict); +} +early_param("iommu.strict", iommu_dma_setup); + static ssize_t iommu_group_attr_show(struct kobject *kobj, struct attribute *__attr, char *buf) { @@ -1072,6 +1079,13 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) group->default_domain = dom; if (!group->domain) group->domain = dom; + + if (dom && !iommu_dma_strict) { + int attr = 1; + iommu_domain_set_attr(dom, + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, + &attr); + } } ret = iommu_group_add_device(group, dev); -- cgit v1.2.3 From b6b65ca20bc93d14319f9b5cf98fd3c19a4244e3 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Thu, 20 Sep 2018 17:10:24 +0100 Subject: iommu/io-pgtable-arm: Add support for non-strict mode Non-strict mode is simply a case of skipping 'regular' leaf TLBIs, since the sync is already factored out into ops->iotlb_sync at the core API level. Non-leaf invalidations where we change the page table structure itself still have to be issued synchronously in order to maintain walk caches correctly. To save having to reason about it too much, make sure the invalidation in arm_lpae_split_blk_unmap() just performs its own unconditional sync to minimise the window in which we're technically violating the break-before-make requirement on a live mapping. This might work out redundant with an outer-level sync for strict unmaps, but we'll never be splitting blocks on a DMA fastpath anyway. Signed-off-by: Zhen Lei [rm: tweak comment, commit message, split_blk_unmap logic and barriers] Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 14 ++++++++++++-- drivers/iommu/io-pgtable.h | 5 +++++ 2 files changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 2f79efd16a05..237cacd4a62b 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -576,6 +576,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, tablep = iopte_deref(pte, data); } else if (unmap_idx >= 0) { io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + io_pgtable_tlb_sync(&data->iop); return size; } @@ -609,6 +610,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, io_pgtable_tlb_sync(iop); ptep = iopte_deref(pte, data); __arm_lpae_free_pgtable(data, lvl + 1, ptep); + } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { + /* + * Order the PTE update against queueing the IOVA, to + * guarantee that a flush callback from a different CPU + * has observed it before the TLBIALL can be issued.
+ */ + smp_wmb(); } else { io_pgtable_tlb_add_flush(iop, iova, size, size, true); } @@ -771,7 +779,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) u64 reg; struct arm_lpae_io_pgtable *data; - if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA)) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA | + IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -863,7 +872,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) struct arm_lpae_io_pgtable *data; /* The NS quirk doesn't apply at stage 2 */ - if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA | + IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; data = arm_lpae_alloc_pgtable(cfg); diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 2df79093cad9..47d5ae559329 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -71,12 +71,17 @@ struct io_pgtable_cfg { * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a * software-emulated IOMMU), such that pagetable updates need not * be treated as explicit DMA data. + * + * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs + * on unmap, for DMA domains using the flush queue mechanism for + * delayed invalidation. */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) #define IO_PGTABLE_QUIRK_NO_DMA BIT(4) + #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; -- cgit v1.2.3 From 9662b99a19abccb0b7bfc91abb3fec1447c35bf0 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Thu, 20 Sep 2018 17:10:25 +0100 Subject: iommu/arm-smmu-v3: Add support for non-strict mode Now that io-pgtable knows how to dodge strict TLB maintenance, all that's left to do is bridge the gap between the IOMMU core requesting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE for default domains, and showing the appropriate IO_PGTABLE_QUIRK_NON_STRICT flag to alloc_io_pgtable_ops(). Signed-off-by: Zhen Lei [rm: convert to domain attribute, tweak commit message] Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 79 ++++++++++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 23 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index f10c852479fc..db402e8b068b 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -612,6 +612,7 @@ struct arm_smmu_domain { struct mutex init_mutex; /* Protects smmu pointer */ struct io_pgtable_ops *pgtbl_ops; + bool non_strict; enum arm_smmu_domain_stage stage; union { @@ -1407,6 +1408,12 @@ static void arm_smmu_tlb_inv_context(void *cookie) cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; } + /* + * NOTE: when io-pgtable is in non-strict mode, we may get here with + * PTEs previously cleared by unmaps on the current CPU not yet visible + * to the SMMU. We are relying on the DSB implicit in queue_inc_prod() + * to guarantee those are observed before the TLBI. Do be careful, 007. 
+ */ arm_smmu_cmdq_issue_cmd(smmu, &cmd); __arm_smmu_tlb_sync(smmu); } @@ -1633,6 +1640,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) if (smmu->features & ARM_SMMU_FEAT_COHERENCY) pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + if (smmu_domain->non_strict) + pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; + pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) return -ENOMEM; @@ -1934,15 +1944,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - - switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; + switch (domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); + return 0; + default: + return -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + *(int *)data = smmu_domain->non_strict; + return 0; + default: + return -ENODEV; + } + break; default: - return -ENODEV; + return -EINVAL; } } @@ -1952,26 +1974,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - mutex_lock(&smmu_domain->init_mutex); - switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; + switch (domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + if (smmu_domain->smmu) { + ret = -EPERM; + goto out_unlock; + } + + if (*(int *)data) + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + else + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + break; + default: + ret = -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch(attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + smmu_domain->non_strict = *(int *)data; + break; + default: + ret = -ENODEV; } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; default: - ret = -ENODEV; + ret = -EINVAL; } out_unlock: -- cgit v1.2.3 From b2dfeba654cb08db327d0ed4547b66c2f8fce997 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 20 Sep 2018 17:10:26 +0100 Subject: iommu/io-pgtable-arm-v7s: Add support for non-strict mode As for LPAE, it's simply a case of skipping the leaf invalidation for a regular unmap, and ensuring that the one in split_blk_unmap() is paired with an explicit sync ASAP rather than relying on one which might only eventually happen way down the line. 
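The smp_wmb() added in the diff below (mirroring the LPAE patch earlier in the series) publishes the cleared PTE before the IOVA can reach the flush queue. A loose userspace analogy of that pairing, with C11 atomics standing in for smp_wmb() and the flush-queue callback (illustrative only, not the driver code):

	#include <stdatomic.h>
	#include <stdint.h>

	static uint64_t pte = 0xdeadbeef;	/* pretend page-table entry */
	static _Atomic uint64_t queued_iova;	/* pretend flush-queue slot */

	static void nonstrict_unmap_sketch(uint64_t iova)
	{
		pte = 0;					/* clear the PTE */
		atomic_thread_fence(memory_order_release);	/* smp_wmb() analogue */
		atomic_store_explicit(&queued_iova, iova,
				      memory_order_relaxed);	/* queue_iova() */
	}

	static void flush_queue_callback_sketch(void)
	{
		/* The CPU draining the queue observes the cleared PTE first... */
		uint64_t iova = atomic_load_explicit(&queued_iova,
						     memory_order_acquire);
		(void)iova;
		/* ...and only then issues the TLBIALL via flush_iotlb_all() */
	}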
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index b5948ba6b3b3..445c3bde0480 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -587,6 +587,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, } io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + io_pgtable_tlb_sync(&data->iop); return size; } @@ -642,6 +643,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, io_pgtable_tlb_sync(iop); ptep = iopte_deref(pte[i], lvl); __arm_v7s_free_table(ptep, lvl + 1, data); + } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { + /* + * Order the PTE update against queueing the IOVA, to + * guarantee that a flush callback from a different CPU + * has observed it before the TLBIALL can be issued. + */ + smp_wmb(); } else { io_pgtable_tlb_add_flush(iop, iova, blk_size, blk_size, true); @@ -712,7 +720,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, IO_PGTABLE_QUIRK_NO_PERMS | IO_PGTABLE_QUIRK_TLBI_ON_MAP | IO_PGTABLE_QUIRK_ARM_MTK_4GB | - IO_PGTABLE_QUIRK_NO_DMA)) + IO_PGTABLE_QUIRK_NO_DMA | + IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */ -- cgit v1.2.3 From 44f6876a00e83df5fd28681502b19b0f51e4a3c6 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 20 Sep 2018 17:10:27 +0100 Subject: iommu/arm-smmu: Support non-strict mode All we need is to wire up .flush_iotlb_all properly and implement the domain attribute, and iommu-dma and io-pgtable will do the rest for us. The only real subtlety is documenting the barrier semantics we're introducing between io-pgtable and the drivers for non-strict flushes. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 93 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 66 insertions(+), 27 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e7cbf4fcf61d..1030027cbcc6 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -246,6 +246,7 @@ struct arm_smmu_domain { const struct iommu_gather_ops *tlb_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; + bool non_strict; struct mutex init_mutex; /* Protects smmu pointer */ spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ struct iommu_domain domain; @@ -447,7 +448,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie) struct arm_smmu_cfg *cfg = &smmu_domain->cfg; void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); - writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); + /* + * NOTE: this is not a relaxed write; it needs to guarantee that PTEs + * cleared by the current CPU are visible to the SMMU before the TLBI. 
+ */ + writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); arm_smmu_tlb_sync_context(cookie); } @@ -457,7 +462,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie) struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *base = ARM_SMMU_GR0(smmu); - writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); + /* NOTE: see above */ + writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); arm_smmu_tlb_sync_global(smmu); } @@ -869,6 +875,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + if (smmu_domain->non_strict) + pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; + smmu_domain->smmu = smmu; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) { @@ -1258,6 +1267,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, return ops->unmap(ops, iova, size); } +static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + if (smmu_domain->tlb_ops) + smmu_domain->tlb_ops->tlb_flush_all(smmu_domain); +} + static void arm_smmu_iotlb_sync(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -1476,15 +1493,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - - switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; + switch(domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); + return 0; + default: + return -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + *(int *)data = smmu_domain->non_strict; + return 0; + default: + return -ENODEV; + } + break; default: - return -ENODEV; + return -EINVAL; } } @@ -1494,28 +1523,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - mutex_lock(&smmu_domain->init_mutex); - switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; + switch(domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + if (smmu_domain->smmu) { + ret = -EPERM; + goto out_unlock; + } + + if (*(int *)data) + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + else + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + break; + default: + ret = -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + smmu_domain->non_strict = *(int *)data; + break; + default: + ret = -ENODEV; } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; default: - ret = -ENODEV; + ret = -EINVAL; } - out_unlock: mutex_unlock(&smmu_domain->init_mutex); return ret; @@ -1568,7 +1607,7 @@ static struct iommu_ops arm_smmu_ops = { .attach_dev = arm_smmu_attach_dev, .map = arm_smmu_map, .unmap = arm_smmu_unmap, - .flush_iotlb_all = arm_smmu_iotlb_sync, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, .add_device = 
arm_smmu_add_device, -- cgit v1.2.3
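Taken together, the intended usage of the series (per the kernel-parameters.txt hunk above) is to opt a system into lazy invalidation from the command line: booting with

	iommu.strict=0

makes iommu_group_get_for_dev() request DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE on each default DMA domain, the SMMU drivers translate that into IO_PGTABLE_QUIRK_NON_STRICT for io-pgtable, and iommu-dma then releases IOVAs through a flush queue whose callback is the driver's .flush_iotlb_all. Leaving the option at its default of 1, or using an unmanaged domain, keeps the existing strict, synchronous invalidation behaviour.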