From 791c2b17fb4023f21c3cbf5f268af01d9b8cb7cc Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Thu, 13 Apr 2023 14:40:25 +0100
Subject: iommu: Optimise PCI SAC address trick

Per the reasoning in commit 4bf7fda4dce2 ("iommu/dma: Add config for PCI
SAC address trick") and its subsequent revert, this mechanism no longer
serves its original purpose, but now only works around broken
hardware/drivers in a way that is unfortunately too impactful to remove.

This does not, however, prevent us from solving the performance impact
which that workaround has on large-scale systems that don't need it. Once
the 32-bit IOVA space fills up and a workload starts allocating and
freeing on both sides of the boundary, the opportunistic SAC allocation
can then end up spending significant time hunting down scattered
fragments of free 32-bit space, or just reestablishing max32_alloc_size.
This can easily be exacerbated by a change in allocation pattern, such as
by changing the network MTU, which can increase pressure on the 32-bit
space by leaving a large quantity of cached IOVAs which are now the wrong
size to be recycled, but also won't be freed, since the non-opportunistic
allocations can still be satisfied from the whole 64-bit space without
triggering the reclaim path.

However, in the context of a workaround where smaller DMA addresses
aren't simply a preference but a necessity, if we get to that point at
all then in fact it's already the endgame. The nature of the allocator is
currently such that the first IOVA we give to a device after the 32-bit
space runs out will be the highest possible address for that device,
ever. If that works, then great, we know we can optimise for speed by
always allocating from the full range. And if it doesn't, then the worst
has already happened and any brokenness is now showing, so there's little
point in continuing to try to hide it.

To that end, implement a flag to refine the SAC business into a
per-device policy that can automatically get itself out of the way if
and when it stops being useful.

Cc: Linus Torvalds
Cc: Jakub Kicinski
Reviewed-by: John Garry
Signed-off-by: Robin Murphy
Tested-by: Vasant Hegde
Tested-by: Jakub Kicinski
Link: https://lore.kernel.org/r/b8502b115b915d2a3fabde367e099e39106686c8.1681392791.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel
---
 include/linux/iommu.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux/iommu.h')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d31642596675..b1dcb1b9b170 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -409,6 +409,7 @@ struct iommu_fault_param {
  * @priv: IOMMU Driver private data
  * @max_pasids: number of PASIDs this device can consume
  * @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
  *
  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
  *	struct iommu_group	*iommu_group;
@@ -422,6 +423,7 @@ struct dev_iommu {
 	void		*priv;
 	u32		max_pasids;
 	u32		attach_deferred:1;
+	u32		pci_32bit_workaround:1;
 };

 int iommu_device_register(struct iommu_device *iommu,
--
cgit v1.2.3
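Only the include/linux/iommu.h hunk is shown in this view; the consumer of
the new flag lives in the drivers/iommu/dma-iommu.c part of the same patch.
As a condensed sketch of the self-disabling policy described above (the
function shape and locals here are illustrative, not the verbatim
dma-iommu.c change):

	/*
	 * Illustrative sketch of the self-disabling SAC policy. While the
	 * workaround is active, opportunistically try a 32-bit (SAC) IOVA
	 * first; once that space is exhausted, permanently switch this
	 * device to full-range allocation.
	 */
	static unsigned long dma_alloc_iova_pfn(struct iova_domain *iovad,
						unsigned long iova_len,
						u64 dma_limit,
						struct device *dev)
	{
		unsigned long shift = iova_shift(iovad);
		unsigned long iova;

		if (dma_limit > DMA_BIT_MASK(32) &&
		    dev->iommu->pci_32bit_workaround) {
			iova = alloc_iova_fast(iovad, iova_len,
					       DMA_BIT_MASK(32) >> shift, false);
			if (iova)
				return iova;

			/*
			 * 32-bit space has run out: the next allocation will
			 * be the highest address this device will ever see,
			 * so if that works, the workaround can get out of
			 * the way for good.
			 */
			dev->iommu->pci_32bit_workaround = false;
			dev_notice(dev, "Using %d-bit DMA addresses\n",
				   bits_per(dma_limit));
		}

		return alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
	}
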
From 4298780126c298f20ae4bc8676591eaf8c48767e Mon Sep 17 00:00:00 2001
From: Jacob Pan
Date: Wed, 9 Aug 2023 20:47:54 +0800
Subject: iommu: Generalize PASID 0 for normal DMA w/o PASID

PCIe Process Address Space ID (PASID) is used to tag DMA traffic; it
provides finer-grained isolation than the requester ID (RID). For each
device/RID, PASID 0 is a special value reserved for normal DMA without
PASID.

This is universal across all architectures that support PASID, so it is
warranted to be reserved globally and declared in the common header.
Consequently, we can avoid conflicts between different PASID use cases in
the generic code, e.g. SVA and the DMA API with PASIDs. This paves the
way for device drivers to choose a global PASID policy while continuing
to do normal DMA.

Note that VT-d could support a non-zero RID/NO_PASID, but this is
currently not used.

Reviewed-by: Lu Baolu
Reviewed-by: Kevin Tian
Reviewed-by: Jean-Philippe Brucker
Reviewed-by: Jason Gunthorpe
Signed-off-by: Jacob Pan
Link: https://lore.kernel.org/r/20230802212427.1497170-2-jacob.jun.pan@linux.intel.com
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c |  2 +-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c     | 16 ++++++++--------
 drivers/iommu/intel/iommu.c                     | 24 ++++++++++++------------
 drivers/iommu/intel/pasid.c                     |  2 +-
 drivers/iommu/intel/pasid.h                     |  2 --
 include/linux/iommu.h                           |  1 +
 6 files changed, 23 insertions(+), 24 deletions(-)

(limited to 'include/linux/iommu.h')

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index a5a63b1c947e..5e6b39881c04 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -80,7 +80,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	 * be some overlap between use of both ASIDs, until we invalidate the
 	 * TLB.
 	 */
-	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+	arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);

 	/* Invalidate TLB entries previously associated with that context */
 	arm_smmu_tlb_inv_asid(smmu, asid);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 9b0dc3505601..ee70687f060b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1059,7 +1059,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
 	/*
 	 * This function handles the following cases:
 	 *
-	 * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+	 * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
 	 * (2) Install a secondary CD, for SID+SSID traffic.
 	 * (3) Update ASID of a CD. Atomically write the first 64 bits of the
 	 *     CD, then invalidate the old entry and mappings.
@@ -1607,7 +1607,7 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
 	sid = FIELD_GET(PRIQ_0_SID, evt[0]);
 	ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
-	ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
+	ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : IOMMU_NO_PASID;
 	last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
 	grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
@@ -1748,7 +1748,7 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
 	 */
 	*cmd = (struct arm_smmu_cmdq_ent) {
 		.opcode			= CMDQ_OP_ATC_INV,
-		.substream_valid	= !!ssid,
+		.substream_valid	= (ssid != IOMMU_NO_PASID),
 		.atc.ssid		= ssid,
 	};
@@ -1795,7 +1795,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_cmdq_batch cmds;

-	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
+	arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);

 	cmds.num = 0;
 	for (i = 0; i < master->num_streams; i++) {
@@ -1875,7 +1875,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
 		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 	}
-	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+	arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
 }

 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
@@ -1968,7 +1968,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
 	 * Unfortunately, this can't be leaf-only since we may have
 	 * zapped an entire table.
 	 */
-	arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
+	arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
 }

 void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
@@ -2142,7 +2142,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
 	 * the master has been added to the devices list for this domain.
 	 * This isn't an issue because the STE hasn't been installed yet.
 	 */
-	ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
+	ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
 	if (ret)
 		goto out_free_cd_tables;
@@ -2328,7 +2328,7 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
 	pdev = to_pci_dev(master->dev);

 	atomic_inc(&smmu_domain->nr_ats_masters);
-	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+	arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
 	if (pci_enable_ats(pdev, stu))
 		dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
 }
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5c8c5cdc36cf..ddff43def3ab 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -877,7 +877,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
 	}
 	/* For request-without-pasid, get the pasid from context entry */
 	if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
-		pasid = PASID_RID2PASID;
+		pasid = IOMMU_NO_PASID;

 	dir_index = pasid >> PASID_PDE_SHIFT;
 	pde = &dir[dir_index];
@@ -1449,7 +1449,7 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
 	qdep = info->ats_qdep;
 	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, qdep, addr, mask);
-	quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
+	quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
 }

 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1484,7 +1484,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 		ih = 1 << 6;

 	if (domain->use_first_level) {
-		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
+		qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, pages, ih);
 	} else {
 		unsigned long bitmask = aligned_pages - 1;
@@ -1554,7 +1554,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
 		u16 did = domain_id_iommu(dmar_domain, iommu);

 		if (dmar_domain->use_first_level)
-			qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
+			qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, 0, -1, 0);
 		else
 			iommu->flush.flush_iotlb(iommu, did, 0, 0,
 						 DMA_TLB_DSI_FLUSH);
@@ -1940,7 +1940,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 			context_pdts(pds);

 		/* Setup the RID_PASID field: */
-		context_set_sm_rid2pasid(context, PASID_RID2PASID);
+		context_set_sm_rid2pasid(context, IOMMU_NO_PASID);

 		/*
 		 * Setup the Device-TLB enable bit and Page request
@@ -2420,13 +2420,13 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
 		/* Setup the PASID entry for requests without PASID: */
 		if (hw_pass_through && domain_type_is_si(domain))
 			ret = intel_pasid_setup_pass_through(iommu, domain,
-					dev, PASID_RID2PASID);
+					dev, IOMMU_NO_PASID);
 		else if (domain->use_first_level)
 			ret = domain_setup_first_level(iommu, domain, dev,
-					PASID_RID2PASID);
+					IOMMU_NO_PASID);
 		else
 			ret = intel_pasid_setup_second_level(iommu, domain,
-					dev, PASID_RID2PASID);
+					dev, IOMMU_NO_PASID);
 		if (ret) {
 			dev_err(dev, "Setup RID2PASID failed\n");
 			device_block_translation(dev);
@@ -3968,7 +3968,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
 	if (!dev_is_real_dma_subdevice(info->dev)) {
 		if (dev_is_pci(info->dev) && sm_supported(iommu))
 			intel_pasid_tear_down_entry(iommu, info->dev,
-					PASID_RID2PASID, false);
+					IOMMU_NO_PASID, false);

 		iommu_disable_pci_caps(info);
 		domain_context_clear(info);
@@ -3997,7 +3997,7 @@ static void device_block_translation(struct device *dev)
 	if (!dev_is_real_dma_subdevice(dev)) {
 		if (sm_supported(iommu))
 			intel_pasid_tear_down_entry(iommu, dev,
-					PASID_RID2PASID, false);
+					IOMMU_NO_PASID, false);
 		else
 			domain_context_clear(info);
 	}
@@ -4331,7 +4331,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
 	list_for_each_entry(info, &domain->devices, link)
 		intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
-						     PASID_RID2PASID);
+						     IOMMU_NO_PASID);
 }

 static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
@@ -4987,7 +4987,7 @@ void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
 		return;

 	sid = PCI_DEVID(info->bus, info->devfn);
-	if (pasid == PASID_RID2PASID) {
+	if (pasid == IOMMU_NO_PASID) {
 		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
 				   qdep, address, mask);
 	} else {
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c5d479770e12..23dca3bc319d 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -438,7 +438,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
 	 * SVA usage, device could do DMA with multiple PASIDs. It is more
 	 * efficient to flush devTLB specific to the PASID.
 	 */
-	if (pasid == PASID_RID2PASID)
+	if (pasid == IOMMU_NO_PASID)
 		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
 	else
 		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index d6b7d21244b1..4e9e68c3c388 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -10,8 +10,6 @@
 #ifndef __INTEL_PASID_H
 #define __INTEL_PASID_H

-#define PASID_RID2PASID		0x0
-#define PASID_MIN		0x1
 #define PASID_MAX		0x100000
 #define PASID_PTE_MASK		0x3F
 #define PASID_PTE_PRESENT	1
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d31642596675..2870bc29d456 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -196,6 +196,7 @@ enum iommu_dev_features {
 	IOMMU_DEV_FEAT_IOPF,
 };

+#define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
 #define IOMMU_PASID_INVALID	(-1U)
 typedef unsigned int ioasid_t;
--
cgit v1.2.3
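For illustration, the convention these hunks adopt can be distilled into a
few lines. The command structure below is hypothetical (not from any
driver); it only shows the pattern the SMMUv3 change above uses, where
PASID 0 tags ordinary non-PASID DMA and therefore must not be treated as a
real substream:

	#include <linux/iommu.h>	/* IOMMU_NO_PASID */

	struct example_inv_cmd {
		bool	substream_valid;	/* hypothetical layout */
		u32	ssid;
	};

	static void example_build_inv_cmd(struct example_inv_cmd *cmd, u32 pasid)
	{
		/* Only a real (non-zero) PASID selects a substream */
		cmd->substream_valid = (pasid != IOMMU_NO_PASID);
		cmd->ssid = pasid;
	}
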
From 2dcebc7ddce7ffd4015824227c7623558b89d721 Mon Sep 17 00:00:00 2001
From: Jacob Pan
Date: Wed, 9 Aug 2023 20:47:55 +0800
Subject: iommu: Move global PASID allocation from SVA to core

Intel ENQCMD requires a single PASID to be shared between multiple
devices, as the PASID is stored in a single per-process MSR and userspace
can use only that one PASID. This means that the PASID allocation for any
ENQCMD-using device driver must always come from a shared global pool,
regardless of what kind of domain the PASID will be used with.

Split the code for the global PASID allocator into
iommu_alloc/free_global_pasid() so that drivers can attach non-SVA
domains to PASIDs as well. This moves the global PASID allocation API
from SVA into the IOMMU core. Reserved PASIDs, currently only RID_PASID,
are excluded from global PASID allocation. It is expected that device
drivers will use the allocated PASIDs to attach to appropriate IOMMU
domains for use.

Reviewed-by: Lu Baolu
Reviewed-by: Kevin Tian
Reviewed-by: Jason Gunthorpe
Signed-off-by: Jacob Pan
Link: https://lore.kernel.org/r/20230802212427.1497170-3-jacob.jun.pan@linux.intel.com
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iommu-sva.c | 29 ++++++++++-------------------
 drivers/iommu/iommu.c     | 28 ++++++++++++++++++++++++++++
 include/linux/iommu.h     | 10 ++++++++++
 3 files changed, 48 insertions(+), 19 deletions(-)

(limited to 'include/linux/iommu.h')

diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 05c0fb2acbc4..b78671a8a914 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -10,34 +10,30 @@
 #include "iommu-sva.h"

 static DEFINE_MUTEX(iommu_sva_lock);
-static DEFINE_IDA(iommu_global_pasid_ida);

 /* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
 {
+	ioasid_t pasid;
 	int ret = 0;

-	if (min == IOMMU_PASID_INVALID ||
-	    max == IOMMU_PASID_INVALID ||
-	    min == 0 || max < min)
-		return -EINVAL;
-
 	if (!arch_pgtable_dma_compat(mm))
 		return -EBUSY;

 	mutex_lock(&iommu_sva_lock);
 	/* Is a PASID already associated with this mm? */
 	if (mm_valid_pasid(mm)) {
-		if (mm->pasid < min || mm->pasid > max)
+		if (mm->pasid >= dev->iommu->max_pasids)
 			ret = -EOVERFLOW;
 		goto out;
 	}

-	ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
-	if (ret < 0)
+	pasid = iommu_alloc_global_pasid(dev);
+	if (pasid == IOMMU_PASID_INVALID) {
+		ret = -ENOSPC;
 		goto out;
-
-	mm->pasid = ret;
+	}
+	mm->pasid = pasid;
 	ret = 0;
 out:
 	mutex_unlock(&iommu_sva_lock);
@@ -64,15 +60,10 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 {
 	struct iommu_domain *domain;
 	struct iommu_sva *handle;
-	ioasid_t max_pasids;
 	int ret;

-	max_pasids = dev->iommu->max_pasids;
-	if (!max_pasids)
-		return ERR_PTR(-EOPNOTSUPP);
-
 	/* Allocate mm->pasid if necessary. */
-	ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
+	ret = iommu_sva_alloc_pasid(mm, dev);
 	if (ret)
 		return ERR_PTR(ret);
@@ -217,5 +208,5 @@ void mm_pasid_drop(struct mm_struct *mm)
 	if (likely(!mm_valid_pasid(mm)))
 		return;

-	ida_free(&iommu_global_pasid_ida, mm->pasid);
+	iommu_free_global_pasid(mm->pasid);
 }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index caaf563d38ae..d199d144460c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -39,6 +39,7 @@
 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
+static DEFINE_IDA(iommu_global_pasid_ida);

 static unsigned int iommu_def_domain_type __read_mostly;
 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
@@ -3400,3 +3401,30 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,

 	return domain;
 }
+
+ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+	int ret;
+
+	/* max_pasids == 0 means that the device does not support PASID */
+	if (!dev->iommu->max_pasids)
+		return IOMMU_PASID_INVALID;
+
+	/*
+	 * max_pasids is set up by vendor driver based on number of PASID bits
+	 * supported but the IDA allocation is inclusive.
+	 */
+	ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
+			      dev->iommu->max_pasids - 1, GFP_KERNEL);
+	return ret < 0 ? IOMMU_PASID_INVALID : ret;
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);
+
+void iommu_free_global_pasid(ioasid_t pasid)
+{
+	if (WARN_ON(pasid == IOMMU_PASID_INVALID))
+		return;
+
+	ida_free(&iommu_global_pasid_ida, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2870bc29d456..6b821cb715d5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -197,6 +197,7 @@ enum iommu_dev_features {
 };

 #define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
 #define IOMMU_PASID_INVALID	(-1U)
 typedef unsigned int ioasid_t;
@@ -728,6 +729,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
 struct iommu_domain *
 iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
 			       unsigned int type);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
 #else /* CONFIG_IOMMU_API */
 struct iommu_ops {};
@@ -1089,6 +1092,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
 {
 	return NULL;
 }
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+	return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
 #endif /* CONFIG_IOMMU_API */

 /**
--
cgit v1.2.3
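For illustration, a hedged usage sketch of the new API (not from the
patch): a driver wanting ENQCMD-style work submission could pair the
global allocator with a PASID attach. The helper name "my_setup_pasid"
and the choice of domain are made up for the example.

	#include <linux/iommu.h>

	static int my_setup_pasid(struct device *dev,
				  struct iommu_domain *domain,
				  ioasid_t *out_pasid)
	{
		ioasid_t pasid = iommu_alloc_global_pasid(dev);
		int ret;

		if (pasid == IOMMU_PASID_INVALID)
			return -ENOSPC;

		/* Any domain type may be attached; the pool no longer assumes SVA */
		ret = iommu_attach_device_pasid(domain, dev, pasid);
		if (ret) {
			iommu_free_global_pasid(pasid);
			return ret;
		}

		*out_pasid = pasid;
		return 0;
	}
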
IOMMU_PASID_INVALID : ret; +} +EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid); + +void iommu_free_global_pasid(ioasid_t pasid) +{ + if (WARN_ON(pasid == IOMMU_PASID_INVALID)) + return; + + ida_free(&iommu_global_pasid_ida, pasid); +} +EXPORT_SYMBOL_GPL(iommu_free_global_pasid); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2870bc29d456..6b821cb715d5 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -197,6 +197,7 @@ enum iommu_dev_features { }; #define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */ +#define IOMMU_FIRST_GLOBAL_PASID (1U) /*starting range for allocation */ #define IOMMU_PASID_INVALID (-1U) typedef unsigned int ioasid_t; @@ -728,6 +729,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain, struct iommu_domain * iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, unsigned int type); +ioasid_t iommu_alloc_global_pasid(struct device *dev); +void iommu_free_global_pasid(ioasid_t pasid); #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; @@ -1089,6 +1092,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, { return NULL; } + +static inline ioasid_t iommu_alloc_global_pasid(struct device *dev) +{ + return IOMMU_PASID_INVALID; +} + +static inline void iommu_free_global_pasid(ioasid_t pasid) {} #endif /* CONFIG_IOMMU_API */ /** -- cgit v1.2.3 From a48ce36e2786f8bb33e9f42ea2d0c23ea4af3e0d Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 9 Aug 2023 20:48:02 +0800 Subject: iommu: Prevent RESV_DIRECT devices from blocking domains The IOMMU_RESV_DIRECT flag indicates that a memory region must be mapped 1:1 at all times. This means that the region must always be accessible to the device, even if the device is attached to a blocking domain. This is equal to saying that IOMMU_RESV_DIRECT flag prevents devices from being attached to blocking domains. This also implies that devices that implement RESV_DIRECT regions will be prevented from being assigned to user space since taking the DMA ownership immediately switches to a blocking domain. The rule of preventing devices with the IOMMU_RESV_DIRECT regions from being assigned to user space has existed in the Intel IOMMU driver for a long time. Now, this rule is being lifted up to a general core rule, as other architectures like AMD and ARM also have RMRR-like reserved regions. This has been discussed in the community mailing list and refer to below link for more details. Other places using unmanaged domains for kernel DMA must follow the iommu_get_resv_regions() and setup IOMMU_RESV_DIRECT - we do not restrict them in the core code. 
Cc: Robin Murphy
Cc: Alex Williamson
Cc: Kevin Tian
Signed-off-by: Jason Gunthorpe
Link: https://lore.kernel.org/linux-iommu/BN9PR11MB5276E84229B5BD952D78E9598C639@BN9PR11MB5276.namprd11.prod.outlook.com
Signed-off-by: Lu Baolu
Reviewed-by: Jason Gunthorpe
Acked-by: Joerg Roedel
Link: https://lore.kernel.org/r/20230724060352.113458-2-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iommu.c | 37 +++++++++++++++++++++++++++----------
 include/linux/iommu.h |  2 ++
 2 files changed, 29 insertions(+), 10 deletions(-)

(limited to 'include/linux/iommu.h')

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d199d144460c..e385a99e25e1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -960,14 +960,12 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
 	unsigned long pg_size;
 	int ret = 0;

-	if (!iommu_is_dma_domain(domain))
-		return 0;
-
-	BUG_ON(!domain->pgsize_bitmap);
-
-	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
+	pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0;
 	INIT_LIST_HEAD(&mappings);

+	if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
+		return -EINVAL;
+
 	iommu_get_resv_regions(dev, &mappings);

 	/* We need to consider overlapping regions for different devices */
@@ -975,13 +973,17 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
 		dma_addr_t start, end, addr;
 		size_t map_size = 0;

-		start = ALIGN(entry->start, pg_size);
-		end = ALIGN(entry->start + entry->length, pg_size);
+		if (entry->type == IOMMU_RESV_DIRECT)
+			dev->iommu->require_direct = 1;

-		if (entry->type != IOMMU_RESV_DIRECT &&
-		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
+		if ((entry->type != IOMMU_RESV_DIRECT &&
+		     entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
+		    !iommu_is_dma_domain(domain))
 			continue;

+		start = ALIGN(entry->start, pg_size);
+		end = ALIGN(entry->start + entry->length, pg_size);
+
 		for (addr = start; addr <= end; addr += pg_size) {
 			phys_addr_t phys_addr;
@@ -2122,6 +2124,21 @@ static int __iommu_device_set_domain(struct iommu_group *group,
 {
 	int ret;

+	/*
+	 * If the device requires IOMMU_RESV_DIRECT then we cannot allow
+	 * the blocking domain to be attached as it does not contain the
+	 * required 1:1 mapping. This test effectively excludes the device
+	 * being used with iommu_group_claim_dma_owner() which will block
+	 * vfio and iommufd as well.
+	 */
+	if (dev->iommu->require_direct &&
+	    (new_domain->type == IOMMU_DOMAIN_BLOCKED ||
+	     new_domain == group->blocking_domain)) {
+		dev_warn(dev,
+			 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
+		return -EINVAL;
+	}
+
 	if (dev->iommu->attach_deferred) {
 		if (new_domain == group->default_domain)
 			return 0;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 6b821cb715d5..54bae452975f 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -411,6 +411,7 @@ struct iommu_fault_param {
  * @priv: IOMMU Driver private data
  * @max_pasids: number of PASIDs this device can consume
  * @attach_deferred: the dma domain attachment is deferred
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
  *
  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
  *	struct iommu_group	*iommu_group;
@@ -424,6 +425,7 @@ struct dev_iommu {
 	void		*priv;
 	u32		max_pasids;
 	u32		attach_deferred:1;
+	u32		require_direct:1;
 };

 int iommu_device_register(struct iommu_device *iommu,
--
cgit v1.2.3
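
For illustration, a hedged sketch (not part of the patch) of what triggers
the new core behaviour: an IOMMU driver reporting an RMRR-like firmware
region through its get_resv_regions callback. Publishing the region as
IOMMU_RESV_DIRECT is what now makes the core set
dev->iommu->require_direct and refuse blocking domains for the device.
The callback name, address and size below are made up.

	#include <linux/iommu.h>
	#include <linux/list.h>
	#include <linux/sizes.h>

	static void my_iommu_get_resv_regions(struct device *dev,
					      struct list_head *head)
	{
		struct iommu_resv_region *region;

		/* Hypothetical firmware-described region at 2 GiB, 1 MiB long */
		region = iommu_alloc_resv_region(0x80000000, SZ_1M,
						 IOMMU_READ | IOMMU_WRITE,
						 IOMMU_RESV_DIRECT, GFP_KERNEL);
		if (region)
			list_add_tail(&region->list, head);
	}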