From 43312b710b11a085ec81eb7d1878f8858045ae2b Mon Sep 17 00:00:00 2001
From: Suravee Suthikulpanit
Date: Thu, 25 Aug 2022 06:39:34 +0000
Subject: iommu/amd: Refactor amd_iommu_domain_enable_v2 to remove locking

The current function to enable IOMMU v2 also locks the domain.
In order to reuse the same code in a different code path, in which
the domain has already been locked, refactor the function to separate
the locking from the enabling logic.

Co-developed-by: Vasant Hegde
Signed-off-by: Vasant Hegde
Signed-off-by: Suravee Suthikulpanit
Link: https://lore.kernel.org/r/20220825063939.8360-5-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd/iommu.c | 46 +++++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 19 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 07c47a7088be..771709d1dfff 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -85,6 +85,7 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
+static int domain_enable_v2(struct protection_domain *domain, int pasids);
 
 /****************************************************************************
  *
@@ -2450,11 +2451,10 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 }
 EXPORT_SYMBOL(amd_iommu_domain_direct_map);
 
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
+static int domain_enable_v2(struct protection_domain *domain, int pasids)
 {
-        struct protection_domain *domain = to_pdomain(dom);
-        unsigned long flags;
-        int levels, ret;
+        int levels;
 
         /* Number of GCR3 table levels required */
         for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
@@ -2463,7 +2463,25 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
         if (levels > amd_iommu_max_glx_val)
                 return -EINVAL;
 
-        spin_lock_irqsave(&domain->lock, flags);
+        domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+        if (domain->gcr3_tbl == NULL)
+                return -ENOMEM;
+
+        domain->glx = levels;
+        domain->flags |= PD_IOMMUV2_MASK;
+
+        amd_iommu_domain_update(domain);
+
+        return 0;
+}
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+        struct protection_domain *pdom = to_pdomain(dom);
+        unsigned long flags;
+        int ret;
+
+        spin_lock_irqsave(&pdom->lock, flags);
 
         /*
          * Save us all sanity checks whether devices already in the
@@ -2471,24 +2489,14 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
          * devices attached when it is switched into IOMMUv2 mode.
          */
         ret = -EBUSY;
-        if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
-                goto out;
-
-        ret = -ENOMEM;
-        domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
-        if (domain->gcr3_tbl == NULL)
+        if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
                 goto out;
 
-        domain->glx = levels;
-        domain->flags |= PD_IOMMUV2_MASK;
-
-        amd_iommu_domain_update(domain);
-
-        ret = 0;
+        if (!pdom->gcr3_tbl)
+                ret = domain_enable_v2(pdom, pasids);
 
 out:
-        spin_unlock_irqrestore(&domain->lock, flags);
-
+        spin_unlock_irqrestore(&pdom->lock, flags);
         return ret;
 }
 EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
-- 
cgit v1.2.3
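
Illustration (not part of the commit above): the point of splitting domain_enable_v2() out of
amd_iommu_domain_enable_v2() is that a code path which already holds the protection domain's
lock can run the enable logic without taking the lock again; the commit message says such a
caller exists in another code path, presumably added elsewhere in this series. The sketch below
is only a hypothetical example of that pattern, assuming it lives in drivers/iommu/amd/iommu.c
(where domain_enable_v2() is static); example_enable_v2_locked() is an invented name.

        /*
         * Hypothetical caller, for illustration only: the domain lock is
         * already held here, so domain_enable_v2() is called directly
         * instead of going through amd_iommu_domain_enable_v2(), which
         * takes the lock itself.
         */
        static int example_enable_v2_locked(struct protection_domain *pdom, int pasids)
        {
                lockdep_assert_held(&pdom->lock);

                /* Set up the GCR3 table only once per domain. */
                if (pdom->gcr3_tbl)
                        return 0;

                return domain_enable_v2(pdom, pasids);
        }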