author    Jason Gunthorpe <jgg@nvidia.com>    2023-07-17 21:11:59 +0300
committer Jason Gunthorpe <jgg@nvidia.com>    2023-07-26 16:19:22 +0300
commit    91a2e17e243f70e0aec29facf44946439fce9195 (patch)
tree      01205c32857b718c706727b52267b4f2031b4dfd /drivers/iommu/iommufd/hw_pagetable.c
parent    3a3329a7f14a7a0a8c30a12c7ed9f1f77f8efaa1 (diff)
download  linux-91a2e17e243f70e0aec29facf44946439fce9195.tar.xz
iommufd: Replace the hwpt->devices list with iommufd_group
The devices list was used as a simple way to avoid having per-group
information. Now that this seems to be unavoidable, just commit to
per-group information fully and remove the devices list from the HWPT.

The iommufd_group stores the currently assigned HWPT for the entire group,
and we can manage the per-device attach/detach with a list in the
iommufd_group.

For destruction, the flow is organized to make the following patches
easier: the actual call to iommufd_object_destroy_user() is done at the
top of the call chain without holding any locks. The HWPT to be destroyed
is returned out of the locked region to make this possible. Later patches
create locking that requires this.

Link: https://lore.kernel.org/r/3-v8-6659224517ea+532-iommufd_alloc_jgg@nvidia.com
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
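For orientation, a minimal C sketch of the per-group bookkeeping the message
describes. The member names below are illustrative assumptions for exposition;
the real definitions live in drivers/iommu/iommufd/iommufd_private.h and carry
additional state.

/*
 * Illustrative sketch only, not the exact kernel definitions: the
 * iommufd_group tracks which HWPT the whole iommu_group is attached to,
 * and which iommufd_devices of that group are currently attached.
 */
struct iommufd_group {
	struct mutex lock;
	struct iommufd_hw_pagetable *hwpt;	/* NULL when nothing is attached */
	struct list_head device_list;
};

struct iommufd_device {
	struct iommufd_group *igroup;
	struct list_head group_item;	/* on igroup->device_list while attached */
	/* ... */
};

With the attach state held per group, the HWPT no longer needs its own devices
list or devices_lock, which is exactly what the hunks below remove.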
Diffstat (limited to 'drivers/iommu/iommufd/hw_pagetable.c')
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c  22
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 6cdb6749d359..bdb76cdb1dc3 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -11,8 +11,6 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
 	struct iommufd_hw_pagetable *hwpt =
 		container_of(obj, struct iommufd_hw_pagetable, obj);
 
-	WARN_ON(!list_empty(&hwpt->devices));
-
 	if (!list_empty(&hwpt->hwpt_item)) {
 		mutex_lock(&hwpt->ioas->mutex);
 		list_del(&hwpt->hwpt_item);
@@ -25,7 +23,6 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
 		iommu_domain_free(hwpt->domain);
 
 	refcount_dec(&hwpt->ioas->obj.users);
-	mutex_destroy(&hwpt->devices_lock);
 }
 
 /**
@@ -52,9 +49,7 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 	if (IS_ERR(hwpt))
 		return hwpt;
 
-	INIT_LIST_HEAD(&hwpt->devices);
 	INIT_LIST_HEAD(&hwpt->hwpt_item);
-	mutex_init(&hwpt->devices_lock);
 	/* Pairs with iommufd_hw_pagetable_destroy() */
 	refcount_inc(&ioas->obj.users);
 	hwpt->ioas = ioas;
@@ -65,8 +60,6 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 		goto out_abort;
 	}
 
-	mutex_lock(&hwpt->devices_lock);
-
 	/*
 	 * immediate_attach exists only to accommodate iommu drivers that cannot
 	 * directly allocate a domain. These drivers do not finish creating the
@@ -76,29 +69,18 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 	if (immediate_attach) {
 		rc = iommufd_hw_pagetable_attach(hwpt, idev);
 		if (rc)
-			goto out_unlock;
+			goto out_abort;
 	}
 
 	rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain);
 	if (rc)
 		goto out_detach;
 	list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
-
-	if (immediate_attach) {
-		/* See iommufd_device_do_attach() */
-		refcount_inc(&hwpt->obj.users);
-		idev->hwpt = hwpt;
-		list_add(&idev->devices_item, &hwpt->devices);
-	}
-
-	mutex_unlock(&hwpt->devices_lock);
 	return hwpt;
 
 out_detach:
 	if (immediate_attach)
-		iommufd_hw_pagetable_detach(hwpt, idev);
-out_unlock:
-	mutex_unlock(&hwpt->devices_lock);
+		iommufd_hw_pagetable_detach(idev);
 out_abort:
 	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
 	return ERR_PTR(rc);