Diffstat (limited to 'drivers/iommu/iommufd/hw_pagetable.c')
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c | 112
1 file changed, 93 insertions(+), 19 deletions(-)
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 6cdb6749d359..cf2c1504e20d 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -3,6 +3,7 @@
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/iommu.h>
+#include <uapi/linux/iommufd.h>
#include "iommufd_private.h"
@@ -11,8 +12,6 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
struct iommufd_hw_pagetable *hwpt =
container_of(obj, struct iommufd_hw_pagetable, obj);
- WARN_ON(!list_empty(&hwpt->devices));
-
if (!list_empty(&hwpt->hwpt_item)) {
mutex_lock(&hwpt->ioas->mutex);
list_del(&hwpt->hwpt_item);
@@ -25,7 +24,35 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
iommu_domain_free(hwpt->domain);
refcount_dec(&hwpt->ioas->obj.users);
- mutex_destroy(&hwpt->devices_lock);
+}
+
+void iommufd_hw_pagetable_abort(struct iommufd_object *obj)
+{
+ struct iommufd_hw_pagetable *hwpt =
+ container_of(obj, struct iommufd_hw_pagetable, obj);
+
+ /* The ioas->mutex must be held until finalize is called. */
+ lockdep_assert_held(&hwpt->ioas->mutex);
+
+ if (!list_empty(&hwpt->hwpt_item)) {
+ list_del_init(&hwpt->hwpt_item);
+ iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
+ }
+ iommufd_hw_pagetable_destroy(obj);
+}
+
+int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
+{
+ if (hwpt->enforce_cache_coherency)
+ return 0;
+
+ if (hwpt->domain->ops->enforce_cache_coherency)
+ hwpt->enforce_cache_coherency =
+ hwpt->domain->ops->enforce_cache_coherency(
+ hwpt->domain);
+ if (!hwpt->enforce_cache_coherency)
+ return -EINVAL;
+ return 0;
}
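
[Editor's illustration, not part of the patch: iommufd_hw_pagetable_enforce_cc() above relies on the optional enforce_cache_coherency op in struct iommu_domain_ops. A driver that reports IOMMU_CAP_ENFORCE_CACHE_COHERENCY might wire it up roughly as sketched below; every my_-prefixed name is hypothetical, and only the op and its bool (*)(struct iommu_domain *) signature come from the iommu core.]

static bool my_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
	struct my_domain *md = to_my_domain(domain);	/* hypothetical */

	/* Hardware without snoop control must refuse the upgrade. */
	if (!md->hw_has_snoop_control)			/* hypothetical */
		return false;
	/*
	 * Decide before any PTEs are installed; subsequent map calls set
	 * the per-PTE snoop/coherency bit. This is why the core invokes
	 * the op before iopt_table_add_domain() populates the domain.
	 */
	md->force_snooping = true;
	return true;
}

static const struct iommu_domain_ops my_domain_ops = {
	.enforce_cache_coherency = my_iommu_enforce_cache_coherency,
	/* .attach_dev, .map_pages, ... */
};
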
/**
@@ -38,6 +65,10 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
* Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
* will be linked to the given ioas and upon return the underlying iommu_domain
* is fully populated.
+ *
+ * The caller must hold the ioas->mutex until after
+ * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
+ * the returned hwpt.
*/
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
@@ -52,9 +83,7 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if (IS_ERR(hwpt))
return hwpt;
- INIT_LIST_HEAD(&hwpt->devices);
INIT_LIST_HEAD(&hwpt->hwpt_item);
- mutex_init(&hwpt->devices_lock);
/* Pairs with iommufd_hw_pagetable_destroy() */
refcount_inc(&ioas->obj.users);
hwpt->ioas = ioas;
@@ -65,7 +94,18 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
goto out_abort;
}
- mutex_lock(&hwpt->devices_lock);
+ /*
+ * Set the coherency mode before we do iopt_table_add_domain() as some
+ * iommus have a per-PTE bit that controls it and need to decide before
+ * doing any maps. It is an iommu driver bug to report
+ * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
+ * a new domain.
+ */
+ if (idev->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (WARN_ON(rc))
+ goto out_abort;
+ }
/*
* immediate_attach exists only to accommodate iommu drivers that cannot
@@ -76,30 +116,64 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if (immediate_attach) {
rc = iommufd_hw_pagetable_attach(hwpt, idev);
if (rc)
- goto out_unlock;
+ goto out_abort;
}
rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain);
if (rc)
goto out_detach;
list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
-
- if (immediate_attach) {
- /* See iommufd_device_do_attach() */
- refcount_inc(&hwpt->obj.users);
- idev->hwpt = hwpt;
- list_add(&idev->devices_item, &hwpt->devices);
- }
-
- mutex_unlock(&hwpt->devices_lock);
return hwpt;
out_detach:
if (immediate_attach)
- iommufd_hw_pagetable_detach(hwpt, idev);
-out_unlock:
- mutex_unlock(&hwpt->devices_lock);
+ iommufd_hw_pagetable_detach(idev);
out_abort:
iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
return ERR_PTR(rc);
}
+
+int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_hwpt_alloc *cmd = ucmd->cmd;
+ struct iommufd_hw_pagetable *hwpt;
+ struct iommufd_device *idev;
+ struct iommufd_ioas *ioas;
+ int rc;
+
+ if (cmd->flags || cmd->__reserved)
+ return -EOPNOTSUPP;
+
+ idev = iommufd_get_device(ucmd, cmd->dev_id);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->pt_id);
+ if (IS_ERR(ioas)) {
+ rc = PTR_ERR(ioas);
+ goto out_put_idev;
+ }
+
+ mutex_lock(&ioas->mutex);
+ hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false);
+ if (IS_ERR(hwpt)) {
+ rc = PTR_ERR(hwpt);
+ goto out_unlock;
+ }
+
+ cmd->out_hwpt_id = hwpt->obj.id;
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ if (rc)
+ goto out_hwpt;
+ iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
+ goto out_unlock;
+
+out_hwpt:
+ iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
+out_unlock:
+ mutex_unlock(&ioas->mutex);
+ iommufd_put_object(&ioas->obj);
+out_put_idev:
+ iommufd_put_object(&idev->obj);
+ return rc;
+}
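
[Editor's illustration, not part of the patch: a minimal userspace caller for the new IOMMU_HWPT_ALLOC ioctl, assuming the struct iommu_hwpt_alloc layout this series adds to uapi/linux/iommufd.h (a leading size field plus the flags/dev_id/pt_id/out_hwpt_id/__reserved fields the handler above reads). dev_id is assumed to come from a prior device bind, and ioas_id from IOMMU_IOAS_ALLOC.]

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* fd is an open /dev/iommu; returns 0 or -errno. */
static int hwpt_alloc(int fd, __u32 dev_id, __u32 ioas_id, __u32 *out_hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = 0,		/* non-zero flags get -EOPNOTSUPP */
		.dev_id = dev_id,	/* device the domain must be compatible with */
		.pt_id = ioas_id,	/* the IOAS backing the page table */
	};

	if (ioctl(fd, IOMMU_HWPT_ALLOC, &cmd))
		return -errno;
	*out_hwpt_id = cmd.out_hwpt_id;
	return 0;
}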