Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_process.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 175
1 file changed, 112 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index c74cf22a1ed9..1f5ccd28bd41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -35,13 +35,6 @@ struct mm_struct;
#include "kfd_dbgmgr.h"
/*
- * Initial size for the array of queues.
- * The allocated size is doubled each time
- * it is exceeded up to MAX_PROCESS_QUEUES.
- */
-#define INITIAL_QUEUE_ARRAY_SIZE 16
-
-/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
@@ -171,25 +164,22 @@ static void kfd_process_wq_release(struct work_struct *work)
pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
pdd->dev->id, p->pasid);
- if (pdd->reset_wavefronts)
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ if (pdd->bound == PDD_BOUND)
+ amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
- amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
list_del(&pdd->per_device_list);
-
kfree(pdd);
}
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
+ kfd_free_process_doorbells(p);
mutex_unlock(&p->mutex);
mutex_destroy(&p->mutex);
- kfree(p->queues);
-
kfree(p);
kfree(work);
@@ -201,7 +191,6 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process *p;
p = container_of(rcu, struct kfd_process, rcu);
- WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm);
@@ -235,24 +224,26 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
mutex_lock(&p->mutex);
- /* In case our notifier is called before IOMMU notifier */
- pqm_uninit(&p->pqm);
-
- /* Iterate over all process device data structure and check
- * if we should delete debug managers and reset all wavefronts
+ /* Iterate over all process device data structures and if the
+ * pdd is in debug mode, we should first force unregistration,
+ * then we will be able to destroy the queues
*/
list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
- if ((pdd->dev->dbgmgr) &&
- (pdd->dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
-
- if (pdd->reset_wavefronts) {
- pr_warn("Resetting all wave fronts\n");
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
+ struct kfd_dev *dev = pdd->dev;
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+ if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
+ if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
}
+ mutex_unlock(kfd_get_dbgmgr_mutex());
}
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
mutex_unlock(&p->mutex);
/*
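The kfd_process_dequeue_from_all_devices() call above is introduced
elsewhere in this series (kfd_process_queue_manager.c). A minimal
sketch of what it is expected to do, with the loop shape inferred from
the per_device_data list walked throughout this file; the sketch_*
name is hypothetical:

        /* Evict every device's queues for this process once, while the
         * process data structures are still alive.
         */
        static void sketch_dequeue_from_all_devices(struct kfd_process *p)
        {
                struct kfd_process_device *pdd;

                list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                        kfd_process_dequeue_from_device(pdd);
        }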
@@ -279,15 +270,13 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (!process)
goto err_alloc_process;
- process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
- sizeof(process->queues[0]), GFP_KERNEL);
- if (!process->queues)
- goto err_alloc_queues;
-
process->pasid = kfd_pasid_alloc();
if (process->pasid == 0)
goto err_alloc_pasid;
+ if (kfd_alloc_process_doorbells(process) < 0)
+ goto err_alloc_doorbells;
+
mutex_init(&process->mutex);
process->mm = thread->mm;
@@ -303,8 +292,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
process->lead_thread = thread->group_leader;
- process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;
-
INIT_LIST_HEAD(&process->per_device_data);
kfd_event_init_process(process);
@@ -329,10 +316,10 @@ err_process_pqm_init:
mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
mutex_destroy(&process->mutex);
+ kfd_free_process_doorbells(process);
+err_alloc_doorbells:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
- kfree(process->queues);
-err_alloc_queues:
kfree(process);
err_alloc_process:
return ERR_PTR(err);
@@ -345,9 +332,9 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
list_for_each_entry(pdd, &p->per_device_data, per_device_list)
if (pdd->dev == dev)
- break;
+ return pdd;
- return pdd;
+ return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -361,7 +348,9 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
- pdd->reset_wavefronts = false;
+ pdd->process = p;
+ pdd->bound = PDD_UNBOUND;
+ pdd->already_dequeued = false;
list_add(&pdd->per_device_list, &p->per_device_data);
}
@@ -387,19 +376,87 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
- if (pdd->bound)
+ if (pdd->bound == PDD_BOUND) {
return pdd;
+ } else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
+ pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
+ return ERR_PTR(-EINVAL);
+ }
err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
if (err < 0)
return ERR_PTR(err);
- pdd->bound = true;
+ pdd->bound = PDD_BOUND;
return pdd;
}
-void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+/*
+ * Bind processes to the device that have been temporarily unbound
+ * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
+ */
+int kfd_bind_processes_to_device(struct kfd_dev *dev)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+ int err = 0;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(dev, p);
+ if (pdd->bound != PDD_BOUND_SUSPENDED) {
+ mutex_unlock(&p->mutex);
+ continue;
+ }
+
+ err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
+ p->lead_thread);
+ if (err < 0) {
+ pr_err("Unexpected pasid %d binding failure\n",
+ p->pasid);
+ mutex_unlock(&p->mutex);
+ break;
+ }
+
+ pdd->bound = PDD_BOUND;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return err;
+}
+
+/*
+ * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
+ * processes will be restored to PDD_BOUND state in
+ * kfd_bind_processes_to_device.
+ */
+void kfd_unbind_processes_from_device(struct kfd_dev *dev)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(dev, p);
+
+ if (pdd->bound == PDD_BOUND)
+ pdd->bound = PDD_BOUND_SUSPENDED;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
+
+void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
struct kfd_process *p;
struct kfd_process_device *pdd;
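The two helpers above rely on a tri-state binding flag on each
kfd_process_device. A sketch of the lifecycle, with the enum
reconstructed from the names used in this diff (the actual declaration
lives in kfd_priv.h and is not shown here, so treat the exact
definition as an assumption):

        /*
         * Assumed declaration, inferred from this diff:
         *
         *   PDD_UNBOUND --bind--> PDD_BOUND --suspend--> PDD_BOUND_SUSPENDED
         *                             ^                          |
         *                             +---------resume-----------+
         *
         * kfd_unbind_processes_from_device() performs the suspend edge,
         * kfd_bind_processes_to_device() the resume edge, and
         * kfd_bind_process_to_device() the initial bind edge.
         */
        enum kfd_pdd_bound {
                PDD_UNBOUND = 0,
                PDD_BOUND,
                PDD_BOUND_SUSPENDED,
        };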
@@ -415,31 +472,23 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pr_debug("Unbinding process %d from IOMMU\n", pasid);
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
-
- pqm_uninit(&p->pqm);
-
- pdd = kfd_get_process_device_data(dev, p);
+ mutex_lock(kfd_get_dbgmgr_mutex());
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
+ if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
+ if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
}
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ mutex_unlock(kfd_get_dbgmgr_mutex());
- /*
- * Just mark pdd as unbound, because we still need it
- * to call amd_iommu_unbind_pasid() in when the
- * process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ pdd = kfd_get_process_device_data(dev, p);
+ if (pdd)
+ /* For GPU relying on IOMMU, we need to dequeue here
+ * when PASID is still bound.
+ */
+ kfd_process_dequeue_from_device(pdd);
mutex_unlock(&p->mutex);
}
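For context, a hypothetical caller showing where the two new helpers
slot into device suspend/resume; the real wiring lives in kfd_device.c
and differs in detail (IOMMU device init/teardown omitted), so the dqm
ops calls below are assumptions:

        /* Quiesce hardware queues before dropping the PASID bindings. */
        static void sketch_suspend(struct kfd_dev *kfd)
        {
                kfd->dqm->ops.stop(kfd->dqm);          /* assumption */
                kfd_unbind_processes_from_device(kfd); /* PDD_BOUND -> PDD_BOUND_SUSPENDED */
        }

        /* Rebind PASIDs first, then restart the queue manager. */
        static int sketch_resume(struct kfd_dev *kfd)
        {
                int err;

                err = kfd_bind_processes_to_device(kfd); /* -> PDD_BOUND */
                if (err)
                        return err;

                return kfd->dqm->ops.start(kfd->dqm);  /* assumption */
        }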