Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 460
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 1118
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 123
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 481
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1031
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 35
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 405
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 64
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 423
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 73
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 320
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 283
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 98
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 303
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 213
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 14
45 files changed, 5339 insertions(+), 1056 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index e758c2a24cd0..2ec8f27c5366 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -53,9 +53,11 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_events.o \
$(AMDKFD_PATH)/cik_event_interrupt.o \
$(AMDKFD_PATH)/kfd_int_process_v9.o \
+ $(AMDKFD_PATH)/kfd_int_process_v10.o \
$(AMDKFD_PATH)/kfd_int_process_v11.o \
$(AMDKFD_PATH)/kfd_smi_events.o \
- $(AMDKFD_PATH)/kfd_crat.o
+ $(AMDKFD_PATH)/kfd_crat.o \
+ $(AMDKFD_PATH)/kfd_debug.o
ifneq ($(CONFIG_AMD_IOMMU_V2),)
AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 5c8023cba196..795382b55e0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -26,7 +26,7 @@
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
-static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+static bool cik_event_interrupt_isr(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
@@ -85,7 +85,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
!amdgpu_no_queue_eviction_on_vm_fault);
}
-static void cik_event_interrupt_wq(struct kfd_dev *dev,
+static void cik_event_interrupt_wq(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
const struct cik_ih_ring_entry *ihre =
@@ -118,9 +118,9 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
return;
if (info.vmid == vmid)
- kfd_signal_vm_fault_event(dev, pasid, &info);
+ kfd_signal_vm_fault_event(dev, pasid, &info, NULL);
else
- kfd_signal_vm_fault_event(dev, pasid, NULL);
+ kfd_signal_vm_fault_event(dev, pasid, NULL, NULL);
}
}
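
The NULL fourth argument above reflects a widened signature: kfd_signal_vm_fault_event() now also accepts debugger-supplied exception data, so the new kfd_debug.c paths below can forward a saved kfd_hsa_memory_exception_data instead of rebuilding it from the IH ring entry. A sketch of the assumed prototype, inferred from the call sites in this patch (type names as in the upstream kfd headers):

    /* Assumed prototype after this patch; the last parameter carries
     * prefilled exception data and is NULL on the legacy ISR paths. */
    void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
                                   struct kfd_vm_fault_info *info,
                                   struct kfd_hsa_memory_exception_data *data);
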
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1b54a9aaae70..6a27b000a246 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -44,6 +44,7 @@
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "amdgpu_dma_buf.h"
+#include "kfd_debug.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
@@ -142,15 +143,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
return -EPERM;
}
- process = kfd_create_process(filep);
+ process = kfd_create_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
- if (kfd_is_locked()) {
- dev_dbg(kfd_device, "kfd is locked!\n"
- "process %d unreferenced", process->pasid);
+ if (kfd_process_init_cwsr_apu(process, filep)) {
kfd_unref_process(process);
- return -EAGAIN;
+ return -EFAULT;
}
/* filep now owns the reference returned by kfd_create_process */
@@ -186,7 +185,12 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
static int set_queue_properties_from_user(struct queue_properties *q_properties,
struct kfd_ioctl_create_queue_args *args)
{
- if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ /*
+ * Repurpose queue percentage to accommodate new features:
+ * bit 0-7: queue percentage
+ * bit 8-15: pm4_target_xcc
+ */
+ if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
@@ -236,7 +240,9 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->is_interop = false;
q_properties->is_gws = false;
- q_properties->queue_percent = args->queue_percentage;
+ q_properties->queue_percent = args->queue_percentage & 0xFF;
+ /* bit 8-15 are repurposed to be PM4 target XCC */
+ q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
q_properties->queue_size = args->ring_size;
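
A minimal sketch of the repurposed field described in the comment above; these helpers are illustrative only, not part of the patch:

    /* args->queue_percentage now packs two values:
     *   bits 0-7  : queue percentage (0..KFD_MAX_QUEUE_PERCENTAGE)
     *   bits 8-15 : PM4 target XCC
     * Hypothetical helpers showing the encoding userspace would use:
     */
    static inline u32 kfd_pack_queue_percentage(u8 percent, u8 pm4_target_xcc)
    {
            return (u32)percent | ((u32)pm4_target_xcc << 8);
    }

    static inline u8 kfd_queue_percent(u32 v)      { return v & 0xFF; }
    static inline u8 kfd_pm4_target_xcc(u32 v)     { return (v >> 8) & 0xFF; }
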
@@ -293,7 +299,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_create_queue_args *args = data;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int err = 0;
unsigned int queue_id;
struct kfd_process_device *pdd;
@@ -328,7 +334,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
}
if (!pdd->doorbell_index &&
- kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+ kfd_alloc_process_doorbells(dev->kfd, &pdd->doorbell_index) < 0) {
err = -ENOMEM;
goto err_alloc_doorbells;
}
@@ -336,7 +342,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
- if (dev->shared_resources.enable_mes &&
+ if (dev->kfd->shared_resources.enable_mes &&
((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
>> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
struct amdgpu_bo_va_mapping *wptr_mapping;
@@ -404,6 +410,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
pr_debug("Write ptr address == 0x%016llX\n",
args->write_pointer_address);
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
return 0;
err_create_queue:
@@ -442,7 +449,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
- if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ /*
+ * Repurpose queue percentage to accommodate new features:
+ * bit 0-7: queue percentage
+ * bit 8-15: pm4_target_xcc
+ */
+ if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
@@ -466,7 +478,9 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size;
- properties.queue_percent = args->queue_percentage;
+ properties.queue_percent = args->queue_percentage & 0xFF;
+ /* bit 8-15 are repurposed to be PM4 target XCC */
+ properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
properties.priority = args->queue_priority;
pr_debug("Updating queue id %d for pasid 0x%x\n",
@@ -524,8 +538,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
goto out;
}
- minfo.update_flag = UPDATE_FLAG_CU_MASK;
-
mutex_lock(&p->mutex);
retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
@@ -887,7 +899,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
{
struct kfd_ioctl_set_scratch_backing_va_args *args = data;
struct kfd_process_device *pdd;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
long err;
mutex_lock(&p->mutex);
@@ -1006,19 +1018,26 @@ err_drm_file:
return ret;
}
-bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
if (debug_largebar) {
pr_debug("Simulate large-bar allocation on non large-bar machine\n");
return true;
}
- if (dev->use_iommu_v2)
+ if (dev->kfd->use_iommu_v2)
return false;
if (dev->local_mem_info.local_mem_size_private == 0 &&
- dev->local_mem_info.local_mem_size_public > 0)
+ dev->local_mem_info.local_mem_size_public > 0)
+ return true;
+
+ if (dev->local_mem_info.local_mem_size_public == 0 &&
+ dev->kfd->adev->gmc.is_app_apu) {
+ pr_debug("APP APU, Consider like a large bar system\n");
return true;
+ }
+
return false;
}
@@ -1030,7 +1049,8 @@ static int kfd_ioctl_get_available_memory(struct file *filep,
if (!pdd)
return -EINVAL;
- args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
+ args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
+ pdd->dev->node_id);
kfd_unlock_pdd(pdd);
return 0;
}
@@ -1041,7 +1061,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
struct kfd_process_device *pdd;
void *mem;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int idr_handle;
long err;
uint64_t offset = args->mmap_offset;
@@ -1105,7 +1125,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
}
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
- if (args->size != kfd_doorbell_process_slice(dev)) {
+ if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
err = -EINVAL;
goto err_unlock;
}
@@ -1231,7 +1251,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
struct kfd_ioctl_map_memory_to_gpu_args *args = data;
struct kfd_process_device *pdd, *peer_pdd;
void *mem;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
long err = 0;
int i;
uint32_t *devices_arr = NULL;
@@ -1405,7 +1425,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
args->n_success = i+1;
}
- flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+ flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
@@ -1445,7 +1465,7 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep,
int retval;
struct kfd_ioctl_alloc_queue_gws_args *args = data;
struct queue *q;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
mutex_lock(&p->mutex);
q = pqm_get_user_queue(&p->pqm, args->queue_id);
@@ -1467,6 +1487,11 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep,
goto out_unlock;
}
+ if (!kfd_dbg_has_gws_support(dev) && p->debug_trap_enabled) {
+ retval = -EBUSY;
+ goto out_unlock;
+ }
+
retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
mutex_unlock(&p->mutex);
@@ -1482,10 +1507,11 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_dmabuf_info_args *args = data;
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
struct amdgpu_device *dmabuf_adev;
void *metadata_buffer = NULL;
uint32_t flags;
+ int8_t xcp_id;
unsigned int i;
int r;
@@ -1506,17 +1532,14 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
&dmabuf_adev, &args->size,
metadata_buffer, args->metadata_size,
- &args->metadata_size, &flags);
+ &args->metadata_size, &flags, &xcp_id);
if (r)
goto exit;
- /* Reverse-lookup gpu_id from kgd pointer */
- dev = kfd_device_by_adev(dmabuf_adev);
- if (!dev) {
- r = -EINVAL;
- goto exit;
- }
- args->gpu_id = dev->id;
+ if (xcp_id >= 0)
+ args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
+ else
+ args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
args->flags = flags;
/* Copy metadata buffer to user mode */
@@ -1596,7 +1619,7 @@ static int kfd_ioctl_export_dmabuf(struct file *filep,
struct kfd_ioctl_export_dmabuf_args *args = data;
struct kfd_process_device *pdd;
struct dma_buf *dmabuf;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
void *mem;
int ret = 0;
@@ -2178,7 +2201,7 @@ static int criu_restore_devices(struct kfd_process *p,
}
for (i = 0; i < args->num_devices; i++) {
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct kfd_process_device *pdd;
struct file *drm_file;
@@ -2240,7 +2263,7 @@ static int criu_restore_devices(struct kfd_process *p,
}
if (!pdd->doorbell_index &&
- kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) {
ret = -ENOMEM;
goto exit;
}
@@ -2268,7 +2291,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
u64 offset;
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
- if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
+ if (bo_bucket->size !=
+ kfd_doorbell_process_slice(pdd->dev->kfd))
return -EINVAL;
offset = kfd_get_process_doorbells(pdd);
@@ -2350,7 +2374,7 @@ static int criu_restore_bo(struct kfd_process *p,
/* now map these BOs to GPU/s */
for (j = 0; j < p->n_pdds; j++) {
- struct kfd_dev *peer;
+ struct kfd_node *peer;
struct kfd_process_device *peer_pdd;
if (!bo_priv->mapped_gpuids[j])
@@ -2715,6 +2739,356 @@ static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
return ret;
}
+static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
+ bool enable_ttmp_setup)
+{
+ int i = 0, ret = 0;
+
+ if (p->is_runtime_retry)
+ goto retry;
+
+ if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+ return -EBUSY;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (pdd->qpd.queue_count)
+ return -EEXIST;
+ }
+
+ p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+ p->runtime_info.r_debug = r_debug;
+ p->runtime_info.ttmp_setup = enable_ttmp_setup;
+
+ if (p->runtime_info.ttmp_setup) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->dev->kfd2kgd->enable_debug_trap(
+ pdd->dev->adev,
+ true,
+ pdd->dev->vm_info.last_vmid_kfd);
+ } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
+ pdd->dev->adev,
+ false,
+ 0);
+ }
+ }
+ }
+
+retry:
+ if (p->debug_trap_enabled) {
+ if (!p->is_runtime_retry) {
+ kfd_dbg_trap_activate(p);
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
+ p, NULL, 0, false, NULL, 0);
+ }
+
+ mutex_unlock(&p->mutex);
+ ret = down_interruptible(&p->runtime_enable_sema);
+ mutex_lock(&p->mutex);
+
+ p->is_runtime_retry = !!ret;
+ }
+
+ return ret;
+}
+
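
For context, a hedged userspace sketch of how a runtime drives this handshake. The args struct is re-declared here as a local mirror of the two fields the handler reads (r_debug, mode_mask); the authoritative layout and the mask/ioctl constants live in the kfd_ioctl.h UAPI header:

    #include <stdbool.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/kfd_ioctl.h>   /* AMDKFD_IOC_RUNTIME_ENABLE, mask bits */

    /* Illustrative mirror of the fields runtime_enable() consumes;
     * the real struct is defined in the UAPI header. */
    struct runtime_enable_args_sketch {
            __u64 r_debug;   /* VA of the runtime's debug/rendezvous struct */
            __u32 mode_mask; /* ENABLE and/or TTMP_SAVE mask bits */
            __u32 pad;
    };

    static int hsa_runtime_enable(int kfd_fd, __u64 r_debug_va, bool save_ttmps)
    {
            struct runtime_enable_args_sketch args = {
                    .r_debug   = r_debug_va,
                    .mode_mask = KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK |
                                 (save_ttmps ? KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK : 0),
            };

            /* Blocks in runtime_enable() on runtime_enable_sema until an
             * attached debugger, if any, acknowledges the runtime event. */
            return ioctl(kfd_fd, AMDKFD_IOC_RUNTIME_ENABLE, &args);
    }
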
+static int runtime_disable(struct kfd_process *p)
+{
+ int i = 0, ret;
+ bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
+
+ p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
+ p->runtime_info.r_debug = 0;
+
+ if (p->debug_trap_enabled) {
+ if (was_enabled)
+ kfd_dbg_trap_deactivate(p, false, 0);
+
+ if (!p->is_runtime_retry)
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
+ p, NULL, 0, false, NULL, 0);
+
+ mutex_unlock(&p->mutex);
+ ret = down_interruptible(&p->runtime_enable_sema);
+ mutex_lock(&p->mutex);
+
+ p->is_runtime_retry = !!ret;
+ if (ret)
+ return ret;
+ }
+
+ if (was_enabled && p->runtime_info.ttmp_setup) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+ }
+ }
+
+ p->runtime_info.ttmp_setup = false;
+
+ /* disable ttmp setup */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+ pdd->spi_dbg_override =
+ pdd->dev->kfd2kgd->disable_debug_trap(
+ pdd->dev->adev,
+ false,
+ pdd->dev->vm_info.last_vmid_kfd);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ debug_refresh_runlist(pdd->dev->dqm);
+ else
+ kfd_dbg_set_mes_debug_mode(pdd);
+ }
+ }
+
+ return 0;
+}
+
+static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_runtime_enable_args *args = data;
+ int r;
+
+ mutex_lock(&p->mutex);
+
+ if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
+ r = runtime_enable(p, args->r_debug,
+ !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
+ else
+ r = runtime_disable(p);
+
+ mutex_unlock(&p->mutex);
+
+ return r;
+}
+
+static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_dbg_trap_args *args = data;
+ struct task_struct *thread = NULL;
+ struct mm_struct *mm = NULL;
+ struct pid *pid = NULL;
+ struct kfd_process *target = NULL;
+ struct kfd_process_device *pdd = NULL;
+ int r = 0;
+
+ if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Debugging does not support sched_policy %i", sched_policy);
+ return -EINVAL;
+ }
+
+ pid = find_get_pid(args->pid);
+ if (!pid) {
+ pr_debug("Cannot find pid info for %i\n", args->pid);
+ r = -ESRCH;
+ goto out;
+ }
+
+ thread = get_pid_task(pid, PIDTYPE_PID);
+ if (!thread) {
+ r = -ESRCH;
+ goto out;
+ }
+
+ mm = get_task_mm(thread);
+ if (!mm) {
+ r = -ESRCH;
+ goto out;
+ }
+
+ if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
+ bool create_process;
+
+ rcu_read_lock();
+ create_process = thread && thread != current && ptrace_parent(thread) == current;
+ rcu_read_unlock();
+
+ target = create_process ? kfd_create_process(thread) :
+ kfd_lookup_process_by_pid(pid);
+ } else {
+ target = kfd_lookup_process_by_pid(pid);
+ }
+
+ if (IS_ERR_OR_NULL(target)) {
+ pr_debug("Cannot find process PID %i to debug\n", args->pid);
+ r = target ? PTR_ERR(target) : -ESRCH;
+ goto out;
+ }
+
+ /* Check if target is still PTRACED. */
+ rcu_read_lock();
+ if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
+ && ptrace_parent(target->lead_thread) != current) {
+ pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
+ r = -EPERM;
+ }
+ rcu_read_unlock();
+
+ if (r)
+ goto out;
+
+ mutex_lock(&target->mutex);
+
+ if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
+ pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
+ r = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED &&
+ (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
+ args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
+ args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
+ args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
+ args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
+ args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
+ args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
+ r = -EPERM;
+ goto unlock_out;
+ }
+
+ if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
+ args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
+ int user_gpu_id = kfd_process_get_user_gpu_id(target,
+ args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
+ args->set_node_address_watch.gpu_id :
+ args->clear_node_address_watch.gpu_id);
+
+ pdd = kfd_process_device_data_by_id(target, user_gpu_id);
+ if (user_gpu_id == -EINVAL || !pdd) {
+ r = -ENODEV;
+ goto unlock_out;
+ }
+ }
+
+ switch (args->op) {
+ case KFD_IOC_DBG_TRAP_ENABLE:
+ if (target != p)
+ target->debugger_process = p;
+
+ r = kfd_dbg_trap_enable(target,
+ args->enable.dbg_fd,
+ (void __user *)args->enable.rinfo_ptr,
+ &args->enable.rinfo_size);
+ if (!r)
+ target->exception_enable_mask = args->enable.exception_mask;
+
+ break;
+ case KFD_IOC_DBG_TRAP_DISABLE:
+ r = kfd_dbg_trap_disable(target);
+ break;
+ case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT:
+ r = kfd_dbg_send_exception_to_runtime(target,
+ args->send_runtime_event.gpu_id,
+ args->send_runtime_event.queue_id,
+ args->send_runtime_event.exception_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED:
+ kfd_dbg_set_enabled_debug_exception_mask(target,
+ args->set_exceptions_enabled.exception_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
+ r = kfd_dbg_trap_set_wave_launch_override(target,
+ args->launch_override.override_mode,
+ args->launch_override.enable_mask,
+ args->launch_override.support_request_mask,
+ &args->launch_override.enable_mask,
+ &args->launch_override.support_request_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
+ r = kfd_dbg_trap_set_wave_launch_mode(target,
+ args->launch_mode.launch_mode);
+ break;
+ case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES:
+ r = suspend_queues(target,
+ args->suspend_queues.num_queues,
+ args->suspend_queues.grace_period,
+ args->suspend_queues.exception_mask,
+ (uint32_t *)args->suspend_queues.queue_array_ptr);
+
+ break;
+ case KFD_IOC_DBG_TRAP_RESUME_QUEUES:
+ r = resume_queues(target, args->resume_queues.num_queues,
+ (uint32_t *)args->resume_queues.queue_array_ptr);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH:
+ r = kfd_dbg_trap_set_dev_address_watch(pdd,
+ args->set_node_address_watch.address,
+ args->set_node_address_watch.mask,
+ &args->set_node_address_watch.id,
+ args->set_node_address_watch.mode);
+ break;
+ case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH:
+ r = kfd_dbg_trap_clear_dev_address_watch(pdd,
+ args->clear_node_address_watch.id);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_FLAGS:
+ r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
+ break;
+ case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT:
+ r = kfd_dbg_ev_query_debug_event(target,
+ &args->query_debug_event.queue_id,
+ &args->query_debug_event.gpu_id,
+ args->query_debug_event.exception_mask,
+ &args->query_debug_event.exception_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO:
+ r = kfd_dbg_trap_query_exception_info(target,
+ args->query_exception_info.source_id,
+ args->query_exception_info.exception_code,
+ args->query_exception_info.clear_exception,
+ (void __user *)args->query_exception_info.info_ptr,
+ &args->query_exception_info.info_size);
+ break;
+ case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
+ r = pqm_get_queue_snapshot(&target->pqm,
+ args->queue_snapshot.exception_mask,
+ (void __user *)args->queue_snapshot.snapshot_buf_ptr,
+ &args->queue_snapshot.num_queues,
+ &args->queue_snapshot.entry_size);
+ break;
+ case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
+ r = kfd_dbg_trap_device_snapshot(target,
+ args->device_snapshot.exception_mask,
+ (void __user *)args->device_snapshot.snapshot_buf_ptr,
+ &args->device_snapshot.num_devices,
+ &args->device_snapshot.entry_size);
+ break;
+ default:
+ pr_err("Invalid option: %i\n", args->op);
+ r = -EINVAL;
+ }
+
+unlock_out:
+ mutex_unlock(&target->mutex);
+
+out:
+ if (thread)
+ put_task_struct(thread);
+
+ if (mm)
+ mmput(mm);
+
+ if (pid)
+ put_pid(pid);
+
+ if (target)
+ kfd_unref_process(target);
+
+ return r;
+}
+
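
The ptrace checks above dictate the attach order for a foreign target: the debugger must already be the ptrace parent before any KFD_IOC_DBG_TRAP op other than DISABLE is accepted. A hedged sketch; field names follow the handler above (pid, op, enable.dbg_fd), with the exact struct layout in the kfd_ioctl.h UAPI header:

    #include <sys/ioctl.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <linux/kfd_ioctl.h>   /* AMDKFD_IOC_DBG_TRAP, op codes */

    static int debugger_attach(int kfd_fd, pid_t pid, int dbg_fd)
    {
            struct kfd_ioctl_dbg_trap_args args = {0};

            if (ptrace(PTRACE_ATTACH, pid, NULL, NULL))  /* become ptrace parent */
                    return -1;
            waitpid(pid, NULL, 0);                       /* wait for the tracee stop */

            args.pid = pid;
            args.op  = KFD_IOC_DBG_TRAP_ENABLE;
            args.enable.dbg_fd = dbg_fd;                 /* fd written on raised events */
            /* args.enable.rinfo_ptr/rinfo_size receive the runtime info copy */
            return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
    }
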
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
@@ -2827,6 +3201,12 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
kfd_ioctl_export_dmabuf, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
+ kfd_ioctl_runtime_enable, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
+ kfd_ioctl_set_debug_trap, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
@@ -2947,7 +3327,7 @@ err_i1:
return retcode;
}
-static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
+static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
@@ -2981,7 +3361,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kfd_process *process;
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
unsigned long mmap_offset;
unsigned int gpu_id;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 475e47027354..49f40d9f16e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1166,7 +1166,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
props->weight = 20;
else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
- props->weight = 15 * iolink->num_hops_xgmi;
+ props->weight = iolink->weight_xgmi;
else
props->weight = node_distance(id_from, id_to);
@@ -1405,7 +1405,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
return i;
}
-int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info)
+int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
@@ -1524,7 +1524,7 @@ int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pca
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
num_of_cache_types =
- kfd_fill_gpu_cache_info_from_gfx_config(kdev, *pcache_info);
+ kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
break;
default:
*pcache_info = dummy_cache_info;
@@ -1858,7 +1858,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
}
static int kfd_fill_gpu_memory_affinity(int *avail_size,
- struct kfd_dev *kdev, uint8_t type, uint64_t size,
+ struct kfd_node *kdev, uint8_t type, uint64_t size,
struct crat_subtype_memory *sub_type_hdr,
uint32_t proximity_domain,
const struct kfd_local_mem_info *local_mem_info)
@@ -1887,7 +1887,7 @@ static int kfd_fill_gpu_memory_affinity(int *avail_size,
}
#ifdef CONFIG_ACPI_NUMA
-static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
+static void kfd_find_numa_node_in_srat(struct kfd_node *kdev)
{
struct acpi_table_header *table_header = NULL;
struct acpi_subtable_header *sub_header = NULL;
@@ -1972,6 +1972,9 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
}
#endif
+#define KFD_CRAT_INTRA_SOCKET_WEIGHT 13
+#define KFD_CRAT_XGMI_WEIGHT 15
+
/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
* to its NUMA node
* @avail_size: Available size in the memory
@@ -1982,7 +1985,7 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
* Return 0 if successful else return -ve value
*/
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
- struct kfd_dev *kdev,
+ struct kfd_node *kdev,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain)
{
@@ -2002,7 +2005,16 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
/* Fill in IOLINK subtype.
* TODO: Fill-in other fields of iolink subtype
*/
- if (kdev->adev->gmc.xgmi.connected_to_cpu) {
+ if (kdev->adev->gmc.xgmi.connected_to_cpu ||
+ (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 3) &&
+ kdev->adev->smuio.funcs->get_pkg_type(kdev->adev) ==
+ AMDGPU_PKG_TYPE_APU)) {
+ bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3);
+ int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT :
+ KFD_CRAT_INTRA_SOCKET_WEIGHT;
+ uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
+ kdev->adev, NULL, true) : mem_bw;
+
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
@@ -2010,14 +2022,9 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
*/
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
- sub_type_hdr->num_hops_xgmi = 1;
- if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) {
- sub_type_hdr->minimum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
- kdev->adev, NULL, true);
- sub_type_hdr->maximum_bandwidth_mbs =
- sub_type_hdr->minimum_bandwidth_mbs;
- }
+ sub_type_hdr->weight_xgmi = weight;
+ sub_type_hdr->minimum_bandwidth_mbs = bandwidth;
+ sub_type_hdr->maximum_bandwidth_mbs = bandwidth;
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
@@ -2029,7 +2036,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_ACPI_NUMA
- if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE)
+ if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE &&
+ num_possible_nodes() > 1)
kfd_find_numa_node_in_srat(kdev);
#endif
#ifdef CONFIG_NUMA
@@ -2044,12 +2052,14 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
}
static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
- struct kfd_dev *kdev,
- struct kfd_dev *peer_kdev,
+ struct kfd_node *kdev,
+ struct kfd_node *peer_kdev,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain_from,
uint32_t proximity_domain_to)
{
+ bool use_ta_info = kdev->kfd->num_nodes == 1;
+
*avail_size -= sizeof(struct crat_subtype_iolink);
if (*avail_size < 0)
return -ENOMEM;
@@ -2064,12 +2074,25 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->proximity_domain_from = proximity_domain_from;
sub_type_hdr->proximity_domain_to = proximity_domain_to;
- sub_type_hdr->num_hops_xgmi =
- amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
- sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false);
- sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
+
+ if (use_ta_info) {
+ sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT *
+ amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
+ sub_type_hdr->maximum_bandwidth_mbs =
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev,
+ peer_kdev->adev, false);
+ sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
+ } else {
+ bool is_single_hop = kdev->kfd == peer_kdev->kfd;
+ int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT :
+ (2 * KFD_CRAT_INTRA_SOCKET_WEIGHT) + KFD_CRAT_XGMI_WEIGHT;
+ int mem_bw = 819200;
+
+ sub_type_hdr->weight_xgmi = weight;
+ sub_type_hdr->maximum_bandwidth_mbs = is_single_hop ? mem_bw : 0;
+ sub_type_hdr->minimum_bandwidth_mbs = is_single_hop ? mem_bw : 0;
+ }
return 0;
}
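
Worked values for the hard-coded weights above:

    /* Resulting CRAT link weights, given the cases this patch distinguishes:
     *   partitions on the same socket (kdev->kfd == peer_kdev->kfd):
     *       weight_xgmi = KFD_CRAT_INTRA_SOCKET_WEIGHT           = 13
     *   partitions on different sockets (no TA info, num_nodes > 1):
     *       weight_xgmi = 2 * 13 + KFD_CRAT_XGMI_WEIGHT          = 41
     *   single-node devices (TA info available):
     *       weight_xgmi = 15 * xgmi_hops_count                   = 15, 30, ...
     * Bandwidth (819200 MB/s) is only reported for the single-hop case.
     */
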
@@ -2081,7 +2104,7 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
* [OUT] actual size of data filled in crat_image
*/
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
- size_t *size, struct kfd_dev *kdev,
+ size_t *size, struct kfd_node *kdev,
uint32_t proximity_domain)
{
struct crat_header *crat_table = (struct crat_header *)pcrat_image;
@@ -2153,7 +2176,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
/* Check if this node supports IOMMU. During parsing this flag will
* translate to HSA_CAP_ATS_PRESENT
*/
- if (!kfd_iommu_check_device(kdev))
+ if (!kfd_iommu_check_device(kdev->kfd))
cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
crat_table->length += sub_type_hdr->length;
@@ -2216,12 +2239,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* (from other GPU to this GPU) will be added
* in kfd_parse_subtype_iolink.
*/
- if (kdev->hive_id) {
+ if (kdev->kfd->hive_id) {
for (nid = 0; nid < proximity_domain; ++nid) {
peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
if (!peer_dev->gpu)
continue;
- if (peer_dev->gpu->hive_id != kdev->hive_id)
+ if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)
continue;
sub_type_hdr = (typeof(sub_type_hdr))(
(char *)sub_type_hdr +
@@ -2255,12 +2278,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
* -- this option is not currently implemented.
* The assumption is that all AMD APUs will have CRAT
- * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
+ * @kdev: Valid kfd_node required if flags contain COMPUTE_UNIT_GPU
*
* Return 0 if successful else return -ve value
*/
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
- int flags, struct kfd_dev *kdev,
+ int flags, struct kfd_node *kdev,
uint32_t proximity_domain)
{
void *pcrat_image = NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index 8d1e8ba58dee..fc719389b5d6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -275,7 +275,7 @@ struct crat_subtype_iolink {
uint32_t maximum_bandwidth_mbs;
uint32_t recommended_transfer_size;
uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH - 1];
- uint8_t num_hops_xgmi;
+ uint8_t weight_xgmi;
};
/*
@@ -293,7 +293,7 @@ struct crat_subtype_generic {
#pragma pack()
-struct kfd_dev;
+struct kfd_node;
/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
@@ -305,14 +305,14 @@ struct kfd_gpu_cache_info {
*/
uint32_t num_cu_shared;
};
-int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info);
+int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info);
int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
void kfd_destroy_crat_image(void *crat_image);
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
uint32_t proximity_domain);
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
- int flags, struct kfd_dev *kdev,
+ int flags, struct kfd_node *kdev,
uint32_t proximity_domain);
#endif /* KFD_CRAT_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
new file mode 100644
index 000000000000..fff3ccc04fa9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -0,0 +1,1118 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kfd_debug.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_topology.h"
+#include <linux/file.h>
+#include <uapi/linux/kfd_ioctl.h>
+
+#define MAX_WATCH_ADDRESSES 4
+
+int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
+ unsigned int *queue_id,
+ unsigned int *gpu_id,
+ uint64_t exception_clear_mask,
+ uint64_t *event_status)
+{
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ int i;
+
+ if (!(process && process->debug_trap_enabled))
+ return -ENODATA;
+
+ mutex_lock(&process->event_mutex);
+ *event_status = 0;
+ *queue_id = 0;
+ *gpu_id = 0;
+
+ /* find and report queue events */
+ pqm = &process->pqm;
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ uint64_t tmp = process->exception_enable_mask;
+
+ if (!pqn->q)
+ continue;
+
+ tmp &= pqn->q->properties.exception_status;
+
+ if (!tmp)
+ continue;
+
+ *event_status = pqn->q->properties.exception_status;
+ *queue_id = pqn->q->properties.queue_id;
+ *gpu_id = pqn->q->device->id;
+ pqn->q->properties.exception_status &= ~exception_clear_mask;
+ goto out;
+ }
+
+ /* find and report device events */
+ for (i = 0; i < process->n_pdds; i++) {
+ struct kfd_process_device *pdd = process->pdds[i];
+ uint64_t tmp = process->exception_enable_mask
+ & pdd->exception_status;
+
+ if (!tmp)
+ continue;
+
+ *event_status = pdd->exception_status;
+ *gpu_id = pdd->dev->id;
+ pdd->exception_status &= ~exception_clear_mask;
+ goto out;
+ }
+
+ /* report process events */
+ if (process->exception_enable_mask & process->exception_status) {
+ *event_status = process->exception_status;
+ process->exception_status &= ~exception_clear_mask;
+ }
+
+out:
+ mutex_unlock(&process->event_mutex);
+ return *event_status ? 0 : -EAGAIN;
+}
+
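
Since the query op returns -EAGAIN once drained, a debugger blocks on the event descriptor (written with '.' by kfd_dbg_ev_raise() below) and then polls the ioctl. A hedged sketch; handle_event() is a hypothetical callback:

    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/kfd_ioctl.h>

    extern void handle_event(__u32 gpu_id, __u32 queue_id, __u64 status);

    static void debugger_event_loop(int kfd_fd, int dbg_fd, __u64 pid)
    {
            struct pollfd pfd = { .fd = dbg_fd, .events = POLLIN };

            while (poll(&pfd, 1, -1) > 0) {
                    for (;;) {
                            struct kfd_ioctl_dbg_trap_args args = {0};

                            args.pid = pid;
                            args.op  = KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT;
                            args.query_debug_event.exception_mask = ~0ULL; /* clear all seen */

                            /* kfd_dbg_ev_query_debug_event() returns -EAGAIN when drained */
                            if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))
                                    break;

                            handle_event(args.query_debug_event.gpu_id,
                                         args.query_debug_event.queue_id,
                                         args.query_debug_event.exception_mask);
                    }
            }
    }
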
+void debug_event_write_work_handler(struct work_struct *work)
+{
+ struct kfd_process *process;
+
+ static const char write_data = '.';
+ loff_t pos = 0;
+
+ process = container_of(work,
+ struct kfd_process,
+ debug_event_workarea);
+
+ kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
+}
+
+/* update process/device/queue exception status, write to descriptor
+ * only if exception_status is enabled.
+ */
+bool kfd_dbg_ev_raise(uint64_t event_mask,
+ struct kfd_process *process, struct kfd_node *dev,
+ unsigned int source_id, bool use_worker,
+ void *exception_data, size_t exception_data_size)
+{
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ int i;
+ static const char write_data = '.';
+ loff_t pos = 0;
+ bool is_subscribed = true;
+
+ if (!(process && process->debug_trap_enabled))
+ return false;
+
+ mutex_lock(&process->event_mutex);
+
+ if (event_mask & KFD_EC_MASK_DEVICE) {
+ for (i = 0; i < process->n_pdds; i++) {
+ struct kfd_process_device *pdd = process->pdds[i];
+
+ if (pdd->dev != dev)
+ continue;
+
+ pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE;
+
+ if (event_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
+ if (!pdd->vm_fault_exc_data) {
+ pdd->vm_fault_exc_data = kmemdup(
+ exception_data,
+ exception_data_size,
+ GFP_KERNEL);
+ if (!pdd->vm_fault_exc_data)
+ pr_debug("Failed to allocate exception data memory");
+ } else {
+ pr_debug("Debugger exception data not saved\n");
+ print_hex_dump_bytes("exception data: ",
+ DUMP_PREFIX_OFFSET,
+ exception_data,
+ exception_data_size);
+ }
+ }
+ break;
+ }
+ } else if (event_mask & KFD_EC_MASK_PROCESS) {
+ process->exception_status |= event_mask & KFD_EC_MASK_PROCESS;
+ } else {
+ pqm = &process->pqm;
+ list_for_each_entry(pqn, &pqm->queues,
+ process_queue_list) {
+ int target_id;
+
+ if (!pqn->q)
+ continue;
+
+ target_id = event_mask & KFD_EC_MASK(EC_QUEUE_NEW) ?
+ pqn->q->properties.queue_id :
+ pqn->q->doorbell_id;
+
+ if (pqn->q->device != dev || target_id != source_id)
+ continue;
+
+ pqn->q->properties.exception_status |= event_mask;
+ break;
+ }
+ }
+
+ if (process->exception_enable_mask & event_mask) {
+ if (use_worker)
+ schedule_work(&process->debug_event_workarea);
+ else
+ kernel_write(process->dbg_ev_file,
+ &write_data,
+ 1,
+ &pos);
+ } else {
+ is_subscribed = false;
+ }
+
+ mutex_unlock(&process->event_mutex);
+
+ return is_subscribed;
+}
+
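
The routing above keys off which partition of the 64-bit exception mask fired; assuming the UAPI-style encoding KFD_EC_MASK(ec) == 1ULL << (ec - 1) (check kfd_ioctl.h for the authoritative definition):

    /* How kfd_dbg_ev_raise() dispatches a few masks used in this patch:
     *   KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) in KFD_EC_MASK_DEVICE  -> pdd
     *   KFD_EC_MASK(EC_PROCESS_RUNTIME)         in KFD_EC_MASK_PROCESS -> process
     *   KFD_EC_MASK(EC_QUEUE_NEW)               in the queue range     -> pqn->q,
     *       matched by queue_id for EC_QUEUE_NEW, by doorbell_id otherwise.
     */
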
+/* set pending event queue entry from ring entry */
+bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
+ unsigned int pasid,
+ uint32_t doorbell_id,
+ uint64_t trap_mask,
+ void *exception_data,
+ size_t exception_data_size)
+{
+ struct kfd_process *p;
+ bool signaled_to_debugger_or_runtime = false;
+
+ p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return false;
+
+ if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
+ exception_data, exception_data_size)) {
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+
+ if (!!(trap_mask & KFD_EC_MASK_QUEUE) &&
+ p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
+ mutex_lock(&p->mutex);
+
+ pqm = &p->pqm;
+ list_for_each_entry(pqn, &pqm->queues,
+ process_queue_list) {
+
+ if (!(pqn->q && pqn->q->device == dev &&
+ pqn->q->doorbell_id == doorbell_id))
+ continue;
+
+ kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id,
+ trap_mask);
+
+ signaled_to_debugger_or_runtime = true;
+
+ break;
+ }
+
+ mutex_unlock(&p->mutex);
+ } else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
+ kfd_dqm_evict_pasid(dev->dqm, p->pasid);
+ kfd_signal_vm_fault_event(dev, p->pasid, NULL,
+ exception_data);
+
+ signaled_to_debugger_or_runtime = true;
+ }
+ } else {
+ signaled_to_debugger_or_runtime = true;
+ }
+
+ kfd_unref_process(p);
+
+ return signaled_to_debugger_or_runtime;
+}
+
+int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int dev_id,
+ unsigned int queue_id,
+ uint64_t error_reason)
+{
+ if (error_reason & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
+ struct kfd_process_device *pdd = NULL;
+ struct kfd_hsa_memory_exception_data *data;
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->dev->id == dev_id) {
+ pdd = p->pdds[i];
+ break;
+ }
+ }
+
+ if (!pdd)
+ return -ENODEV;
+
+ data = (struct kfd_hsa_memory_exception_data *)
+ pdd->vm_fault_exc_data;
+
+ kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
+ kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
+ error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
+ }
+
+ if (error_reason & (KFD_EC_MASK(EC_PROCESS_RUNTIME))) {
+ /*
+ * block should only happen after the debugger receives runtime
+ * enable notice.
+ */
+ up(&p->runtime_enable_sema);
+ error_reason &= ~KFD_EC_MASK(EC_PROCESS_RUNTIME);
+ }
+
+ if (error_reason)
+ return kfd_send_exception_to_runtime(p, queue_id, error_reason);
+
+ return 0;
+}
+
+static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
+{
+ struct mqd_update_info minfo = {0};
+ int err;
+
+ if (!q)
+ return 0;
+
+ if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
+ KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (enable && q->properties.is_user_cu_masked)
+ return -EBUSY;
+
+ minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE;
+
+ q->properties.is_dbg_wa = enable;
+ err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);
+ if (err)
+ q->properties.is_dbg_wa = false;
+
+ return err;
+}
+
+static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable)
+{
+ struct process_queue_manager *pqm = &target->pqm;
+ struct process_queue_node *pqn;
+ int r = 0;
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ r = kfd_dbg_set_queue_workaround(pqn->q, enable);
+ if (enable && r)
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list)
+ kfd_dbg_set_queue_workaround(pqn->q, false);
+
+ if (enable)
+ target->runtime_info.runtime_state = r == -EBUSY ?
+ DEBUG_RUNTIME_STATE_ENABLED_BUSY :
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR;
+
+ return r;
+}
+
+int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd)
+{
+ uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
+ uint32_t flags = pdd->process->dbg_flags;
+ bool sq_trap_en = !!spi_dbg_cntl;
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
+ return 0;
+
+ return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
+ pdd->watch_points, flags, sq_trap_en);
+}
+
+#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1
+static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id)
+{
+ int i;
+
+ *watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;
+
+ spin_lock(&pdd->dev->kfd->watch_points_lock);
+
+ for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
+ /* device watchpoint in use so skip */
+ if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
+ continue;
+
+ pdd->alloc_watch_ids |= 0x1 << i;
+ pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
+ *watch_id = i;
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ return 0;
+ }
+
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+
+ return -ENOMEM;
+}
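
An example of the two-level bitmask state this allocator maintains (values illustrative):

    /* With MAX_WATCH_ADDRESSES == 4:
     *   pdd->dev->kfd->alloc_watch_ids = 0b0101  -> IDs 0 and 2 in use device-wide
     *   pdd->alloc_watch_ids           = 0b0100  -> this process owns only ID 2
     * The scan above picks the first clear device-wide bit (ID 1 here) and
     * sets it in both masks under watch_points_lock, so ownership can later
     * be verified on clear via kfd_dbg_owns_dev_watch_id().
     */
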
+
+static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
+{
+ spin_lock(&pdd->dev->kfd->watch_points_lock);
+
+ /* process owns device watch point so safe to clear */
+ if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
+ pdd->alloc_watch_ids &= ~(0x1 << watch_id);
+ pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
+ }
+
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+}
+
+static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
+{
+ bool owns_watch_id = false;
+
+ spin_lock(&pdd->dev->kfd->watch_points_lock);
+ owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
+ ((pdd->alloc_watch_ids >> watch_id) & 0x1);
+
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+
+ return owns_watch_id;
+}
+
+int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
+ uint32_t watch_id)
+{
+ int r;
+
+ if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id))
+ return -EINVAL;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes) {
+ r = debug_lock_and_unmap(pdd->dev->dqm);
+ if (r)
+ return r;
+ }
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch(
+ pdd->dev->adev,
+ watch_id);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_map_and_unlock(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd);
+
+ kfd_dbg_clear_dev_watch_id(pdd, watch_id);
+
+ return r;
+}
+
+int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t *watch_id,
+ uint32_t watch_mode)
+{
+ int r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
+
+ if (r)
+ return r;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes) {
+ r = debug_lock_and_unmap(pdd->dev->dqm);
+ if (r) {
+ kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
+ return r;
+ }
+ }
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
+ pdd->dev->adev,
+ watch_address,
+ watch_address_mask,
+ *watch_id,
+ watch_mode,
+ pdd->dev->vm_info.last_vmid_kfd);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_map_and_unlock(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd);
+
+ /* HWS is broken so no point in HW rollback but release the watchpoint anyways */
+ if (r)
+ kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
+
+ return 0;
+}
+
+static void kfd_dbg_clear_process_address_watch(struct kfd_process *target)
+{
+ int i, j;
+
+ for (i = 0; i < target->n_pdds; i++)
+ for (j = 0; j < MAX_WATCH_ADDRESSES; j++)
+ kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j);
+}
+
+int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
+{
+ uint32_t prev_flags = target->dbg_flags;
+ int i, r = 0, rewind_count = 0;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ if (!kfd_dbg_is_per_vmid_supported(target->pdds[i]->dev) &&
+ (*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) {
+ *flags = prev_flags;
+ return -EACCES;
+ }
+ }
+
+ target->dbg_flags = *flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP;
+ *flags = prev_flags;
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
+ continue;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd);
+
+ if (r) {
+ target->dbg_flags = prev_flags;
+ break;
+ }
+
+ rewind_count++;
+ }
+
+ /* Rewind flags */
+ if (r) {
+ target->dbg_flags = prev_flags;
+
+ for (i = 0; i < rewind_count; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
+ continue;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ debug_refresh_runlist(pdd->dev->dqm);
+ else
+ kfd_dbg_set_mes_debug_mode(pdd);
+ }
+ }
+
+ return r;
+}
+
+/* kfd_dbg_trap_deactivate:
+ * target: target process
+ * unwind: If this is unwinding a failed kfd_dbg_trap_enable()
+ * unwind_count:
+ * If unwind == true, how far down the pdd list we need
+ * to unwind
+ * else: ignored
+ */
+void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count)
+{
+ int i;
+
+ if (!unwind) {
+ uint32_t flags = 0;
+ int resume_count = resume_queues(target, 0, NULL);
+
+ if (resume_count)
+ pr_debug("Resumed %d queues\n", resume_count);
+
+ cancel_work_sync(&target->debug_event_workarea);
+ kfd_dbg_clear_process_address_watch(target);
+ kfd_dbg_trap_set_wave_launch_mode(target, 0);
+
+ kfd_dbg_trap_set_flags(target, &flags);
+ }
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ /* If this is an unwind, and we have unwound the required
+ * enable calls on the pdd list, we need to stop now
+ * otherwise we may mess up another debugger session.
+ */
+ if (unwind && i == unwind_count)
+ break;
+
+ kfd_process_set_trap_debug_flag(&pdd->qpd, false);
+
+ /* GFX off is already disabled by debug activate if not RLC restore supported. */
+ if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->spi_dbg_override =
+ pdd->dev->kfd2kgd->disable_debug_trap(
+ pdd->dev->adev,
+ target->runtime_info.ttmp_setup,
+ pdd->dev->vm_info.last_vmid_kfd);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev) &&
+ release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd))
+ pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ debug_refresh_runlist(pdd->dev->dqm);
+ else
+ kfd_dbg_set_mes_debug_mode(pdd);
+ }
+
+ kfd_dbg_set_workaround(target, false);
+}
+
+static void kfd_dbg_clean_exception_status(struct kfd_process *target)
+{
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ int i;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ kfd_process_drain_interrupts(pdd);
+
+ pdd->exception_status = 0;
+ }
+
+ pqm = &target->pqm;
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (!pqn->q)
+ continue;
+
+ pqn->q->properties.exception_status = 0;
+ }
+
+ target->exception_status = 0;
+}
+
+int kfd_dbg_trap_disable(struct kfd_process *target)
+{
+ if (!target->debug_trap_enabled)
+ return 0;
+
+ /*
+ * Defer deactivation to runtime if runtime not enabled otherwise reset
+ * attached running target runtime state to enable for re-attach.
+ */
+ if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
+ kfd_dbg_trap_deactivate(target, false, 0);
+ else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+ target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+
+ fput(target->dbg_ev_file);
+ target->dbg_ev_file = NULL;
+
+ if (target->debugger_process) {
+ atomic_dec(&target->debugger_process->debugged_process_count);
+ target->debugger_process = NULL;
+ }
+
+ target->debug_trap_enabled = false;
+ kfd_dbg_clean_exception_status(target);
+ kfd_unref_process(target);
+
+ return 0;
+}
+
+int kfd_dbg_trap_activate(struct kfd_process *target)
+{
+ int i, r = 0;
+
+ r = kfd_dbg_set_workaround(target, true);
+ if (r)
+ return r;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+ r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd);
+
+ if (r) {
+ target->runtime_info.runtime_state = (r == -EBUSY) ?
+ DEBUG_RUNTIME_STATE_ENABLED_BUSY :
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR;
+
+ goto unwind_err;
+ }
+ }
+
+ /* Disable GFX OFF to prevent garbage read/writes to debug registers.
+ * If RLC restore of debug registers is not supported and runtime enable
+ * hasn't done so already on ttmp setup request, restore the trap config registers.
+ *
+ * If RLC restore of debug registers is not supported, keep gfx off disabled for
+ * the debug session.
+ */
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ if (!(kfd_dbg_is_rlc_restore_supported(pdd->dev) ||
+ target->runtime_info.ttmp_setup))
+ pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true,
+ pdd->dev->vm_info.last_vmid_kfd);
+
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
+ pdd->dev->adev,
+ false,
+ pdd->dev->vm_info.last_vmid_kfd);
+
+ if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ /*
+ * Setting the debug flag in the trap handler requires that the TMA has been
+ * allocated, which occurs during CWSR initialization.
+ * In the event that CWSR has not been initialized at this point, setting the
+ * flag will be called again during CWSR initialization if the target process
+ * is still debug enabled.
+ */
+ kfd_process_set_trap_debug_flag(&pdd->qpd, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd);
+
+ if (r) {
+ target->runtime_info.runtime_state =
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR;
+ goto unwind_err;
+ }
+ }
+
+ return 0;
+
+unwind_err:
+ /* Enabling debug failed, we need to disable on
+ * all GPUs so the enable is all or nothing.
+ */
+ kfd_dbg_trap_deactivate(target, true, i);
+ return r;
+}
+
+int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
+ void __user *runtime_info, uint32_t *runtime_size)
+{
+ struct file *f;
+ uint32_t copy_size;
+ int i, r = 0;
+
+ if (target->debug_trap_enabled)
+ return -EALREADY;
+
+ /* Enable pre-checks */
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!KFD_IS_SOC15(pdd->dev))
+ return -ENODEV;
+
+ if (!kfd_dbg_has_gws_support(pdd->dev) && pdd->qpd.num_gws)
+ return -EBUSY;
+ }
+
+ copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info));
+
+ f = fget(fd);
+ if (!f) {
+ pr_err("Failed to get file for (%i)\n", fd);
+ return -EBADF;
+ }
+
+ target->dbg_ev_file = f;
+
+ /* defer activation to runtime if not runtime enabled */
+ if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
+ kfd_dbg_trap_activate(target);
+
+ /* We already hold the process reference but hold another one for the
+ * debug session.
+ */
+ kref_get(&target->ref);
+ target->debug_trap_enabled = true;
+
+ if (target->debugger_process)
+ atomic_inc(&target->debugger_process->debugged_process_count);
+
+ if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) {
+ kfd_dbg_trap_deactivate(target, false, 0);
+ r = -EFAULT;
+ }
+
+ *runtime_size = sizeof(target->runtime_info);
+
+ return r;
+}
+
+static int kfd_dbg_validate_trap_override_request(struct kfd_process *p,
+ uint32_t trap_override,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_supported)
+{
+ int i = 0;
+
+ *trap_mask_supported = 0xffffffff;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ int err = pdd->dev->kfd2kgd->validate_trap_override_request(
+ pdd->dev->adev,
+ trap_override,
+ trap_mask_supported);
+
+ if (err)
+ return err;
+ }
+
+ if (trap_mask_request & ~*trap_mask_supported)
+ return -EACCES;
+
+ return 0;
+}
+
+int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t *trap_mask_supported)
+{
+ int r = 0, i;
+
+ r = kfd_dbg_validate_trap_override_request(target,
+ trap_override,
+ trap_mask_request,
+ trap_mask_supported);
+
+ if (r)
+ return r;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->set_wave_launch_trap_override(
+ pdd->dev->adev,
+ pdd->dev->vm_info.last_vmid_kfd,
+ trap_override,
+ trap_mask_bits,
+ trap_mask_request,
+ trap_mask_prev,
+ pdd->spi_dbg_override);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd);
+
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
+ uint8_t wave_launch_mode)
+{
+ int r = 0, i;
+
+ if (wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL &&
+ wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT &&
+ wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG)
+ return -EINVAL;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->spi_dbg_launch_mode = pdd->dev->kfd2kgd->set_wave_launch_mode(
+ pdd->dev->adev,
+ wave_launch_mode,
+ pdd->dev->vm_info.last_vmid_kfd);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd);
+
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
+ uint32_t source_id,
+ uint32_t exception_code,
+ bool clear_exception,
+ void __user *info,
+ uint32_t *info_size)
+{
+ bool found = false;
+ int r = 0;
+ uint32_t copy_size, actual_info_size = 0;
+ uint64_t *exception_status_ptr = NULL;
+
+ if (!target)
+ return -EINVAL;
+
+ if (!info || !info_size)
+ return -EINVAL;
+
+ mutex_lock(&target->event_mutex);
+
+ if (KFD_DBG_EC_TYPE_IS_QUEUE(exception_code)) {
+ /* Per queue exceptions */
+ struct queue *queue = NULL;
+ int i;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ list_for_each_entry(queue, &qpd->queues_list, list) {
+ if (!found && queue->properties.queue_id == source_id) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+
+ if (!found) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!(queue->properties.exception_status & KFD_EC_MASK(exception_code))) {
+ r = -ENODATA;
+ goto out;
+ }
+ exception_status_ptr = &queue->properties.exception_status;
+ } else if (KFD_DBG_EC_TYPE_IS_DEVICE(exception_code)) {
+ /* Per device exceptions */
+ struct kfd_process_device *pdd = NULL;
+ int i;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ pdd = target->pdds[i];
+ if (pdd->dev->id == source_id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!(pdd->exception_status & KFD_EC_MASK(exception_code))) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ if (exception_code == EC_DEVICE_MEMORY_VIOLATION) {
+ copy_size = min((size_t)(*info_size), pdd->vm_fault_exc_data_size);
+
+ if (copy_to_user(info, pdd->vm_fault_exc_data, copy_size)) {
+ r = -EFAULT;
+ goto out;
+ }
+ actual_info_size = pdd->vm_fault_exc_data_size;
+ if (clear_exception) {
+ kfree(pdd->vm_fault_exc_data);
+ pdd->vm_fault_exc_data = NULL;
+ pdd->vm_fault_exc_data_size = 0;
+ }
+ }
+ exception_status_ptr = &pdd->exception_status;
+ } else if (KFD_DBG_EC_TYPE_IS_PROCESS(exception_code)) {
+ /* Per process exceptions */
+ if (!(target->exception_status & KFD_EC_MASK(exception_code))) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ if (exception_code == EC_PROCESS_RUNTIME) {
+ copy_size = min((size_t)(*info_size), sizeof(target->runtime_info));
+
+ if (copy_to_user(info, (void *)&target->runtime_info, copy_size)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ actual_info_size = sizeof(target->runtime_info);
+ }
+
+ exception_status_ptr = &target->exception_status;
+ } else {
+ pr_debug("Bad exception type [%i]\n", exception_code);
+ r = -EINVAL;
+ goto out;
+ }
+
+ *info_size = actual_info_size;
+ if (clear_exception)
+ *exception_status_ptr &= ~KFD_EC_MASK(exception_code);
+out:
+ mutex_unlock(&target->event_mutex);
+ return r;
+}
+
+int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
+ uint64_t exception_clear_mask,
+ void __user *user_info,
+ uint32_t *number_of_device_infos,
+ uint32_t *entry_size)
+{
+ struct kfd_dbg_device_info_entry device_info;
+ uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
+ int i, r = 0;
+
+ if (!(target && user_info && number_of_device_infos && entry_size))
+ return -EINVAL;
+
+ tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
+ *number_of_device_infos = target->n_pdds;
+ *entry_size = min_t(size_t, *entry_size, sizeof(device_info));
+
+ if (!tmp_num_devices)
+ return 0;
+
+ memset(&device_info, 0, sizeof(device_info));
+
+ mutex_lock(&target->event_mutex);
+
+ /* Iterate over the process's per-device data (PDDs) */
+ for (i = 0; i < tmp_num_devices; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+ struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+
+ device_info.gpu_id = pdd->dev->id;
+ device_info.exception_status = pdd->exception_status;
+ device_info.lds_base = pdd->lds_base;
+ device_info.lds_limit = pdd->lds_limit;
+ device_info.scratch_base = pdd->scratch_base;
+ device_info.scratch_limit = pdd->scratch_limit;
+ device_info.gpuvm_base = pdd->gpuvm_base;
+ device_info.gpuvm_limit = pdd->gpuvm_limit;
+ device_info.location_id = topo_dev->node_props.location_id;
+ device_info.vendor_id = topo_dev->node_props.vendor_id;
+ device_info.device_id = topo_dev->node_props.device_id;
+ device_info.revision_id = pdd->dev->adev->pdev->revision;
+ device_info.subsystem_vendor_id = pdd->dev->adev->pdev->subsystem_vendor;
+ device_info.subsystem_device_id = pdd->dev->adev->pdev->subsystem_device;
+ device_info.fw_version = pdd->dev->kfd->mec_fw_version;
+ device_info.gfx_target_version =
+ topo_dev->node_props.gfx_target_version;
+ device_info.simd_count = topo_dev->node_props.simd_count;
+ device_info.max_waves_per_simd =
+ topo_dev->node_props.max_waves_per_simd;
+ device_info.array_count = topo_dev->node_props.array_count;
+ device_info.simd_arrays_per_engine =
+ topo_dev->node_props.simd_arrays_per_engine;
+ device_info.num_xcc = NUM_XCC(pdd->dev->xcc_mask);
+ device_info.capability = topo_dev->node_props.capability;
+ device_info.debug_prop = topo_dev->node_props.debug_prop;
+
+ if (exception_clear_mask)
+ pdd->exception_status &= ~exception_clear_mask;
+
+ if (copy_to_user(user_info, &device_info, *entry_size)) {
+ r = -EFAULT;
+ break;
+ }
+
+ user_info += tmp_entry_size;
+ }
+
+ mutex_unlock(&target->event_mutex);
+
+ return r;
+}
+
+void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
+ uint64_t exception_set_mask)
+{
+ uint64_t found_mask = 0;
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ static const char write_data = '.';
+ loff_t pos = 0;
+ int i;
+
+ mutex_lock(&target->event_mutex);
+
+ found_mask |= target->exception_status;
+
+ pqm = &target->pqm;
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (!pqn->q)
+ continue;
+
+ found_mask |= pqn->q->properties.exception_status;
+ }
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ found_mask |= pdd->exception_status;
+ }
+
+ if (exception_set_mask & found_mask)
+ kernel_write(target->dbg_ev_file, &write_data, 1, &pos);
+
+ target->exception_enable_mask = exception_set_mask;
+
+ mutex_unlock(&target->event_mutex);
+}
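
All three query paths above (queue, device, process) funnel through a per-source 64-bit exception_status word that is tested and cleared with KFD_EC_MASK(). A standalone sketch of that bookkeeping, assuming the one-bit-per-exception-code encoding from kfd_priv.h (bit code-1; the code values below are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define KFD_EC_MASK(ecode) (1ULL << ((ecode) - 1))

    int main(void)
    {
        uint64_t exception_status = 0;
        const int EC_A = 1, EC_B = 2;   /* hypothetical exception codes */

        exception_status |= KFD_EC_MASK(EC_A);  /* raise an exception */

        if (!(exception_status & KFD_EC_MASK(EC_B)))
            printf("EC_B not pending: the -ENODATA path\n");

        exception_status &= ~KFD_EC_MASK(EC_A); /* clear_exception */
        printf("status after clear: 0x%llx\n",
               (unsigned long long)exception_status);
        return 0;
    }
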
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
new file mode 100644
index 000000000000..a289e59ceb79
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_DEBUG_EVENTS_H_INCLUDED
+#define KFD_DEBUG_EVENTS_H_INCLUDED
+
+#include "kfd_priv.h"
+
+void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count);
+int kfd_dbg_trap_activate(struct kfd_process *target);
+int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
+ unsigned int *queue_id,
+ unsigned int *gpu_id,
+ uint64_t exception_clear_mask,
+ uint64_t *event_status);
+bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
+ unsigned int pasid,
+ uint32_t doorbell_id,
+ uint64_t trap_mask,
+ void *exception_data,
+ size_t exception_data_size);
+bool kfd_dbg_ev_raise(uint64_t event_mask,
+ struct kfd_process *process, struct kfd_node *dev,
+ unsigned int source_id, bool use_worker,
+ void *exception_data,
+ size_t exception_data_size);
+int kfd_dbg_trap_disable(struct kfd_process *target);
+int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
+ void __user *runtime_info,
+ uint32_t *runtime_info_size);
+int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t *trap_mask_supported);
+int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
+ uint8_t wave_launch_mode);
+int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
+ uint32_t watch_id);
+int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t *watch_id,
+ uint32_t watch_mode);
+int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags);
+int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
+ uint32_t source_id,
+ uint32_t exception_code,
+ bool clear_exception,
+ void __user *info,
+ uint32_t *info_size);
+int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int dev_id,
+ unsigned int queue_id,
+ uint64_t error_reason);
+
+static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
+{
+ return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0);
+}
+
+void debug_event_write_work_handler(struct work_struct *work);
+int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
+ uint64_t exception_clear_mask,
+ void __user *user_info,
+ uint32_t *number_of_device_infos,
+ uint32_t *entry_size);
+
+void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
+ uint64_t exception_set_mask);
+/*
+ * If GFX off is enabled, chips that do not support RLC restore for the debug
+ * registers will disable GFX off temporarily for the entire debug session.
+ * See disable_on_trap_action_entry and enable_on_trap_action_exit for details.
+ */
+static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev)
+{
+ return !(KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 10) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));
+}
+
+static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
+{
+ if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)
+ && dev->kfd->mec2_fw_version < 0x81b6) ||
+ (KFD_GC_VERSION(dev) >= IP_VERSION(9, 1, 0)
+ && KFD_GC_VERSION(dev) <= IP_VERSION(9, 2, 2)
+ && dev->kfd->mec2_fw_version < 0x1b6) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0)
+ && dev->kfd->mec2_fw_version < 0x1b6) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1)
+ && dev->kfd->mec2_fw_version < 0x30) ||
+ (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0)))
+ return false;
+
+ /* Assume debugging and cooperative launch are supported otherwise. */
+ return true;
+}
+
+int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd);
+#endif
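
The feature checks in this header rely on IP_VERSION() packing major, minor and revision into one integer so that range comparisons such as >= IP_VERSION(11, 0, 0) order correctly. A small sketch of that encoding (the macro below mirrors the amdgpu definition as understood here; treat it as illustrative):

    #include <stdio.h>

    /* assumed packing: major in bits 16+, minor in bits 8-15, revision
     * in bits 0-7, which is what makes the range checks above valid */
    #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

    int main(void)
    {
        unsigned int gc = IP_VERSION(9, 4, 2);

        if (gc == IP_VERSION(9, 4, 2) || gc >= IP_VERSION(11, 0, 0))
            printf("per-VMID debugging supported\n");

        if (!(gc == IP_VERSION(10, 1, 10) || gc == IP_VERSION(10, 1, 1)))
            printf("RLC restore of debug registers supported\n");
        return 0;
    }
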
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index ad5a40a685ac..4a5a0a4e00f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -43,7 +43,7 @@ static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
const char __user *user_buf, size_t size, loff_t *ppos)
{
- struct kfd_dev *dev;
+ struct kfd_node *dev;
char tmp[16];
uint32_t gpu_id;
int ret = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 00f528eb9812..9d4abfd8b55e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -32,8 +32,10 @@
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
+#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "amdgpu.h"
+#include "amdgpu_xcp.h"
#define MQD_SIZE_ALIGNED 768
@@ -42,7 +44,7 @@
* once locked, kfd driver will stop any further GPU execution.
* create process (open) will return -EAGAIN.
*/
-static atomic_t kfd_locked = ATOMIC_INIT(0);
+static int kfd_locked;
#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
@@ -51,6 +53,7 @@ extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
+extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
@@ -60,7 +63,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume_iommu(struct kfd_dev *kfd);
-static int kfd_resume(struct kfd_dev *kfd);
+static int kfd_resume(struct kfd_node *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
@@ -81,6 +84,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(4, 2, 0):/* VEGA20 */
case IP_VERSION(4, 2, 2):/* ARCTURUS */
case IP_VERSION(4, 4, 0):/* ALDEBARAN */
+ case IP_VERSION(4, 4, 2):
case IP_VERSION(5, 0, 0):/* NAVI10 */
case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
case IP_VERSION(5, 0, 2):/* NAVI14 */
@@ -102,20 +106,19 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
kfd->device_info.num_sdma_queues_per_engine = 8;
}
+ bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
+
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
- kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
- break;
- case IP_VERSION(6, 0, 1):
- /* Reserve 1 for paging and 1 for gfx */
- kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
- /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
- kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
+ kfd->adev->sdma.num_instances *
+ kfd->device_info.num_reserved_sdma_queues_per_engine);
break;
default:
break;
@@ -135,6 +138,9 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(9, 4, 0): /* VEGA20 */
case IP_VERSION(9, 4, 1): /* ARCTURUS */
case IP_VERSION(9, 4, 2): /* ALDEBARAN */
+ case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ break;
case IP_VERSION(10, 3, 1): /* VANGOGH */
case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
@@ -148,7 +154,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
- kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -327,8 +333,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
- gfx_target_version = 90400;
- f2g = &aldebaran_kfd2kgd;
+ gfx_target_version = adev->rev_id >= 1 ? 90402
+ : adev->flags & AMD_IS_APU ? 90400
+ : 90401;
+ f2g = &gc_9_4_3_kfd2kgd;
break;
/* Navi10 */
case IP_VERSION(10, 1, 10):
@@ -406,8 +414,15 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
- /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
- gfx_target_version = 110001;
+ if ((adev->pdev->device == 0x7460 &&
+ adev->pdev->revision == 0x00) ||
+ (adev->pdev->device == 0x7461 &&
+ adev->pdev->revision == 0x00))
+ /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
+ gfx_target_version = 110005;
+ else
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
default:
@@ -440,8 +455,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
memset(&kfd->doorbell_available_index, 0,
sizeof(kfd->doorbell_available_index));
- atomic_set(&kfd->sram_ecc_flag, 0);
-
ida_init(&kfd->doorbell_ida);
return kfd;
@@ -488,41 +501,112 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
}
-static int kfd_gws_init(struct kfd_dev *kfd)
+static int kfd_gws_init(struct kfd_node *node)
{
int ret = 0;
+ struct kfd_dev *kfd = node->kfd;
- if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
return 0;
- if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
- ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
+ if (hws_gws_support || (KFD_IS_SOC15(node) &&
+ ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
&& kfd->mec2_fw_version >= 0x81b3) ||
- (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
+ (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
&& kfd->mec2_fw_version >= 0x1b3) ||
- (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
&& kfd->mec2_fw_version >= 0x30) ||
- (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
&& kfd->mec2_fw_version >= 0x28) ||
- (KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0)
- && KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)
+ (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
+ && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
&& kfd->mec2_fw_version >= 0x6b))))
- ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
- kfd->adev->gds.gws_size, &kfd->gws);
+ ret = amdgpu_amdkfd_alloc_gws(node->adev,
+ node->adev->gds.gws_size, &node->gws);
return ret;
}
-static void kfd_smi_init(struct kfd_dev *dev)
+static void kfd_smi_init(struct kfd_node *dev)
{
INIT_LIST_HEAD(&dev->smi_clients);
spin_lock_init(&dev->smi_lock);
}
+static int kfd_init_node(struct kfd_node *node)
+{
+ int err = -1;
+
+ if (kfd_interrupt_init(node)) {
+ dev_err(kfd_device, "Error initializing interrupts\n");
+ goto kfd_interrupt_error;
+ }
+
+ node->dqm = device_queue_manager_init(node);
+ if (!node->dqm) {
+ dev_err(kfd_device, "Error initializing queue manager\n");
+ goto device_queue_manager_error;
+ }
+
+ if (kfd_gws_init(node)) {
+ dev_err(kfd_device, "Could not allocate %d gws\n",
+ node->adev->gds.gws_size);
+ goto gws_error;
+ }
+
+ if (kfd_resume(node))
+ goto kfd_resume_error;
+
+ if (kfd_topology_add_device(node)) {
+ dev_err(kfd_device, "Error adding device to topology\n");
+ goto kfd_topology_add_device_error;
+ }
+
+ kfd_smi_init(node);
+
+ return 0;
+
+kfd_topology_add_device_error:
+kfd_resume_error:
+gws_error:
+ device_queue_manager_uninit(node->dqm);
+device_queue_manager_error:
+ kfd_interrupt_exit(node);
+kfd_interrupt_error:
+ if (node->gws)
+ amdgpu_amdkfd_free_gws(node->adev, node->gws);
+
+ /* Clean up the node memory here */
+ kfree(node);
+ return err;
+}
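
kfd_init_node() above uses the kernel's stacked-goto cleanup idiom: each failure jumps to a label that unwinds exactly the resources acquired before the failing step. A minimal standalone sketch of the idiom (resource names are made up):

    #include <stdio.h>
    #include <stdlib.h>

    static int init_node_sketch(void)
    {
        void *irqs, *dqm;

        irqs = malloc(16);      /* stands in for kfd_interrupt_init() */
        if (!irqs)
            goto irq_error;

        dqm = malloc(16);       /* stands in for device_queue_manager_init() */
        if (!dqm)
            goto dqm_error;

        return 0;               /* success: caller now owns both */

    dqm_error:
        free(irqs);             /* undo only what was acquired so far */
    irq_error:
        return -1;
    }

    int main(void)
    {
        printf("init: %d\n", init_node_sketch());
        return 0;
    }
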
+
+static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
+{
+ struct kfd_node *knode;
+ unsigned int i;
+
+ for (i = 0; i < num_nodes; i++) {
+ knode = kfd->nodes[i];
+ device_queue_manager_uninit(knode->dqm);
+ kfd_interrupt_exit(knode);
+ kfd_topology_remove_device(knode);
+ if (knode->gws)
+ amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
+ kfree(knode);
+ kfd->nodes[i] = NULL;
+ }
+}
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources)
{
- unsigned int size, map_process_packet_size;
+ unsigned int size, map_process_packet_size, i;
+ struct kfd_node *node;
+ uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
+ unsigned int max_proc_per_quantum;
+ int partition_mode;
+ int xcp_idx;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC1);
@@ -532,10 +616,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
- kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
- kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
- kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- - kfd->vm_info.first_vmid_kfd + 1;
+ kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
+
+ if (kfd->num_nodes == 0) {
+ dev_err(kfd_device,
+ "KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
+ kfd->adev->gfx.num_xcc_per_xcp);
+ goto out;
+ }
/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
* 32 and 64-bit requests are possible and must be
@@ -554,11 +642,34 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
return false;
}
+ first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+ last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+ vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
+
+ /* For GFX9.4.3, we need special handling for VMIDs depending on
+ * partition mode.
+ * In CPX mode, the VMID range needs to be shared between XCDs.
+ * Additionally, there are 13 VMIDs (3-15) available for KFD. To
+ * divide them equally, we change the starting VMID to 4 and do not
+ * use VMID 3.
+ * If the VMID range changes for GFX9.4.3, then this code MUST be
+ * revisited.
+ */
+ if (kfd->adev->xcp_mgr) {
+ partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
+ AMDGPU_XCP_FL_LOCKED);
+ if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ kfd->num_nodes != 1) {
+ vmid_num_kfd /= 2;
+ first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
+ }
+ }
+
/* Verify module parameters regarding mapped process number*/
if (hws_max_conc_proc >= 0)
- kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+ max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
else
- kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
+ max_proc_per_quantum = vmid_num_kfd;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
@@ -606,27 +717,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (amdgpu_use_xgmi_p2p)
kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
- kfd->noretry = kfd->adev->gmc.noretry;
-
- if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device, "Error initializing interrupts\n");
- goto kfd_interrupt_error;
- }
-
- kfd->dqm = device_queue_manager_init(kfd);
- if (!kfd->dqm) {
- dev_err(kfd_device, "Error initializing queue manager\n");
- goto device_queue_manager_error;
- }
-
- /* If supported on this device, allocate global GWS that is shared
- * by all KFD processes
+ /*
+ * For GFX9.4.3, the KFD abstracts all partitions within a socket as
+ * xGMI-connected in the topology, so assign a unique hive ID per
+ * device based on the PCI device location if the device is in PCIe
+ * mode.
*/
- if (kfd_gws_init(kfd)) {
- dev_err(kfd_device, "Could not allocate %d gws\n",
- kfd->adev->gds.gws_size);
- goto gws_error;
- }
+ if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1)
+ kfd->hive_id = pci_dev_id(kfd->adev->pdev);
+
+ kfd->noretry = kfd->adev->gmc.noretry;
/* If CRAT is broken, won't set iommu enabled */
kfd_double_confirm_iommu_support(kfd);
@@ -639,48 +738,100 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_cwsr_init(kfd);
- svm_migrate_init(kfd->adev);
+ dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
+ kfd->num_nodes);
+
+ /* Allocate the KFD nodes */
+ for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
+ node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
+ if (!node)
+ goto node_alloc_error;
+
+ node->node_id = i;
+ node->adev = kfd->adev;
+ node->kfd = kfd;
+ node->kfd2kgd = kfd->kfd2kgd;
+ node->vm_info.vmid_num_kfd = vmid_num_kfd;
+ node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
+ /* TODO: Check if error handling is needed */
+ if (node->xcp) {
+ amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
+ &node->xcc_mask);
+ ++xcp_idx;
+ } else {
+ node->xcc_mask =
+ (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
+ }
- if (kfd_resume_iommu(kfd))
- goto device_iommu_error;
+ if (node->xcp) {
+ dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
+ node->node_id, node->xcp->mem_id,
+ KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
+ }
- if (kfd_resume(kfd))
- goto kfd_resume_error;
+ if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
+ partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ kfd->num_nodes != 1) {
+ /* For GFX9.4.3 and CPX mode, first XCD gets VMID range
+ * 4-9 and second XCD gets VMID range 10-15.
+ */
- amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);
+ node->vm_info.first_vmid_kfd = (i%2 == 0) ?
+ first_vmid_kfd :
+ first_vmid_kfd+vmid_num_kfd;
+ node->vm_info.last_vmid_kfd = (i%2 == 0) ?
+ last_vmid_kfd-vmid_num_kfd :
+ last_vmid_kfd;
+ node->compute_vmid_bitmap =
+ ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
+ ((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
+ } else {
+ node->vm_info.first_vmid_kfd = first_vmid_kfd;
+ node->vm_info.last_vmid_kfd = last_vmid_kfd;
+ node->compute_vmid_bitmap =
+ gpu_resources->compute_vmid_bitmap;
+ }
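+ /* Worked example for the CPX branch above, using the 13 KFD
+  * VMIDs (3-15) described earlier: vmid_num_kfd is halved from
+  * 13 to 6 and first_vmid_kfd = 15 + 1 - 6*2 = 4, so even nodes
+  * get VMIDs 4-9 and odd nodes get VMIDs 10-15.  The bitmap
+  * expression ((1 << (last + 1)) - 1) - ((1 << first) - 1) sets
+  * exactly bits first..last.
+  */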
+ node->max_proc_per_quantum = max_proc_per_quantum;
+ atomic_set(&node->sram_ecc_flag, 0);
- if (kfd_topology_add_device(kfd)) {
- dev_err(kfd_device, "Error adding device to topology\n");
- goto kfd_topology_add_device_error;
+ amdgpu_amdkfd_get_local_mem_info(kfd->adev,
+ &node->local_mem_info, node->xcp);
+
+ /* Initialize the KFD node */
+ if (kfd_init_node(node)) {
+ dev_err(kfd_device, "Error initializing KFD node\n");
+ goto node_init_error;
+ }
+ kfd->nodes[i] = node;
}
- kfd_smi_init(kfd);
+ svm_range_set_max_pages(kfd->adev);
+
+ if (kfd_resume_iommu(kfd))
+ goto kfd_resume_iommu_error;
+
+ spin_lock_init(&kfd->watch_points_lock);
kfd->init_complete = true;
dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
kfd->adev->pdev->device);
pr_debug("Starting kfd with the following scheduling policy %d\n",
- kfd->dqm->sched_policy);
+ node->dqm->sched_policy);
goto out;
-kfd_topology_add_device_error:
-kfd_resume_error:
+kfd_resume_iommu_error:
+node_init_error:
+node_alloc_error:
+ kfd_cleanup_nodes(kfd, i);
device_iommu_error:
-gws_error:
- device_queue_manager_uninit(kfd->dqm);
-device_queue_manager_error:
- kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
kfd_doorbell_fini(kfd);
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
- if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->adev->pdev->vendor, kfd->adev->pdev->device);
@@ -691,15 +842,13 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- device_queue_manager_uninit(kfd->dqm);
- kfd_interrupt_exit(kfd);
- kfd_topology_remove_device(kfd);
+ /* Cleanup KFD nodes */
+ kfd_cleanup_nodes(kfd, kfd->num_nodes);
+ /* Cleanup common/shared resources */
kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
- if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
}
kfree(kfd);
@@ -707,16 +856,23 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
+ struct kfd_node *node;
+ int i;
+
if (!kfd->init_complete)
return 0;
- kfd_smi_event_update_gpu_reset(kfd, false);
-
- kfd->dqm->ops.pre_reset(kfd->dqm);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ kfd_smi_event_update_gpu_reset(node, false);
+ node->dqm->ops.pre_reset(node->dqm);
+ }
kgd2kfd_suspend(kfd, false);
- kfd_signal_reset_event(kfd);
+ for (i = 0; i < kfd->num_nodes; i++)
+ kfd_signal_reset_event(kfd->nodes[i]);
+
return 0;
}
@@ -729,57 +885,83 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
int ret;
+ struct kfd_node *node;
+ int i;
if (!kfd->init_complete)
return 0;
- ret = kfd_resume(kfd);
- if (ret)
- return ret;
- atomic_dec(&kfd_locked);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ ret = kfd_resume(kfd->nodes[i]);
+ if (ret)
+ return ret;
+ }
- atomic_set(&kfd->sram_ecc_flag, 0);
+ mutex_lock(&kfd_processes_mutex);
+ --kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
- kfd_smi_event_update_gpu_reset(kfd, true);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ atomic_set(&node->sram_ecc_flag, 0);
+ kfd_smi_event_update_gpu_reset(node, true);
+ }
return 0;
}
bool kfd_is_locked(void)
{
- return (atomic_read(&kfd_locked) > 0);
+ lockdep_assert_held(&kfd_processes_mutex);
+ return (kfd_locked > 0);
}
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
+ struct kfd_node *node;
+ int i;
+ int count;
+
if (!kfd->init_complete)
return;
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
+ mutex_lock(&kfd_processes_mutex);
+ count = ++kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
+
/* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_locked) == 1)
+ if (count == 1)
kfd_suspend_all_processes();
}
- kfd->dqm->ops.stop(kfd->dqm);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ node->dqm->ops.stop(node->dqm);
+ }
kfd_iommu_suspend(kfd);
}
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
- int ret, count;
+ int ret, count, i;
if (!kfd->init_complete)
return 0;
- ret = kfd_resume(kfd);
- if (ret)
- return ret;
+ for (i = 0; i < kfd->num_nodes; i++) {
+ ret = kfd_resume(kfd->nodes[i]);
+ if (ret)
+ return ret;
+ }
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
- count = atomic_dec_return(&kfd_locked);
+ mutex_lock(&kfd_processes_mutex);
+ count = --kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
+
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
@@ -808,15 +990,15 @@ static int kfd_resume_iommu(struct kfd_dev *kfd)
return err;
}
-static int kfd_resume(struct kfd_dev *kfd)
+static int kfd_resume(struct kfd_node *node)
{
int err = 0;
- err = kfd->dqm->ops.start(kfd->dqm);
+ err = node->dqm->ops.start(node->dqm);
if (err)
dev_err(kfd_device,
"Error starting queue manager for device %x:%x\n",
- kfd->adev->pdev->vendor, kfd->adev->pdev->device);
+ node->adev->pdev->vendor, node->adev->pdev->device);
return err;
}
@@ -839,9 +1021,10 @@ static inline void kfd_queue_work(struct workqueue_struct *wq,
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+ uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
bool is_patched = false;
unsigned long flags;
+ struct kfd_node *node;
if (!kfd->init_complete)
return;
@@ -851,16 +1034,22 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
return;
}
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
-
- if (kfd->interrupts_active
- && interrupt_is_wanted(kfd, ih_ring_entry,
- patched_ihre, &is_patched)
- && enqueue_ih_ring_entry(kfd,
- is_patched ? patched_ihre : ih_ring_entry))
- kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ spin_lock_irqsave(&node->interrupt_lock, flags);
+
+ if (node->interrupts_active
+ && interrupt_is_wanted(node, ih_ring_entry,
+ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(node,
+ is_patched ? patched_ihre : ih_ring_entry)) {
+ kfd_queue_work(node->ih_wq, &node->interrupt_work);
+ spin_unlock_irqrestore(&node->interrupt_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&node->interrupt_lock, flags);
+ }
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
@@ -998,10 +1187,11 @@ static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
-int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
struct kfd_mem_obj **mem_obj)
{
unsigned int found, start_search, cur_size;
+ struct kfd_dev *kfd = node->kfd;
if (size == 0)
return -EINVAL;
@@ -1101,8 +1291,10 @@ kfd_gtt_no_free_chunk:
return -ENOMEM;
}
-int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
{
+ struct kfd_dev *kfd = node->kfd;
+
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
@@ -1124,29 +1316,40 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
+ /*
+ * TODO: Currently updates the SRAM ECC flag for the first node only.
+ * This needs to be updated later when we can identify SRAM ECC
+ * errors on other nodes as well.
+ */
if (kfd)
- atomic_inc(&kfd->sram_ecc_flag);
+ atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
}
-void kfd_inc_compute_active(struct kfd_dev *kfd)
+void kfd_inc_compute_active(struct kfd_node *node)
{
- if (atomic_inc_return(&kfd->compute_profile) == 1)
- amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
+ if (atomic_inc_return(&node->kfd->compute_profile) == 1)
+ amdgpu_amdkfd_set_compute_idle(node->adev, false);
}
-void kfd_dec_compute_active(struct kfd_dev *kfd)
+void kfd_dec_compute_active(struct kfd_node *node)
{
- int count = atomic_dec_return(&kfd->compute_profile);
+ int count = atomic_dec_return(&node->kfd->compute_profile);
if (count == 0)
- amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
+ amdgpu_amdkfd_set_compute_idle(node->adev, true);
WARN_ONCE(count < 0, "Compute profile ref. count error");
}
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
+ /*
+ * TODO: For now, raise the throttling event only on the first node.
+ * This will need to change after we are able to determine
+ * which node raised the throttling event.
+ */
if (kfd && kfd->init_complete)
- kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
+ kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
+ throttle_bitmask);
}
/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
@@ -1154,19 +1357,41 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
* When the device has more than two engines, we reserve two for PCIe to enable
* full-duplex and the rest are used as XGMI.
*/
-unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
+unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
{
/* If XGMI is not supported, all SDMA engines are PCIe */
- if (!kdev->adev->gmc.xgmi.supported)
- return kdev->adev->sdma.num_instances;
+ if (!node->adev->gmc.xgmi.supported)
+ return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;
- return min(kdev->adev->sdma.num_instances, 2);
+ return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
}
-unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
{
/* After reserved for PCIe, the rest of engines are XGMI */
- return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
+ return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
+ kfd_get_num_sdma_engines(node);
+}
+
+int kgd2kfd_check_and_lock_kfd(void)
+{
+ mutex_lock(&kfd_processes_mutex);
+ if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
+ mutex_unlock(&kfd_processes_mutex);
+ return -EBUSY;
+ }
+
+ ++kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
+
+ return 0;
+}
+
+void kgd2kfd_unlock_kfd(void)
+{
+ mutex_lock(&kfd_processes_mutex);
+ --kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
}
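
With kfd_locked now a plain counter under kfd_processes_mutex, kgd2kfd_check_and_lock_kfd() can test the process table and bump the lock count as one atomic step, which the old atomic_t could not guarantee. A standalone pthread sketch of that check-then-increment pattern (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t processes_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int locked;          /* counterpart of kfd_locked */
    static int process_count;   /* counterpart of the process hash table */

    static int check_and_lock(void)
    {
        pthread_mutex_lock(&processes_mutex);
        if (process_count > 0 || locked > 0) {  /* kfd_is_locked() */
            pthread_mutex_unlock(&processes_mutex);
            return -1;                          /* -EBUSY */
        }
        ++locked;       /* check and increment under one lock */
        pthread_mutex_unlock(&processes_mutex);
        return 0;
    }

    static void unlock_kfd(void)
    {
        pthread_mutex_lock(&processes_mutex);
        --locked;
        pthread_mutex_unlock(&processes_mutex);
    }

    int main(void)
    {
        printf("first lock: %d\n", check_and_lock());   /* 0 */
        printf("second lock: %d\n", check_and_lock());  /* -1 */
        unlock_kfd();
        return 0;
    }
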
#if defined(CONFIG_DEBUG_FS)
@@ -1174,7 +1399,7 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
/* This function will send a package to HIQ to hang the HWS
* which will trigger a GPU reset and bring the HWS back to normal state
*/
-int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+int kfd_debugfs_hang_hws(struct kfd_node *dev)
{
if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
pr_err("HWS is not enabled");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 7a95698d83f7..f515cb8f30ca 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -36,6 +36,7 @@
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "mes_api_def.h"
+#include "kfd_debug.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
@@ -46,10 +47,13 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param);
+ uint32_t filter_param,
+ uint32_t grace_period);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param, bool reset);
+ uint32_t filter_param,
+ uint32_t grace_period,
+ bool reset);
static int map_queues_cpsch(struct device_queue_manager *dqm);
@@ -74,31 +78,31 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
int i;
- int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
- + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
+ int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
+ + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;
/* queue is available for KFD usage if bit is 1 */
- for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
+ for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
- dqm->dev->shared_resources.cp_queue_bitmap))
+ dqm->dev->kfd->shared_resources.cp_queue_bitmap))
return true;
return false;
}
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
- return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
+ return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
- return dqm->dev->shared_resources.num_queue_per_pipe;
+ return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
- return dqm->dev->shared_resources.num_pipe_per_mec;
+ return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
}
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
@@ -110,29 +114,40 @@ static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return kfd_get_num_sdma_engines(dqm->dev) *
- dqm->dev->device_info.num_sdma_queues_per_engine;
+ dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
- dqm->dev->device_info.num_sdma_queues_per_engine;
+ dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}
-static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm)
+static void init_sdma_bitmaps(struct device_queue_manager *dqm)
{
- return dqm->dev->device_info.reserved_sdma_queues_bitmap;
+ bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
+ bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));
+
+ bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
+ bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));
+
+ /* Mask out the reserved queues */
+ bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
+ dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
+ KFD_MAX_SDMA_QUEUES);
}
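
The SDMA queue pool moves from hand-rolled u64 masks to the kernel bitmap API, so it is no longer capped at 64 queues. A userspace mock of the free-list pattern used by init_sdma_bitmaps() here and by allocate_sdma_queue()/deallocate_sdma_queue() further down (the helpers are simplified single-word stand-ins for the kernel's):

    #include <stdio.h>

    static int  test_bit(int n, unsigned long *map)  { return (*map >> n) & 1UL; }
    static void set_bit(int n, unsigned long *map)   { *map |= 1UL << n; }
    static void clear_bit(int n, unsigned long *map) { *map &= ~(1UL << n); }

    static int find_first_bit(unsigned long *map, int size)
    {
        for (int i = 0; i < size; i++)
            if (test_bit(i, map))
                return i;
        return size;    /* no free bit */
    }

    int main(void)
    {
        unsigned long sdma_bitmap = 0;
        int i, bit;

        /* init: queues 0..7 free, then mask out reserved queues 0-1 */
        for (i = 0; i < 8; i++)
            set_bit(i, &sdma_bitmap);
        clear_bit(0, &sdma_bitmap);
        clear_bit(1, &sdma_bitmap);

        /* allocate: take the first free id */
        bit = find_first_bit(&sdma_bitmap, 8);
        clear_bit(bit, &sdma_bitmap);
        printf("allocated sdma_id %d\n", bit);  /* prints 2 */

        /* deallocate: return the id to the pool */
        set_bit(bit, &sdma_bitmap);
        return 0;
    }
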
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- return dqm->dev->kfd2kgd->program_sh_mem_settings(
- dqm->dev->adev, qpd->vmid,
- qpd->sh_mem_config,
- qpd->sh_mem_ape1_base,
- qpd->sh_mem_ape1_limit,
- qpd->sh_mem_bases);
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ int xcc_id;
+
+ for_each_inst(xcc_id, xcc_mask)
+ dqm->dev->kfd2kgd->program_sh_mem_settings(
+ dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
+ qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
+ qpd->sh_mem_bases, xcc_id);
}
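
program_sh_mem_settings() and the other converted helpers now repeat each register write once per enabled XCC instance by walking the set bits of dqm->dev->xcc_mask. A standalone sketch of that set-bit walk (the real for_each_inst() macro lives in amdgpu; this only shows the idea):

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        unsigned int xcc_mask = 0x5;    /* XCC instances 0 and 2 enabled */
        unsigned int m = xcc_mask;
        int xcc_id;

        while (m) {
            xcc_id = ffs(m) - 1;        /* index of the lowest set bit */
            m &= m - 1;                 /* clear it */
            printf("program settings on xcc %d\n", xcc_id);
        }
        return 0;
    }
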
static void kfd_hws_hang(struct device_queue_manager *dqm)
@@ -211,6 +226,9 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
queue_input.tma_addr = qpd->tma_addr;
+ queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
+ KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3);
+ queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled;
queue_type = convert_to_mes_queue_type(q->properties.type);
if (queue_type < 0) {
@@ -330,7 +348,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
struct queue *q,
uint32_t const *restore_id)
{
- struct kfd_dev *dev = qpd->dqm->dev;
+ struct kfd_node *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev)) {
/* On pre-SOC15 chips we need to use the queue ID to
@@ -349,8 +367,17 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
* for a SDMA engine is 512.
*/
- uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx;
- uint32_t valid_id = idx_offset[q->properties.sdma_engine_id]
+ uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;
+
+ /*
+ * q->properties.sdma_engine_id corresponds to the virtual
+ * sdma engine number. However, for doorbell allocation,
+ * we need the physical sdma engine id in order to get the
+ * correct doorbell offset.
+ */
+ uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
+ get_num_all_sdma_engines(qpd->dqm) +
+ q->properties.sdma_engine_id]
+ (q->properties.sdma_queue_id & 1)
* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+ (q->properties.sdma_queue_id >> 1);
@@ -382,7 +409,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
}
q->properties.doorbell_off =
- kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
+ kfd_get_doorbell_dw_offset_in_bar(dev->kfd, qpd_to_pdd(qpd),
q->doorbell_id);
return 0;
}
@@ -391,7 +418,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
struct queue *q)
{
unsigned int old;
- struct kfd_dev *dev = qpd->dqm->dev;
+ struct kfd_node *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev) ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
@@ -405,10 +432,14 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
static void program_trap_handler_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ int xcc_id;
+
if (dqm->dev->kfd2kgd->program_trap_handler_settings)
- dqm->dev->kfd2kgd->program_trap_handler_settings(
- dqm->dev->adev, qpd->vmid,
- qpd->tba_addr, qpd->tma_addr);
+ for_each_inst(xcc_id, xcc_mask)
+ dqm->dev->kfd2kgd->program_trap_handler_settings(
+ dqm->dev->adev, qpd->vmid, qpd->tba_addr,
+ qpd->tma_addr, xcc_id);
}
static int allocate_vmid(struct device_queue_manager *dqm,
@@ -441,7 +472,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
program_sh_mem_settings(dqm, qpd);
- if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled)
+ if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
program_trap_handler_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -460,7 +491,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
return 0;
}
-static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
+static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
struct qcm_process_device *qpd)
{
const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
@@ -661,7 +692,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
#define SQ_IND_CMD_CMD_KILL 0x00000003
#define SQ_IND_CMD_MODE_BROADCAST 0x00000001
-static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
+static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
@@ -671,6 +702,8 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process
struct kfd_process_device *pdd;
int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
+ uint32_t xcc_mask = dev->xcc_mask;
+ int xcc_id;
reg_sq_cmd.u32All = 0;
reg_gfx_index.u32All = 0;
@@ -715,9 +748,10 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process
reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
reg_sq_cmd.bits.vm_id = vmid;
- dev->kfd2kgd->wave_control_execute(dev->adev,
- reg_gfx_index.u32All,
- reg_sq_cmd.u32All);
+ for_each_inst(xcc_id, xcc_mask)
+ dev->kfd2kgd->wave_control_execute(
+ dev->adev, reg_gfx_index.u32All,
+ reg_sq_cmd.u32All, xcc_id);
return 0;
}
@@ -837,9 +871,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
/* Make sure the queue is unmapped before updating the MQD */
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = unmap_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
else if (prev_active)
retval = remove_queue_mes(dqm, q, &pdd->qpd);
@@ -858,7 +892,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
}
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
- (dqm->dev->cwsr_enabled ?
+ (dqm->dev->kfd->cwsr_enabled ?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -895,7 +929,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
}
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active)
retval = add_queue_mes(dqm, q, &pdd->qpd);
@@ -917,6 +951,92 @@ out_unlock:
return retval;
}
+/* Unlike evict_process_queues_cpsch and evict_process_queues_nocpsch,
+ * suspend_single_queue does not lock the dqm; the caller must lock the
+ * dqm before calling and unlock it afterwards.
+ *
+ * We don't lock the dqm here because this function may be called on
+ * multiple queues in a loop; rather than locking and unlocking for
+ * each queue, the caller keeps the dqm locked across all of the calls.
+ */
+static int suspend_single_queue(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd,
+ struct queue *q)
+{
+ bool is_new;
+
+ if (q->properties.is_suspended)
+ return 0;
+
+ pr_debug("Suspending PASID %u queue [%i]\n",
+ pdd->process->pasid,
+ q->properties.queue_id);
+
+ is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
+
+ if (is_new || q->properties.is_being_destroyed) {
+ pr_debug("Suspend: skip %s queue id %i\n",
+ is_new ? "new" : "destroyed",
+ q->properties.queue_id);
+ return -EBUSY;
+ }
+
+ q->properties.is_suspended = true;
+ if (q->properties.is_active) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ int r = remove_queue_mes(dqm, q, &pdd->qpd);
+
+ if (r)
+ return r;
+ }
+
+ decrement_queue_count(dqm, &pdd->qpd, q);
+ q->properties.is_active = false;
+ }
+
+ return 0;
+}
+
+/* Unlike restore_process_queues_cpsch and restore_process_queues_nocpsch,
+ * resume_single_queue does not lock the dqm; the caller must lock the
+ * dqm before calling and unlock it afterwards.
+ *
+ * We don't lock the dqm here because this function may be called on
+ * multiple queues in a loop; rather than locking and unlocking for
+ * each queue, the caller keeps the dqm locked across all of the calls.
+ */
+static int resume_single_queue(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ struct kfd_process_device *pdd;
+
+ if (!q->properties.is_suspended)
+ return 0;
+
+ pdd = qpd_to_pdd(qpd);
+
+ pr_debug("Restoring from suspend PASID %u queue [%i]\n",
+ pdd->process->pasid,
+ q->properties.queue_id);
+
+ q->properties.is_suspended = false;
+
+ if (QUEUE_IS_ACTIVE(q->properties)) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ int r = add_queue_mes(dqm, q, &pdd->qpd);
+
+ if (r)
+ return r;
+ }
+
+ q->properties.is_active = true;
+ increment_queue_count(dqm, qpd, q);
+ }
+
+ return 0;
+}
+
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -951,7 +1071,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
continue;
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
- (dqm->dev->cwsr_enabled ?
+ (dqm->dev->kfd->cwsr_enabled ?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -979,6 +1099,14 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
+
+ /* The debugger creates processes that temporarily have not acquired
+ * all VMs for all devices and that have no VMs themselves.
+ * Skip queue eviction on process eviction.
+ */
+ if (!pdd->drm_priv)
+ goto out;
+
pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
@@ -993,7 +1121,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
q->properties.is_active = false;
decrement_queue_count(dqm, qpd, q);
- if (dqm->dev->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval) {
pr_err("Failed to evict queue %d\n",
@@ -1003,11 +1131,12 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
}
}
pdd->last_evict_timestamp = get_jiffies_64();
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
out:
dqm_unlock(dqm);
@@ -1100,13 +1229,10 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
{
struct queue *q;
struct kfd_process_device *pdd;
- uint64_t pd_base;
uint64_t eviction_duration;
int retval = 0;
pdd = qpd_to_pdd(qpd);
- /* Retrieve PD base */
- pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -1116,12 +1242,19 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
+ /* The debugger creates processes that temporarily have not acquired
+ * all VMs for all devices and that have no VMs themselves.
+ * Skip queue restore on process restore.
+ */
+ if (!pdd->drm_priv)
+ goto vm_not_acquired;
+
pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
- qpd->page_table_base = pd_base;
- pr_debug("Updated PD address to 0x%llx\n", pd_base);
+ qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
+ pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
@@ -1132,7 +1265,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
q->properties.is_active = true;
increment_queue_count(dqm, &pdd->qpd, q);
- if (dqm->dev->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = add_queue_mes(dqm, q, qpd);
if (retval) {
pr_err("Failed to restore queue %d\n",
@@ -1141,12 +1274,13 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
}
}
}
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- qpd->evicted = 0;
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
atomic64_add(eviction_duration, &pdd->evict_duration_counter);
+vm_not_acquired:
+ qpd->evicted = 0;
out:
dqm_unlock(dqm);
return retval;
@@ -1229,35 +1363,32 @@ static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
unsigned int vmid)
{
- return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
- dqm->dev->adev, pasid, vmid);
-}
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ int xcc_id, ret;
-static void init_interrupts(struct device_queue_manager *dqm)
-{
- unsigned int i;
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
+ dqm->dev->adev, pasid, vmid, xcc_id);
+ if (ret)
+ break;
+ }
- for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
- if (is_pipe_enabled(dqm, 0, i))
- dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
+ return ret;
}
-static void init_sdma_bitmaps(struct device_queue_manager *dqm)
+static void init_interrupts(struct device_queue_manager *dqm)
{
- unsigned int num_sdma_queues =
- min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8,
- get_num_sdma_queues(dqm));
- unsigned int num_xgmi_sdma_queues =
- min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8,
- get_num_xgmi_sdma_queues(dqm));
-
- if (num_sdma_queues)
- dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0);
- if (num_xgmi_sdma_queues)
- dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0);
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ unsigned int i, xcc_id;
- dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm);
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
+ for_each_inst(xcc_id, xcc_mask) {
+ for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
+ if (is_pipe_enabled(dqm, 0, i)) {
+ dqm->dev->kfd2kgd->init_interrupts(
+ dqm->dev->adev, i, xcc_id);
+ }
+ }
+ }
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
@@ -1282,7 +1413,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.cp_queue_bitmap))
+ dqm->dev->kfd->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
}
@@ -1322,9 +1453,16 @@ static int start_nocpsch(struct device_queue_manager *dqm)
static int stop_nocpsch(struct device_queue_manager *dqm)
{
+ dqm_lock(dqm);
+ if (!dqm->sched_running) {
+ dqm_unlock(dqm);
+ return 0;
+ }
+
if (dqm->dev->adev->asic_type == CHIP_HAWAII)
pm_uninit(&dqm->packet_mgr, false);
dqm->sched_running = false;
+ dqm_unlock(dqm);
return 0;
}
@@ -1342,46 +1480,48 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (dqm->sdma_bitmap == 0) {
+ if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
pr_err("No more SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
- if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
+ if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
pr_err("SDMA queue already in use\n");
return -EBUSY;
}
- dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+ clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
q->sdma_id = *restore_sdma_id;
} else {
/* Find first available sdma_id */
- bit = __ffs64(dqm->sdma_bitmap);
- dqm->sdma_bitmap &= ~(1ULL << bit);
+ bit = find_first_bit(dqm->sdma_bitmap,
+ get_num_sdma_queues(dqm));
+ clear_bit(bit, dqm->sdma_bitmap);
q->sdma_id = bit;
}
- q->properties.sdma_engine_id = q->sdma_id %
- kfd_get_num_sdma_engines(dqm->dev);
+ q->properties.sdma_engine_id =
+ q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (dqm->xgmi_sdma_bitmap == 0) {
+ if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
pr_err("No more XGMI SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
- if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
+ if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
pr_err("SDMA queue already in use\n");
return -EBUSY;
}
- dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+ clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
q->sdma_id = *restore_sdma_id;
} else {
- bit = __ffs64(dqm->xgmi_sdma_bitmap);
- dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
+ bit = find_first_bit(dqm->xgmi_sdma_bitmap,
+ get_num_xgmi_sdma_queues(dqm));
+ clear_bit(bit, dqm->xgmi_sdma_bitmap);
q->sdma_id = bit;
}
/* sdma_engine_id is sdma id including
@@ -1409,11 +1549,11 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (q->sdma_id >= get_num_sdma_queues(dqm))
return;
- dqm->sdma_bitmap |= (1ULL << q->sdma_id);
+ set_bit(q->sdma_id, dqm->sdma_bitmap);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
return;
- dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
+ set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
}
}
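
The SDMA allocator above moves from hand-rolled u64 masks to the kernel
bitmap API, so the ID pool is no longer capped at the 64 bits of a u64. The
idiom, as a self-contained sketch (names illustrative; as in the driver, a
set bit means the ID is free):

	DECLARE_BITMAP(id_map, KFD_MAX_SDMA_QUEUES);

	static int alloc_id(unsigned int nbits)
	{
		int bit;

		if (bitmap_empty(id_map, nbits))
			return -ENOMEM;
		bit = find_first_bit(id_map, nbits);	/* first free ID */
		clear_bit(bit, id_map);			/* mark it in use */
		return bit;
	}

	static void free_id(unsigned int bit)
	{
		set_bit(bit, id_map);
	}
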
@@ -1426,14 +1566,14 @@ static int set_sched_resources(struct device_queue_manager *dqm)
int i, mec;
struct scheduling_resources res;
- res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
+ res.vmid_mask = dqm->dev->compute_vmid_bitmap;
res.queue_mask = 0;
for (i = 0; i < KGD_MAX_QUEUES; ++i) {
- mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
- / dqm->dev->shared_resources.num_pipe_per_mec;
+ mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
+ / dqm->dev->kfd->shared_resources.num_pipe_per_mec;
- if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
+ if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
continue;
/* only acquire queues from the first MEC */
@@ -1475,9 +1615,13 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ dqm->trap_debug_vmid = 0;
init_sdma_bitmaps(dqm);
+ if (dqm->dev->kfd2kgd->get_iq_wait_times)
+ dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
+ &dqm->wait_times);
return 0;
}
@@ -1489,7 +1633,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
- if (!dqm->dev->shared_resources.enable_mes) {
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = pm_init(&dqm->packet_mgr, dqm);
if (retval)
goto fail_packet_manager_init;
@@ -1516,14 +1660,15 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm->is_hws_hang = false;
dqm->is_resetting = false;
dqm->sched_running = true;
- if (!dqm->dev->shared_resources.enable_mes)
- execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr, false);
fail_packet_manager_init:
dqm_unlock(dqm);
@@ -1541,8 +1686,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->is_hws_hang) {
- if (!dqm->dev->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
else
remove_all_queues_mes(dqm);
}
@@ -1550,11 +1695,11 @@ static int stop_cpsch(struct device_queue_manager *dqm)
hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_release_ib(&dqm->packet_mgr);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr, hanging);
dqm_unlock(dqm);
@@ -1584,7 +1729,8 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_add(&kq->list, &qpd->priv_queue_list);
increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
- execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
dqm_unlock(dqm);
return 0;
@@ -1598,7 +1744,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_del(&kq->list);
decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
- execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
@@ -1658,6 +1805,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
* updates the is_evicted flag but is a no-op otherwise.
*/
q->properties.is_evicted = !!qpd->evicted;
+ q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
+ KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3);
if (qd)
mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
@@ -1673,9 +1823,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (q->properties.is_active) {
increment_queue_count(dqm, qpd, q);
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
else
retval = add_queue_mes(dqm, q, qpd);
if (retval)
@@ -1764,7 +1914,9 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param, bool reset)
+ uint32_t filter_param,
+ uint32_t grace_period,
+ bool reset)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
@@ -1776,6 +1928,12 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;
+ if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
+ retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
+ if (retval)
+ return retval;
+ }
+
retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
if (retval)
return retval;
@@ -1808,6 +1966,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return -ETIME;
}
+ /* We need to reset the grace period value for this device */
+ if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
+ if (pm_update_grace_period(&dqm->packet_mgr,
+ USE_DEFAULT_GRACE_PERIOD))
+ pr_err("Failed to reset grace period\n");
+ }
+
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;
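
The new grace_period argument controls how long waves may drain before
preemption takes effect: USE_DEFAULT_GRACE_PERIOD leaves the packet
manager's programmed value untouched, while any other value is applied for
this unmap only and restored afterwards. A hedged caller sketch (dqm->lock
held, as required above):

	/* normal scheduling keeps the default */
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			   USE_DEFAULT_GRACE_PERIOD, false);
	/* a debugger suspend may pass its own value, see suspend_queues() */
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
			   grace_period, false);
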
@@ -1823,7 +1988,7 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm,
dqm_lock(dqm);
retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
- pasid, true);
+ pasid, USE_DEFAULT_GRACE_PERIOD, true);
dqm_unlock(dqm);
return retval;
@@ -1832,19 +1997,45 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm,
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param)
+ uint32_t filter_param,
+ uint32_t grace_period)
{
int retval;
if (dqm->is_hws_hang)
return -EIO;
- retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
+ retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
if (retval)
return retval;
return map_queues_cpsch(dqm);
}
+static int wait_on_destroy_queue(struct device_queue_manager *dqm,
+ struct queue *q)
+{
+ struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
+ q->process);
+ int ret = 0;
+
+ if (pdd->qpd.is_debug)
+ return ret;
+
+ q->properties.is_being_destroyed = true;
+
+ if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
+ dqm_unlock(dqm);
+ mutex_unlock(&q->process->mutex);
+ ret = wait_event_interruptible(dqm->destroy_wait,
+ !q->properties.is_suspended);
+
+ mutex_lock(&q->process->mutex);
+ dqm_lock(dqm);
+ }
+
+ return ret;
+}
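
wait_on_destroy_queue() must drop both the process mutex and the dqm lock
before sleeping, because the debugger's resume path needs those same locks
to clear is_suspended and wake destroy_wait. The pairing, in sketch form:

	/*
	 * destroyer (above)                debugger (resume_queues)
	 * -----------------                ------------------------
	 * mark is_being_destroyed          dqm_lock(dqm)
	 * drop process mutex + dqm lock    resume_single_queue() clears
	 * wait_event_interruptible(            q->properties.is_suspended
	 *     dqm->destroy_wait,           wake_up_all(&dqm->destroy_wait)
	 *     !q->properties.is_suspended)
	 */
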
+
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
@@ -1864,11 +2055,16 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
q->properties.queue_id);
}
- retval = 0;
-
/* remove queue from list to prevent rescheduling after preemption */
dqm_lock(dqm);
+ retval = wait_on_destroy_queue(dqm, q);
+
+ if (retval) {
+ dqm_unlock(dqm);
+ return retval;
+ }
+
if (qpd->is_debug) {
/*
* error, currently we do not allow to destroy a queue
@@ -1893,10 +2089,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- if (!dqm->dev->shared_resources.enable_mes) {
- decrement_queue_count(dqm, qpd, q);
+ decrement_queue_count(dqm, qpd, q);
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
} else {
@@ -1914,7 +2111,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
dqm_unlock(dqm);
- /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
+ /*
+ * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid
+ * circular locking
+ */
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE),
+ qpd->pqm->process, q->device,
+ -1, false, NULL, 0);
+
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
return retval;
@@ -2056,7 +2260,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.is_active || !q->device->cwsr_enabled ||
+ q->properties.is_active || !q->device->kfd->cwsr_enabled ||
!mqd_mgr->get_wave_state) {
dqm_unlock(dqm);
return -EINVAL;
@@ -2069,8 +2273,8 @@ static int get_wave_state(struct device_queue_manager *dqm,
* and the queue should be protected against destruction by the process
* lock.
*/
- return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
- ctl_stack_used_size, save_area_used_size);
+ return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
+ ctl_stack, ctl_stack_used_size, save_area_used_size);
}
static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
@@ -2105,7 +2309,7 @@ static int checkpoint_mqd(struct device_queue_manager *dqm,
dqm_lock(dqm);
- if (q->properties.is_active || !q->device->cwsr_enabled) {
+ if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
r = -EINVAL;
goto dqm_unlock;
}
@@ -2158,7 +2362,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
if (q->properties.is_active) {
decrement_queue_count(dqm, qpd, q);
- if (dqm->dev->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval)
pr_err("Failed to remove queue %d\n",
@@ -2180,8 +2384,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
}
- if (!dqm->dev->shared_resources.enable_mes)
- retval = execute_queues_cpsch(dqm, filter, 0);
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
@@ -2242,12 +2446,13 @@ out_free:
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
int retval;
- struct kfd_dev *dev = dqm->dev;
+ struct kfd_node *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
get_num_all_sdma_engines(dqm) *
- dev->device_info.num_sdma_queues_per_engine +
- dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+ dev->kfd->device_info.num_sdma_queues_per_engine +
+ (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
+ NUM_XCC(dqm->dev->xcc_mask));
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
@@ -2256,7 +2461,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
return retval;
}
-struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
struct device_queue_manager *dqm;
@@ -2373,20 +2578,22 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (init_mqd_managers(dqm))
goto out_free;
- if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
+ if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
goto out_free;
}
- if (!dqm->ops.initialize(dqm))
+ if (!dqm->ops.initialize(dqm)) {
+ init_waitqueue_head(&dqm->destroy_wait);
return dqm;
+ }
out_free:
kfree(dqm);
return NULL;
}
-static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
+static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
struct kfd_mem_obj *mqd)
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
@@ -2396,8 +2603,9 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
+ dqm->ops.stop(dqm);
dqm->ops.uninitialize(dqm);
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
kfree(dqm);
}
@@ -2426,6 +2634,498 @@ static void kfd_process_hw_exception(struct work_struct *work)
amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
+int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int r;
+ int updated_vmid_mask;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ dqm_lock(dqm);
+
+ if (dqm->trap_debug_vmid != 0) {
+ pr_err("Trap debug id already reserved\n");
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
+ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD, false);
+ if (r)
+ goto out_unlock;
+
+ updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
+ updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
+
+ dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
+ dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
+ r = set_sched_resources(dqm);
+ if (r)
+ goto out_unlock;
+
+ r = map_queues_cpsch(dqm);
+ if (r)
+ goto out_unlock;
+
+ pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
+
+out_unlock:
+ dqm_unlock(dqm);
+ return r;
+}
+
+/*
+ * Releases the VMID reserved for the trap debugger
+ */
+int release_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int r;
+ int updated_vmid_mask;
+ uint32_t trap_debug_vmid;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ dqm_lock(dqm);
+ trap_debug_vmid = dqm->trap_debug_vmid;
+ if (dqm->trap_debug_vmid == 0) {
+ pr_err("Trap debug id is not reserved\n");
+ r = -EINVAL;
+ goto out_unlock;
+ }
+
+ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD, false);
+ if (r)
+ goto out_unlock;
+
+ updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
+ updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);
+
+ dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
+ dqm->trap_debug_vmid = 0;
+ r = set_sched_resources(dqm);
+ if (r)
+ goto out_unlock;
+
+ r = map_queues_cpsch(dqm);
+ if (r)
+ goto out_unlock;
+
+ pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
+
+out_unlock:
+ dqm_unlock(dqm);
+ return r;
+}
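
Reserve and release are symmetric: the last KFD VMID is carved out of
compute_vmid_bitmap while all queues are unmapped, and handed back the same
way. Expected lifecycle around a debug session, as a sketch:

	/* attach: pin a VMID for the trap handler */
	r = reserve_debug_trap_vmid(dqm, &pdd->qpd);
	if (r)
		return r;
	/* ... session runs with dqm->trap_debug_vmid reserved ... */
	/* detach: return the VMID to the compute pool */
	release_debug_trap_vmid(dqm, &pdd->qpd);
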
+
+#define QUEUE_NOT_FOUND -1
+/* invalidate queue operation in array */
+static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids)
+{
+ int i;
+
+ for (i = 0; i < num_queues; i++)
+ queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK;
+}
+
+/* find queue index in array */
+static int q_array_get_index(unsigned int queue_id,
+ uint32_t num_queues,
+ uint32_t *queue_ids)
+{
+ int i;
+
+ for (i = 0; i < num_queues; i++)
+ if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK))
+ return i;
+
+ return QUEUE_NOT_FOUND;
+}
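
These helpers implement the status protocol of the suspend/resume ioctls:
every user-supplied queue ID is first tagged with
KFD_DBG_QUEUE_INVALID_MASK, the tag is cleared once the request succeeds
for that queue, and KFD_DBG_QUEUE_ERROR_MASK is set on failure. A
hypothetical user-side reading of the array written back:

	for (i = 0; i < num_queues; i++) {
		if (queue_ids[i] & KFD_DBG_QUEUE_ERROR_MASK)
			printf("queue %u: request failed\n", i);
		else if (queue_ids[i] & KFD_DBG_QUEUE_INVALID_MASK)
			printf("queue %u: not found or untouched\n", i);
		/* else: suspended/resumed successfully */
	}
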
+
+struct copy_context_work_handler_workarea {
+ struct work_struct copy_context_work;
+ struct kfd_process *p;
+};
+
+static void copy_context_work_handler(struct work_struct *work)
+{
+ struct copy_context_work_handler_workarea *workarea;
+ struct mqd_manager *mqd_mgr;
+ struct queue *q;
+ struct mm_struct *mm;
+ struct kfd_process *p;
+ uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size;
+ int i;
+
+ workarea = container_of(work,
+ struct copy_context_work_handler_workarea,
+ copy_context_work);
+
+ p = workarea->p;
+ mm = get_task_mm(p->lead_thread);
+
+ if (!mm)
+ return;
+
+ kthread_use_mm(mm);
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
+
+ /* We ignore the return value from get_wave_state
+ * because
+ * i) right now, it always returns 0, and
+ * ii) if we hit an error, we would continue to the
+ * next queue anyway.
+ */
+ mqd_mgr->get_wave_state(mqd_mgr,
+ q->mqd,
+ &q->properties,
+ (void __user *) q->properties.ctx_save_restore_area_address,
+ &tmp_ctl_stack_used_size,
+ &tmp_save_area_used_size);
+ }
+ }
+ kthread_unuse_mm(mm);
+ mmput(mm);
+}
+
+static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
+{
+ size_t array_size = num_queues * sizeof(uint32_t);
+ uint32_t *queue_ids = NULL;
+
+ if (!usr_queue_id_array)
+ return NULL;
+
+ queue_ids = kzalloc(array_size, GFP_KERNEL);
+ if (!queue_ids)
+ return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(queue_ids, usr_queue_id_array, array_size)) {
+		kfree(queue_ids);
+		return ERR_PTR(-EFAULT);
+	}
+
+ return queue_ids;
+}
+
+int resume_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t *usr_queue_id_array)
+{
+ uint32_t *queue_ids = NULL;
+ int total_resumed = 0;
+ int i;
+
+ if (usr_queue_id_array) {
+ queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
+
+ if (IS_ERR(queue_ids))
+ return PTR_ERR(queue_ids);
+
+ /* mask all queues as invalid. unmask per successful request */
+ q_array_invalidate(num_queues, queue_ids);
+ }
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct qcm_process_device *qpd = &pdd->qpd;
+ struct queue *q;
+ int r, per_device_resumed = 0;
+
+ dqm_lock(dqm);
+
+ /* unmask queues that resume or already resumed as valid */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = QUEUE_NOT_FOUND;
+
+ if (queue_ids)
+ q_idx = q_array_get_index(
+ q->properties.queue_id,
+ num_queues,
+ queue_ids);
+
+ if (!queue_ids || q_idx != QUEUE_NOT_FOUND) {
+ int err = resume_single_queue(dqm, &pdd->qpd, q);
+
+ if (queue_ids) {
+ if (!err) {
+ queue_ids[q_idx] &=
+ ~KFD_DBG_QUEUE_INVALID_MASK;
+ } else {
+ queue_ids[q_idx] |=
+ KFD_DBG_QUEUE_ERROR_MASK;
+ break;
+ }
+ }
+
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ wake_up_all(&dqm->destroy_wait);
+ if (!err)
+ total_resumed++;
+ } else {
+ per_device_resumed++;
+ }
+ }
+ }
+
+ if (!per_device_resumed) {
+ dqm_unlock(dqm);
+ continue;
+ }
+
+ r = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+ 0,
+ USE_DEFAULT_GRACE_PERIOD);
+ if (r) {
+ pr_err("Failed to resume process queues\n");
+ if (queue_ids) {
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = q_array_get_index(
+ q->properties.queue_id,
+ num_queues,
+ queue_ids);
+
+ /* mask queue as error on resume fail */
+ if (q_idx != QUEUE_NOT_FOUND)
+ queue_ids[q_idx] |=
+ KFD_DBG_QUEUE_ERROR_MASK;
+ }
+ }
+ } else {
+ wake_up_all(&dqm->destroy_wait);
+ total_resumed += per_device_resumed;
+ }
+
+ dqm_unlock(dqm);
+ }
+
+ if (queue_ids) {
+ if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
+ num_queues * sizeof(uint32_t)))
+ pr_err("copy_to_user failed on queue resume\n");
+
+ kfree(queue_ids);
+ }
+
+ return total_resumed;
+}
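
Note the return convention: on success resume_queues() returns the number
of queues actually resumed, which may be less than num_queues; a negative
errno is returned only when the user ID array itself cannot be read. Caller
sketch:

	int n = resume_queues(p, num_queues, usr_ids);

	if (n < 0)
		return n;	/* could not read the user ID array */
	if (n < num_queues)
		pr_debug("only %d of %u queues resumed\n", n, num_queues);
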
+
+int suspend_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t grace_period,
+ uint64_t exception_clear_mask,
+ uint32_t *usr_queue_id_array)
+{
+ uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
+ int total_suspended = 0;
+ int i;
+
+ if (IS_ERR(queue_ids))
+ return PTR_ERR(queue_ids);
+
+	/* mask all queues as invalid. unmask on successful request */
+ q_array_invalidate(num_queues, queue_ids);
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct qcm_process_device *qpd = &pdd->qpd;
+ struct queue *q;
+ int r, per_device_suspended = 0;
+
+ mutex_lock(&p->event_mutex);
+ dqm_lock(dqm);
+
+ /* unmask queues that suspend or already suspended */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = q_array_get_index(q->properties.queue_id,
+ num_queues,
+ queue_ids);
+
+ if (q_idx != QUEUE_NOT_FOUND) {
+ int err = suspend_single_queue(dqm, pdd, q);
+ bool is_mes = dqm->dev->kfd->shared_resources.enable_mes;
+
+ if (!err) {
+ queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK;
+ if (exception_clear_mask && is_mes)
+ q->properties.exception_status &=
+ ~exception_clear_mask;
+
+ if (is_mes)
+ total_suspended++;
+ else
+ per_device_suspended++;
+ } else if (err != -EBUSY) {
+ r = err;
+ queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
+ break;
+ }
+ }
+ }
+
+ if (!per_device_suspended) {
+ dqm_unlock(dqm);
+ mutex_unlock(&p->event_mutex);
+ if (total_suspended)
+ amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
+ continue;
+ }
+
+ r = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ grace_period);
+
+ if (r)
+ pr_err("Failed to suspend process queues.\n");
+ else
+ total_suspended += per_device_suspended;
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = q_array_get_index(q->properties.queue_id,
+ num_queues, queue_ids);
+
+ if (q_idx == QUEUE_NOT_FOUND)
+ continue;
+
+ /* mask queue as error on suspend fail */
+ if (r)
+ queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
+ else if (exception_clear_mask)
+ q->properties.exception_status &=
+ ~exception_clear_mask;
+ }
+
+ dqm_unlock(dqm);
+ mutex_unlock(&p->event_mutex);
+ amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
+ }
+
+ if (total_suspended) {
+ struct copy_context_work_handler_workarea copy_context_worker;
+
+ INIT_WORK_ONSTACK(
+ &copy_context_worker.copy_context_work,
+ copy_context_work_handler);
+
+ copy_context_worker.p = p;
+
+ schedule_work(&copy_context_worker.copy_context_work);
+
+ flush_work(&copy_context_worker.copy_context_work);
+ destroy_work_on_stack(&copy_context_worker.copy_context_work);
+ }
+
+ if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
+ num_queues * sizeof(uint32_t)))
+ pr_err("copy_to_user failed on queue suspend\n");
+
+ kfree(queue_ids);
+
+ return total_suspended;
+}
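
The wave-context copy runs in a worker because get_wave_state() writes into
the user-mode CWSR save area, which needs the target process's mm; the
kworker borrows it via kthread_use_mm(). The on-stack work pattern used
above, reduced to its essentials:

	struct copy_context_work_handler_workarea w;

	INIT_WORK_ONSTACK(&w.copy_context_work, copy_context_work_handler);
	w.p = p;
	schedule_work(&w.copy_context_work);
	/* 'w' lives on this stack frame: the work must finish before return */
	flush_work(&w.copy_context_work);
	destroy_work_on_stack(&w.copy_context_work);
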
+
+static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
+{
+ switch (q_props->type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ return q_props->format == KFD_QUEUE_FORMAT_PM4
+ ? KFD_IOC_QUEUE_TYPE_COMPUTE
+ : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
+ case KFD_QUEUE_TYPE_SDMA:
+ return KFD_IOC_QUEUE_TYPE_SDMA;
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
+ return KFD_IOC_QUEUE_TYPE_SDMA_XGMI;
+ default:
+ WARN_ONCE(true, "queue type not recognized!");
+ return 0xffffffff;
+	}
+}
+
+void set_queue_snapshot_entry(struct queue *q,
+ uint64_t exception_clear_mask,
+ struct kfd_queue_snapshot_entry *qss_entry)
+{
+ qss_entry->ring_base_address = q->properties.queue_address;
+ qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
+ qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
+ qss_entry->ctx_save_restore_address =
+ q->properties.ctx_save_restore_area_address;
+ qss_entry->ctx_save_restore_area_size =
+ q->properties.ctx_save_restore_area_size;
+ qss_entry->exception_status = q->properties.exception_status;
+ qss_entry->queue_id = q->properties.queue_id;
+ qss_entry->gpu_id = q->device->id;
+ qss_entry->ring_size = (uint32_t)q->properties.queue_size;
+ qss_entry->queue_type = set_queue_type_for_user(&q->properties);
+ q->properties.exception_status &= ~exception_clear_mask;
+}
+
+int debug_lock_and_unmap(struct device_queue_manager *dqm)
+{
+ int r;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+ return 0;
+
+ dqm_lock(dqm);
+
+ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
+ if (r)
+ dqm_unlock(dqm);
+
+ return r;
+}
+
+int debug_map_and_unlock(struct device_queue_manager *dqm)
+{
+ int r;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+ return 0;
+
+ r = map_queues_cpsch(dqm);
+
+ dqm_unlock(dqm);
+
+ return r;
+}
+
+int debug_refresh_runlist(struct device_queue_manager *dqm)
+{
+ int r = debug_lock_and_unmap(dqm);
+
+ if (r)
+ return r;
+
+ return debug_map_and_unlock(dqm);
+}
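
Mind the asymmetric locking contract: debug_lock_and_unmap() returns with
the dqm lock held on success, and debug_map_and_unlock() releases it.
debug_refresh_runlist() shows the intended pairing; callers that update
per-VMID debug state slot their work in between:

	r = debug_lock_and_unmap(dqm);		/* dqm locked on success */
	if (r)
		return r;
	/* ... update debug state while the queues are off the HW ... */
	return debug_map_and_unlock(dqm);	/* remaps, drops the lock */
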
+
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
@@ -2452,52 +3152,66 @@ static void seq_reg_dump(struct seq_file *m,
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
struct device_queue_manager *dqm = data;
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
uint32_t (*dump)[2], n_regs;
int pipe, queue;
- int r = 0;
+ int r = 0, xcc_id;
+ uint32_t sdma_engine_start;
if (!dqm->sched_running) {
seq_puts(m, " Device is stopped\n");
return 0;
}
- r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
- KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
- &dump, &n_regs);
- if (!r) {
- seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
- KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
- KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
- KFD_CIK_HIQ_QUEUE);
- seq_reg_dump(m, dump, n_regs);
+ for_each_inst(xcc_id, xcc_mask) {
+ r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
+ KFD_CIK_HIQ_PIPE,
+ KFD_CIK_HIQ_QUEUE, &dump,
+ &n_regs, xcc_id);
+ if (!r) {
+ seq_printf(
+ m,
+ " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
+ xcc_id,
+ KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
+ KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
+ KFD_CIK_HIQ_QUEUE);
+ seq_reg_dump(m, dump, n_regs);
- kfree(dump);
- }
+ kfree(dump);
+ }
- for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
- int pipe_offset = pipe * get_queues_per_pipe(dqm);
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
- for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
- if (!test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.cp_queue_bitmap))
- continue;
+ for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
+ if (!test_bit(pipe_offset + queue,
+ dqm->dev->kfd->shared_resources.cp_queue_bitmap))
+ continue;
- r = dqm->dev->kfd2kgd->hqd_dump(
- dqm->dev->adev, pipe, queue, &dump, &n_regs);
- if (r)
- break;
+ r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
+ pipe, queue,
+ &dump, &n_regs,
+ xcc_id);
+ if (r)
+ break;
- seq_printf(m, " CP Pipe %d, Queue %d\n",
- pipe, queue);
- seq_reg_dump(m, dump, n_regs);
+ seq_printf(m,
+ " Inst %d, CP Pipe %d, Queue %d\n",
+ xcc_id, pipe, queue);
+ seq_reg_dump(m, dump, n_regs);
- kfree(dump);
+ kfree(dump);
+ }
}
}
- for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
+ sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ for (pipe = sdma_engine_start;
+ pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
+ pipe++) {
for (queue = 0;
- queue < dqm->dev->device_info.num_sdma_queues_per_engine;
+ queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->adev, pipe, queue, &dump, &n_regs);
@@ -2526,7 +3240,8 @@ int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
return r;
}
dqm->active_runlist = true;
- r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD);
dqm_unlock(dqm);
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index a537b9ef3e16..7dd4b177219d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -37,6 +37,7 @@
#define KFD_MES_PROCESS_QUANTUM 100000
#define KFD_MES_GANG_QUANTUM 10000
+#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
struct device_process_node {
struct qcm_process_device *qpd;
@@ -207,7 +208,7 @@ struct device_queue_manager_asic_ops {
struct queue *q,
struct qcm_process_device *qpd);
struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
};
/**
@@ -228,7 +229,7 @@ struct device_queue_manager {
struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
struct packet_manager packet_mgr;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
struct list_head queues;
unsigned int saved_flags;
@@ -239,8 +240,8 @@ struct device_queue_manager {
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
- uint64_t sdma_bitmap;
- uint64_t xgmi_sdma_bitmap;
+ DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES);
+ DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
/* the pasid mapping for each kfd vmid */
uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
@@ -249,6 +250,7 @@ struct device_queue_manager {
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
+ uint32_t trap_debug_vmid;
/* hw exception */
bool is_hws_hang;
@@ -256,6 +258,13 @@ struct device_queue_manager {
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
+
+ /* used for GFX 9.4.3 only */
+ uint32_t current_logical_xcc_start;
+
+ uint32_t wait_times;
+
+ wait_queue_head_t destroy_wait;
};
void device_queue_manager_init_cik(
@@ -279,6 +288,24 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
+int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+int release_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+int suspend_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t grace_period,
+ uint64_t exception_clear_mask,
+ uint32_t *usr_queue_id_array);
+int resume_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t *usr_queue_id_array);
+void set_queue_snapshot_entry(struct queue *q,
+ uint64_t exception_clear_mask,
+ struct kfd_queue_snapshot_entry *qss_entry);
+int debug_lock_and_unmap(struct device_queue_manager *dqm);
+int debug_map_and_unlock(struct device_queue_manager *dqm);
+int debug_refresh_runlist(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 8b2dd2670ab7..8af643388768 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -24,9 +24,7 @@
#include "kfd_device_queue_manager.h"
#include "vega10_enum.h"
-#include "gc/gc_9_0_offset.h"
-#include "gc/gc_9_0_sh_mask.h"
-#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "gc/gc_9_4_3_sh_mask.h"
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
@@ -62,9 +60,13 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (dqm->dev->noretry && !dqm->dev->use_iommu_v2)
+ if (dqm->dev->kfd->noretry && !dqm->dev->kfd->use_iommu_v2)
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3))
+ qpd->sh_mem_config |=
+ (1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
+
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 38c9e1ca6691..6421b620388d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -138,7 +138,7 @@ void kfd_doorbell_fini(struct kfd_dev *kfd)
iounmap(kfd->doorbell_kernel_ptr);
}
-int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
@@ -148,7 +148,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
	 * For simplicity we only allow mapping of the entire doorbell
* allocation of a single device & process.
*/
- if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev))
+ if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd))
return -EINVAL;
pdd = kfd_get_process_device_data(dev, process);
@@ -170,13 +170,13 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
(unsigned long long) vma->vm_start, address, vma->vm_flags,
- kfd_doorbell_process_slice(dev));
+ kfd_doorbell_process_slice(dev->kfd));
return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
- kfd_doorbell_process_slice(dev),
+ kfd_doorbell_process_slice(dev->kfd),
vma->vm_page_prot);
}
@@ -278,14 +278,14 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
if (!pdd->doorbell_index) {
- int r = kfd_alloc_process_doorbells(pdd->dev,
+ int r = kfd_alloc_process_doorbells(pdd->dev->kfd,
&pdd->doorbell_index);
if (r < 0)
return 0;
}
- return pdd->dev->doorbell_base +
- pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
+ return pdd->dev->kfd->doorbell_base +
+ pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev->kfd);
}
int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index c894cf8f7c50..8081a9408006 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -41,6 +41,7 @@ struct kfd_event_waiter {
wait_queue_entry_t wait;
struct kfd_event *event; /* Event to wait for */
bool activated; /* Becomes true when event is signaled */
+ bool event_age_enabled; /* set to true when last_event_age is non-zero */
};
/*
@@ -348,7 +349,7 @@ static int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
{
- struct kfd_dev *kfd;
+ struct kfd_node *kfd;
struct kfd_process_device *pdd;
void *mem, *kern_addr;
uint64_t size;
@@ -431,6 +432,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
if (!ret) {
*event_id = ev->event_id;
*event_trigger_data = ev->event_id;
+ ev->event_age = 1;
} else {
kfree(ev);
}
@@ -629,6 +631,11 @@ static void set_event(struct kfd_event *ev)
* updating the wait queues in kfd_wait_on_events.
*/
ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
+ if (!(++ev->event_age)) {
+ /* Never wrap back to reserved/default event age 0/1 */
+ ev->event_age = 2;
+ WARN_ONCE(1, "event_age wrap back!");
+ }
list_for_each_entry(waiter, &ev->wq.head, wait.entry)
WRITE_ONCE(waiter->activated, true);
@@ -791,9 +798,9 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
static int init_event_waiter(struct kfd_process *p,
struct kfd_event_waiter *waiter,
- uint32_t event_id)
+ struct kfd_event_data *event_data)
{
- struct kfd_event *ev = lookup_event_by_id(p, event_id);
+ struct kfd_event *ev = lookup_event_by_id(p, event_data->event_id);
if (!ev)
return -EINVAL;
@@ -802,6 +809,15 @@ static int init_event_waiter(struct kfd_process *p,
waiter->event = ev;
waiter->activated = ev->signaled;
ev->signaled = ev->signaled && !ev->auto_reset;
+
+	/* last_event_age = 0 is reserved for backward compatibility */
+ if (waiter->event->type == KFD_EVENT_TYPE_SIGNAL &&
+ event_data->signal_event_data.last_event_age) {
+ waiter->event_age_enabled = true;
+ if (ev->event_age != event_data->signal_event_data.last_event_age)
+ waiter->activated = true;
+ }
+
if (!waiter->activated)
add_wait_queue(&ev->wq, &waiter->wait);
spin_unlock(&ev->lock);
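
The event-age protocol closes a lost-wakeup window: userspace records the
age it last observed, and if the event signaled again before the next wait,
the waiter activates immediately instead of sleeping. A hypothetical
user-side loop (last_event_age is the new uapi field; the variable names
and surrounding flow are illustrative):

	ev_data.event_id = event_id;
	ev_data.signal_event_data.last_event_age = prev_age; /* 0 = legacy */
	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args);	/* args -> ev_data */
	prev_age = ev_data.signal_event_data.last_event_age; /* from KFD */
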
@@ -849,22 +865,29 @@ static int copy_signaled_event_data(uint32_t num_events,
struct kfd_event_waiter *event_waiters,
struct kfd_event_data __user *data)
{
- struct kfd_hsa_memory_exception_data *src;
- struct kfd_hsa_memory_exception_data __user *dst;
+ void *src;
+ void __user *dst;
struct kfd_event_waiter *waiter;
struct kfd_event *event;
- uint32_t i;
+ uint32_t i, size = 0;
for (i = 0; i < num_events; i++) {
waiter = &event_waiters[i];
event = waiter->event;
if (!event)
return -EINVAL; /* event was destroyed */
- if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
- dst = &data[i].memory_exception_data;
- src = &event->memory_exception_data;
- if (copy_to_user(dst, src,
- sizeof(struct kfd_hsa_memory_exception_data)))
+ if (waiter->activated) {
+ if (event->type == KFD_EVENT_TYPE_MEMORY) {
+ dst = &data[i].memory_exception_data;
+ src = &event->memory_exception_data;
+ size = sizeof(struct kfd_hsa_memory_exception_data);
+ } else if (event->type == KFD_EVENT_TYPE_SIGNAL &&
+ waiter->event_age_enabled) {
+ dst = &data[i].signal_event_data.last_event_age;
+ src = &event->event_age;
+ size = sizeof(u64);
+ }
+ if (size && copy_to_user(dst, src, size))
return -EFAULT;
}
}
@@ -942,8 +965,7 @@ int kfd_wait_on_events(struct kfd_process *p,
goto out_unlock;
}
- ret = init_event_waiter(p, &event_waiters[i],
- event_data.event_id);
+ ret = init_event_waiter(p, &event_waiters[i], &event_data);
if (ret)
goto out_unlock;
}
@@ -1125,7 +1147,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
}
#ifdef KFD_SUPPORT_IOMMU_V2
-void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
+void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid,
unsigned long address, bool is_write_requested,
bool is_execute_requested)
{
@@ -1221,8 +1243,9 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
-void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
- struct kfd_vm_fault_info *info)
+void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+ struct kfd_vm_fault_info *info,
+ struct kfd_hsa_memory_exception_data *data)
{
struct kfd_event *ev;
uint32_t id;
@@ -1239,19 +1262,24 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
return;
}
- memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- memory_exception_data.gpu_id = user_gpu_id;
- memory_exception_data.failure.imprecise = true;
- /* Set failure reason */
- if (info) {
- memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
- memory_exception_data.failure.NotPresent =
- info->prot_valid ? 1 : 0;
- memory_exception_data.failure.NoExecute =
- info->prot_exec ? 1 : 0;
- memory_exception_data.failure.ReadOnly =
- info->prot_write ? 1 : 0;
- memory_exception_data.failure.imprecise = 0;
+ /* SoC15 chips and onwards will pass in data from now on. */
+ if (!data) {
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.gpu_id = user_gpu_id;
+ memory_exception_data.failure.imprecise = true;
+
+ /* Set failure reason */
+ if (info) {
+ memory_exception_data.va = (info->page_addr) <<
+ PAGE_SHIFT;
+ memory_exception_data.failure.NotPresent =
+ info->prot_valid ? 1 : 0;
+ memory_exception_data.failure.NoExecute =
+ info->prot_exec ? 1 : 0;
+ memory_exception_data.failure.ReadOnly =
+ info->prot_write ? 1 : 0;
+ memory_exception_data.failure.imprecise = 0;
+ }
}
rcu_read_lock();
@@ -1260,7 +1288,8 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == KFD_EVENT_TYPE_MEMORY) {
spin_lock(&ev->lock);
- ev->memory_exception_data = memory_exception_data;
+ ev->memory_exception_data = data ? *data :
+ memory_exception_data;
set_event(ev);
spin_unlock(&ev->lock);
}
@@ -1269,7 +1298,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
kfd_unref_process(p);
}
-void kfd_signal_reset_event(struct kfd_dev *dev)
+void kfd_signal_reset_event(struct kfd_node *dev)
{
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_hsa_memory_exception_data memory_exception_data;
@@ -1325,7 +1354,7 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
srcu_read_unlock(&kfd_processes_srcu, idx);
}
-void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid)
+void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
{
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
struct kfd_hsa_memory_exception_data memory_exception_data;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index 1c62c8dd6460..52ccfd397c2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -53,6 +53,7 @@ struct signal_page;
struct kfd_event {
u32 event_id;
+ u64 event_age;
bool signaled;
bool auto_reset;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 8aebe408c544..da2ca00d79e5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -322,21 +322,21 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- if (!pdd->dev->use_iommu_v2) {
+ if (!pdd->dev->kfd->use_iommu_v2) {
/* dGPUs: SVM aperture starting at 0
* with small reserved space for kernel.
* Set them to CANONICAL addresses.
*/
pdd->gpuvm_base = SVM_USER_BASE;
pdd->gpuvm_limit =
- pdd->dev->shared_resources.gpuvm_size - 1;
+ pdd->dev->kfd->shared_resources.gpuvm_size - 1;
} else {
/* set them to non CANONICAL addresses, and no SVM is
* allocated.
*/
pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base,
- pdd->dev->shared_resources.gpuvm_size);
+ pdd->dev->kfd->shared_resources.gpuvm_size);
}
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
@@ -356,7 +356,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
*/
pdd->gpuvm_base = SVM_USER_BASE;
pdd->gpuvm_limit =
- pdd->dev->shared_resources.gpuvm_size - 1;
+ pdd->dev->kfd->shared_resources.gpuvm_size - 1;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
@@ -365,7 +365,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
int kfd_init_apertures(struct kfd_process *process)
{
uint8_t id = 0;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct kfd_process_device *pdd;
/*Iterating over all devices*/
@@ -417,7 +417,7 @@ int kfd_init_apertures(struct kfd_process *process)
}
}
- if (!dev->use_iommu_v2) {
+ if (!dev->kfd->use_iommu_v2) {
/* dGPUs: the reserved space for kernel
* before SVM
*/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
new file mode 100644
index 000000000000..c7991e07b6be
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kfd_events.h"
+#include "kfd_debug.h"
+#include "soc15_int.h"
+#include "kfd_device_queue_manager.h"
+
+/*
+ * GFX10 SQ Interrupts
+ *
+ * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
+ * packet to the Interrupt Handler:
+ * Auto - Generated by the SQG (various cmd overflows, timestamps etc)
+ * Wave - Generated by S_SENDMSG through a shader program
+ * Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
+ *
+ * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
+ * 4 bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
+ *
+ * - context_id1[7:6]
+ * Encoding type (0 = Auto, 1 = Wave, 2 = Error)
+ *
+ * - context_id0[24]
+ * PRIV bit indicates that Wave S_SEND or error occurred within trap
+ *
+ * - context_id0[22:0]
+ * 23-bit data with the following layout per encoding type:
+ * Auto - only context_id0[8:0] is used, which reports various interrupts
+ * generated by SQG. The rest is 0.
+ * Wave - user data sent from m0 via S_SENDMSG
+ * Error - Error type (context_id0[22:19]), Error Details (rest of bits)
+ *
+ * The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave
+ * S_SENDMSG and Errors. These are 0 for Auto.
+ */
+
+enum SQ_INTERRUPT_WORD_ENCODING {
+ SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
+ SQ_INTERRUPT_WORD_ENCODING_INST,
+ SQ_INTERRUPT_WORD_ENCODING_ERROR,
+};
+
+enum SQ_INTERRUPT_ERROR_TYPE {
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
+ SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
+ SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
+};
+
+/* SQ_INTERRUPT_WORD_AUTO_CTXID */
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT 0
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT 1
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL__SHIFT 2
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL__SHIFT 3
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT 7
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID__SHIFT 4
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT 6
+
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK 0x00000001
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK 0x00000002
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL_MASK 0x00000004
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL_MASK 0x00000008
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK 0x00000080
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID_MASK 0x030
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK 0x0c0
+
+/* SQ_INTERRUPT_WORD_WAVE_CTXID */
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT 0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID__SHIFT 23
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT 24
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT 25
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID__SHIFT 30
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT 0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID__SHIFT 4
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT 6
+
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK 0x000007fffff
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID_MASK 0x0000800000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK 0x00001000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK 0x0003e000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID_MASK 0x000c0000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK 0x00f
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID_MASK 0x030
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK 0x0c0
+
+#define KFD_CTXID0__ERR_TYPE_MASK 0x780000
+#define KFD_CTXID0__ERR_TYPE__SHIFT 19
+
+/* GFX10 SQ interrupt ENC type bit (context_id1[7:6]) for wave s_sendmsg */
+#define KFD_CONTEXT_ID1_ENC_TYPE_WAVE_MASK 0x40
+/* GFX10 SQ interrupt PRIV bit (context_id0[24]) for s_sendmsg inside trap */
+#define KFD_CONTEXT_ID0_PRIV_MASK 0x1000000
+/*
+ * The debugger sends user data (m0) with PRIV=1 when it requires
+ * notification from the KFD, carrying the target queue id (DOORBELL_ID)
+ * and the trap code (TRAP_CODE).
+ */
+#define KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK 0x0003ff
+#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT 10
+#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK 0x07fc00
+#define KFD_DEBUG_DOORBELL_ID(ctxid0) ((ctxid0) & \
+ KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK)
+#define KFD_DEBUG_TRAP_CODE(ctxid0) (((ctxid0) & \
+ KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK) \
+ >> KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT)
+#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
+#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
+#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
+ KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
+ >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
+
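
Worked example of the decode macros above, for a hypothetical wave-encoded
entry with PRIV set:

	/*
	 * context_id0 = 0x01001c05
	 *   PRIV (bit 24)                      = 1     s_sendmsg inside trap
	 *   KFD_DEBUG_DOORBELL_ID(context_id0) = 0x005 queue doorbell ID
	 *   KFD_DEBUG_TRAP_CODE(context_id0)   = 0x7   debugger trap code
	 */
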
+static void event_interrupt_poison_consumption(struct kfd_node *dev,
+ uint16_t pasid, uint16_t client_id)
+{
+ int old_poison, ret = -EINVAL;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return;
+
+	/* all queues of a process are unmapped in one go */
+ old_poison = atomic_cmpxchg(&p->poison, 0, 1);
+ kfd_unref_process(p);
+ if (old_poison)
+ return;
+
+ switch (client_id) {
+ case SOC15_IH_CLIENTID_SE0SH:
+ case SOC15_IH_CLIENTID_SE1SH:
+ case SOC15_IH_CLIENTID_SE2SH:
+ case SOC15_IH_CLIENTID_SE3SH:
+ case SOC15_IH_CLIENTID_UTCL2:
+ ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+ break;
+ case SOC15_IH_CLIENTID_SDMA0:
+ case SOC15_IH_CLIENTID_SDMA1:
+ case SOC15_IH_CLIENTID_SDMA2:
+ case SOC15_IH_CLIENTID_SDMA3:
+ case SOC15_IH_CLIENTID_SDMA4:
+ break;
+ default:
+ break;
+ }
+
+ kfd_signal_poison_consumed_event(dev, pasid);
+
+	/* If unmapping the queues succeeded, do page retirement without a
+	 * GPU reset; if it failed, fall back to a full GPU reset.
+	 */
+ if (!ret) {
+ dev_warn(dev->adev->dev,
+ "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
+ client_id);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ } else {
+ dev_warn(dev->adev->dev,
+ "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
+ client_id);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ }
+}
+
+static bool event_interrupt_isr_v10(struct kfd_node *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
+{
+ uint16_t source_id, client_id, pasid, vmid;
+ const uint32_t *data = ih_ring_entry;
+
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+ /* Only handle interrupts from KFD VMIDs */
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+ if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
+ (vmid < dev->vm_info.first_vmid_kfd ||
+ vmid > dev->vm_info.last_vmid_kfd))
+ return false;
+
+ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+
+ /* Only handle clients we care about */
+ if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
+ client_id != SOC15_IH_CLIENTID_SDMA0 &&
+ client_id != SOC15_IH_CLIENTID_SDMA1 &&
+ client_id != SOC15_IH_CLIENTID_SDMA2 &&
+ client_id != SOC15_IH_CLIENTID_SDMA3 &&
+ client_id != SOC15_IH_CLIENTID_SDMA4 &&
+ client_id != SOC15_IH_CLIENTID_SDMA5 &&
+ client_id != SOC15_IH_CLIENTID_SDMA6 &&
+ client_id != SOC15_IH_CLIENTID_SDMA7 &&
+ client_id != SOC15_IH_CLIENTID_VMC &&
+ client_id != SOC15_IH_CLIENTID_VMC1 &&
+ client_id != SOC15_IH_CLIENTID_UTCL2 &&
+ client_id != SOC15_IH_CLIENTID_SE0SH &&
+ client_id != SOC15_IH_CLIENTID_SE1SH &&
+ client_id != SOC15_IH_CLIENTID_SE2SH &&
+ client_id != SOC15_IH_CLIENTID_SE3SH)
+ return false;
+
+ pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3],
+ data[4], data[5], data[6], data[7]);
+
+ /* If there is no valid PASID, it's likely a bug */
+ if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+		return false;
+
+ /* Interrupt types we care about: various signals and faults.
+ * They will be forwarded to a work queue (see below).
+ */
+ return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+ source_id == SOC15_INTSRC_SDMA_TRAP ||
+ source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
+ client_id == SOC15_IH_CLIENTID_UTCL2 ||
+ KFD_IRQ_IS_FENCE(client_id, source_id);
+}
+
+static void event_interrupt_wq_v10(struct kfd_node *dev,
+ const uint32_t *ih_ring_entry)
+{
+ uint16_t source_id, client_id, pasid, vmid;
+ uint32_t context_id0, context_id1;
+ uint32_t encoding, sq_intr_err_type;
+
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+ context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
+
+ if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
+ client_id == SOC15_IH_CLIENTID_SE0SH ||
+ client_id == SOC15_IH_CLIENTID_SE1SH ||
+ client_id == SOC15_IH_CLIENTID_SE2SH ||
+ client_id == SOC15_IH_CLIENTID_SE3SH) {
+ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+ kfd_signal_event_interrupt(pasid, context_id0, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
+ encoding = REG_GET_FIELD(context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+ pr_debug(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ WLT),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF0_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF1_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+ pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
+ if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
+ if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_DEBUG_TRAP_CODE(context_id0),
+ NULL, 0))
+ return;
+ }
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
+ ERR_TYPE);
+ pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID),
+ sq_intr_err_type);
+ if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
+ sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
+ event_interrupt_poison_consumption(dev, pasid, source_id);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
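+ /* The low 23 bits of context_id0 carry the (partial) event ID */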
+ kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
+ NULL,
+ 0);
+ }
+ } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
+ client_id == SOC15_IH_CLIENTID_SDMA1 ||
+ client_id == SOC15_IH_CLIENTID_SDMA2 ||
+ client_id == SOC15_IH_CLIENTID_SDMA3 ||
+ (client_id == SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid &&
+ KFD_GC_VERSION(dev) == IP_VERSION(10, 3, 0)) ||
+ client_id == SOC15_IH_CLIENTID_SDMA4 ||
+ client_id == SOC15_IH_CLIENTID_SDMA5 ||
+ client_id == SOC15_IH_CLIENTID_SDMA6 ||
+ client_id == SOC15_IH_CLIENTID_SDMA7) {
+ if (source_id == SOC15_INTSRC_SDMA_TRAP) {
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
+ } else if (source_id == SOC15_INTSRC_SDMA_ECC) {
+ event_interrupt_poison_consumption(dev, pasid, source_id);
+ return;
+ }
+ } else if (client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
+ client_id == SOC15_IH_CLIENTID_UTCL2) {
+ struct kfd_vm_fault_info info = {0};
+ uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ struct kfd_hsa_memory_exception_data exception_data;
+
+ if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
+ amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ event_interrupt_poison_consumption(dev, pasid, client_id);
+ return;
+ }
+
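+ /* Decode the fault protection bits packed into the IH ring_id
+ * field (assumption: bit 3 = PTE-valid/permission fault, bit 4 =
+ * read access, bit 5 = write access).
+ */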
+ info.vmid = vmid;
+ info.mc_id = client_id;
+ info.page_addr = ih_ring_entry[4] |
+ (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
+ info.prot_valid = ring_id & 0x08;
+ info.prot_read = ring_id & 0x10;
+ info.prot_write = ring_id & 0x20;
+
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.gpu_id = dev->id;
+ exception_data.va = (info.page_addr) << PAGE_SHIFT;
+ exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
+ exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
+ exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
+ exception_data.failure.imprecise = 0;
+
+ kfd_set_dbg_ev_from_interrupt(dev,
+ pasid,
+ -1,
+ KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
+ &exception_data,
+ sizeof(exception_data));
+ } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
+ kfd_process_close_interrupt_drain(pasid);
+ }
+}
+
+const struct kfd_event_interrupt_class event_interrupt_class_v10 = {
+ .interrupt_isr = event_interrupt_isr_v10,
+ .interrupt_wq = event_interrupt_wq_v10,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index 0d53f6067422..f933bd231fb9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -26,6 +26,7 @@
#include "kfd_device_queue_manager.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "kfd_smi_events.h"
+#include "kfd_debug.h"
/*
* GFX11 SQ Interrupts
@@ -187,7 +188,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
}
-static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev,
+static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
uint16_t pasid, uint16_t source_id)
{
int ret = -EINVAL;
@@ -225,7 +226,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev,
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
}
-static bool event_interrupt_isr_v11(struct kfd_dev *dev,
+static bool event_interrupt_isr_v11(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
@@ -238,7 +239,7 @@ static bool event_interrupt_isr_v11(struct kfd_dev *dev,
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle interrupts from KFD VMIDs */
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
- if (/*!KFD_IRQ_IS_FENCE(client_id, source_id) &&*/
+ if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
(vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd))
return false;
@@ -267,19 +268,19 @@ static bool event_interrupt_isr_v11(struct kfd_dev *dev,
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
source_id == SOC21_INTSRC_SDMA_TRAP ||
- /* KFD_IRQ_IS_FENCE(client_id, source_id) || */
+ KFD_IRQ_IS_FENCE(client_id, source_id) ||
(((client_id == SOC21_IH_CLIENTID_VMC) ||
((client_id == SOC21_IH_CLIENTID_GFX) &&
(source_id == UTCL2_1_0__SRCID__FAULT))) &&
!amdgpu_no_queue_eviction_on_vm_fault);
}
-static void event_interrupt_wq_v11(struct kfd_dev *dev,
+static void event_interrupt_wq_v11(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, ring_id, pasid, vmid;
uint32_t context_id0, context_id1;
- uint8_t sq_int_enc, sq_int_errtype, sq_int_priv;
+ uint8_t sq_int_enc, sq_int_priv, sq_int_errtype;
struct kfd_vm_fault_info info = {0};
struct kfd_hsa_memory_exception_data exception_data;
@@ -312,9 +313,9 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
exception_data.failure.imprecise = 0;
- /*kfd_set_dbg_ev_from_interrupt(dev, pasid, -1,
+ kfd_set_dbg_ev_from_interrupt(dev, pasid, -1,
KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
- &exception_data, sizeof(exception_data));*/
+ &exception_data, sizeof(exception_data));
kfd_smi_event_update_vmfault(dev, pasid);
/* GRBM, SDMA, SE, PMM */
@@ -324,11 +325,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
/* CP */
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
- /*else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
- NULL, 0);*/
+ NULL, 0);
/* SDMA */
else if (source_id == SOC21_INTSRC_SDMA_TRAP)
@@ -350,11 +351,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
NULL, 0)))
- return;*/
+ return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
@@ -373,8 +374,8 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
}
- /*} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
- kfd_process_close_interrupt_drain(pasid);*/
+ } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
+ kfd_process_close_interrupt_drain(pasid);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 0b75a37b689b..d5c9f30552e3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -23,10 +23,40 @@
#include "kfd_priv.h"
#include "kfd_events.h"
+#include "kfd_debug.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
+/*
+ * GFX9 SQ Interrupts
+ *
+ * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
+ * packet to the Interrupt Handler:
+ * Auto - Generated by the SQG (various cmd overflows, timestamps etc)
+ * Wave - Generated by S_SENDMSG through a shader program
+ * Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
+ *
+ * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
+ * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
+ *
+ * - context_id0[27:26]
+ * Encoding type (0 = Auto, 1 = Wave, 2 = Error)
+ *
+ * - context_id0[13]
+ * PRIV bit indicates that the Wave S_SEND or error occurred within the trap handler
+ *
+ * - {context_id1[7:0],context_id0[31:28],context_id0[11:0]}
+ * 24-bit data with the following layout per encoding type:
+ * Auto - only context_id0[8:0] is used, which reports various interrupts
+ * generated by SQG. The rest is 0.
+ * Wave - user data sent from m0 via S_SENDMSG
+ * Error - Error type (context_id1[7:4]), Error Details (rest of bits)
+ *
+ * The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave
+ * S_SENDMSG and Errors. These are 0 for Auto.
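+ *
+ * Example (illustrative values): context_id0 = 0x04002001 with
+ * context_id1 = 0 decodes as encoding 1 (Wave), PRIV = 1 and 24-bit
+ * data 0x000001 (see KFD_CONTEXT_ID_GET_SQ_INT_DATA() below).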
+ */
+
enum SQ_INTERRUPT_WORD_ENCODING {
SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
SQ_INTERRUPT_WORD_ENCODING_INST,
@@ -84,13 +114,33 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000
+/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */
#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))
#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
-static void event_interrupt_poison_consumption_v9(struct kfd_dev *dev,
+/*
+ * The debugger will send user data (m0) with PRIV=1 to indicate it requires
+ * notification from the KFD with the following queue id (DOORBELL_ID) and
+ * trap code (TRAP_CODE).
+ */
+#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff
+#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10
+#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00
+#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) & \
+ KFD_INT_DATA_DEBUG_DOORBELL_MASK)
+#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) & \
+ KFD_INT_DATA_DEBUG_TRAP_CODE_MASK) \
+ >> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT)
+#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
+#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
+#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
+ KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
+ >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
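+
+/*
+ * Example (illustrative values): sq_int_data 0x801 yields
+ * KFD_DEBUG_DOORBELL_ID() = 0x1 (bits 9:0) and
+ * KFD_DEBUG_TRAP_CODE() = 0x2 (bits 18:10).
+ */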
+
+static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
int old_poison, ret = -EINVAL;
@@ -160,7 +210,7 @@ static bool context_id_expected(struct kfd_dev *dev)
}
}
-static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+static bool event_interrupt_isr_v9(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
@@ -168,14 +218,16 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+
/* Only handle interrupts from KFD VMIDs */
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
- if (vmid < dev->vm_info.first_vmid_kfd ||
- vmid > dev->vm_info.last_vmid_kfd)
+ if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
+ (vmid < dev->vm_info.first_vmid_kfd ||
+ vmid > dev->vm_info.last_vmid_kfd))
return false;
- source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
- client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle clients we care about */
@@ -194,7 +246,8 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
client_id != SOC15_IH_CLIENTID_SE0SH &&
client_id != SOC15_IH_CLIENTID_SE1SH &&
client_id != SOC15_IH_CLIENTID_SE2SH &&
- client_id != SOC15_IH_CLIENTID_SE3SH)
+ client_id != SOC15_IH_CLIENTID_SE3SH &&
+ !KFD_IRQ_IS_FENCE(client_id, source_id))
return false;
/* This is a known issue for gfx9. Under non-HWS, pasid is not set
@@ -206,7 +259,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
*patched_flag = true;
memcpy(patched_ihre, ih_ring_entry,
- dev->device_info.ih_ring_entry_size);
+ dev->kfd->device_info.ih_ring_entry_size);
pasid = dev->dqm->vmid_pasid[vmid];
@@ -235,7 +288,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
uint32_t context_id =
SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
- if (context_id == 0 && context_id_expected(dev))
+ if (context_id == 0 && context_id_expected(dev->kfd))
return false;
}
@@ -247,13 +300,14 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
source_id == SOC15_INTSRC_SDMA_ECC ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ KFD_IRQ_IS_FENCE(client_id, source_id) ||
((client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) &&
!amdgpu_no_queue_eviction_on_vm_fault);
}
-static void event_interrupt_wq_v9(struct kfd_dev *dev,
+static void event_interrupt_wq_v9(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, pasid, vmid;
@@ -302,6 +356,13 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
sq_int_data);
+ if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
+ if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(sq_int_data),
+ KFD_DEBUG_TRAP_CODE(sq_int_data),
+ NULL, 0))
+ return;
+ }
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
@@ -324,8 +385,12 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
- kfd_signal_hw_exception_event(pasid);
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
+ NULL, 0);
+ }
} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
client_id == SOC15_IH_CLIENTID_SDMA1 ||
client_id == SOC15_IH_CLIENTID_SDMA2 ||
@@ -345,6 +410,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ struct kfd_hsa_memory_exception_data exception_data;
if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
@@ -360,9 +426,23 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
info.prot_read = ring_id & 0x10;
info.prot_write = ring_id & 0x20;
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.gpu_id = dev->id;
+ exception_data.va = (info.page_addr) << PAGE_SHIFT;
+ exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
+ exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
+ exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
+ exception_data.failure.imprecise = 0;
+
+ kfd_set_dbg_ev_from_interrupt(dev,
+ pasid,
+ -1,
+ KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
+ &exception_data,
+ sizeof(exception_data));
kfd_smi_event_update_vmfault(dev, pasid);
- kfd_dqm_evict_pasid(dev->dqm, pasid);
- kfd_signal_vm_fault_event(dev, pasid, &info);
+ } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
+ kfd_process_close_interrupt_drain(pasid);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 34772fe74296..dd3c43c1ad70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -50,29 +50,29 @@
static void interrupt_wq(struct work_struct *);
-int kfd_interrupt_init(struct kfd_dev *kfd)
+int kfd_interrupt_init(struct kfd_node *node)
{
int r;
- r = kfifo_alloc(&kfd->ih_fifo,
- KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size,
+ r = kfifo_alloc(&node->ih_fifo,
+ KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,
GFP_KERNEL);
if (r) {
- dev_err(kfd->adev->dev, "Failed to allocate IH fifo\n");
+ dev_err(node->adev->dev, "Failed to allocate IH fifo\n");
return r;
}
- kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
- if (unlikely(!kfd->ih_wq)) {
- kfifo_free(&kfd->ih_fifo);
- dev_err(kfd->adev->dev, "Failed to allocate KFD IH workqueue\n");
+ node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!node->ih_wq)) {
+ kfifo_free(&node->ih_fifo);
+ dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
return -ENOMEM;
}
- spin_lock_init(&kfd->interrupt_lock);
+ spin_lock_init(&node->interrupt_lock);
- INIT_WORK(&kfd->interrupt_work, interrupt_wq);
+ INIT_WORK(&node->interrupt_work, interrupt_wq);
- kfd->interrupts_active = true;
+ node->interrupts_active = true;
/*
* After this function returns, the interrupt will be enabled. This
@@ -84,7 +84,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
return 0;
}
-void kfd_interrupt_exit(struct kfd_dev *kfd)
+void kfd_interrupt_exit(struct kfd_node *node)
{
/*
* Stop the interrupt handler from writing to the ring and scheduling
@@ -93,31 +93,31 @@ void kfd_interrupt_exit(struct kfd_dev *kfd)
*/
unsigned long flags;
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
- kfd->interrupts_active = false;
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
+ spin_lock_irqsave(&node->interrupt_lock, flags);
+ node->interrupts_active = false;
+ spin_unlock_irqrestore(&node->interrupt_lock, flags);
/*
* flush_work ensures that there are no outstanding
* work-queue items that will access interrupt_ring. New work items
* can't be created because we stopped interrupt handling above.
*/
- flush_workqueue(kfd->ih_wq);
+ flush_workqueue(node->ih_wq);
- kfifo_free(&kfd->ih_fifo);
+ kfifo_free(&node->ih_fifo);
}
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
+bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
{
int count;
- count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info.ih_ring_entry_size);
- if (count != kfd->device_info.ih_ring_entry_size) {
- dev_dbg_ratelimited(kfd->adev->dev,
+ count = kfifo_in(&node->ih_fifo, ih_ring_entry,
+ node->kfd->device_info.ih_ring_entry_size);
+ if (count != node->kfd->device_info.ih_ring_entry_size) {
+ dev_dbg_ratelimited(node->adev->dev,
"Interrupt ring overflow, dropping interrupt %d\n",
count);
return false;
@@ -129,32 +129,32 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
-static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
+static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry)
{
int count;
- count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info.ih_ring_entry_size);
+ count = kfifo_out(&node->ih_fifo, ih_ring_entry,
+ node->kfd->device_info.ih_ring_entry_size);
- WARN_ON(count && count != kfd->device_info.ih_ring_entry_size);
+ WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size);
- return count == kfd->device_info.ih_ring_entry_size;
+ return count == node->kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
{
- struct kfd_dev *dev = container_of(work, struct kfd_dev,
+ struct kfd_node *dev = container_of(work, struct kfd_node,
interrupt_work);
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
unsigned long start_jiffies = jiffies;
- if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
+ if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
dev_err_once(dev->adev->dev, "Ring entry too small\n");
return;
}
while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
- dev->device_info.event_interrupt_class->interrupt_wq(dev,
+ dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
if (time_is_before_jiffies(start_jiffies + HZ)) {
/* If we spent more than a second processing signals,
@@ -166,14 +166,14 @@ static void interrupt_wq(struct work_struct *work)
}
}
-bool interrupt_is_wanted(struct kfd_dev *dev,
+bool interrupt_is_wanted(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre, bool *flag)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
- wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev,
+ wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev,
ih_ring_entry, patched_ihre, flag);
return wanted != 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index ec1bf611624e..808ee010520a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -109,11 +109,11 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
*/
int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
struct kfd_process *p = pdd->process;
int err;
- if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND)
+ if (!dev->kfd->use_iommu_v2 || pdd->bound == PDD_BOUND)
return 0;
if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
@@ -121,6 +121,12 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
return -EINVAL;
}
+ if (!kfd_is_first_node(dev)) {
+ dev_warn_once(kfd_device,
+ "IOMMU supported only on first node\n");
+ return 0;
+ }
+
err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread);
if (!err)
pdd->bound = PDD_BOUND;
@@ -138,7 +144,8 @@ void kfd_iommu_unbind_process(struct kfd_process *p)
int i;
for (i = 0; i < p->n_pdds; i++)
- if (p->pdds[i]->bound == PDD_BOUND)
+ if ((p->pdds[i]->bound == PDD_BOUND) &&
+ (kfd_is_first_node((p->pdds[i]->dev))))
amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev,
p->pasid);
}
@@ -146,7 +153,7 @@ void kfd_iommu_unbind_process(struct kfd_process *p)
/* Callback for process shutdown invoked by the IOMMU driver */
static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid)
{
- struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
+ struct kfd_node *dev = kfd_device_by_pci_dev(pdev);
struct kfd_process *p;
struct kfd_process_device *pdd;
@@ -182,7 +189,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid)
static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid,
unsigned long address, u16 flags)
{
- struct kfd_dev *dev;
+ struct kfd_node *dev;
dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
@@ -205,7 +212,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid,
* Bind processes to the device that have been temporarily unbound
* (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
*/
-static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
+static int kfd_bind_processes_to_device(struct kfd_node *knode)
{
struct kfd_process_device *pdd;
struct kfd_process *p;
@@ -216,14 +223,14 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
mutex_lock(&p->mutex);
- pdd = kfd_get_process_device_data(kfd, p);
+ pdd = kfd_get_process_device_data(knode, p);
if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
mutex_unlock(&p->mutex);
continue;
}
- err = amd_iommu_bind_pasid(kfd->adev->pdev, p->pasid,
+ err = amd_iommu_bind_pasid(knode->adev->pdev, p->pasid,
p->lead_thread);
if (err < 0) {
pr_err("Unexpected pasid 0x%x binding failure\n",
@@ -246,7 +253,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
* processes will be restored to PDD_BOUND state in
* kfd_bind_processes_to_device.
*/
-static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
+static void kfd_unbind_processes_from_device(struct kfd_node *knode)
{
struct kfd_process_device *pdd;
struct kfd_process *p;
@@ -256,7 +263,7 @@ static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
mutex_lock(&p->mutex);
- pdd = kfd_get_process_device_data(kfd, p);
+ pdd = kfd_get_process_device_data(knode, p);
if (WARN_ON(!pdd)) {
mutex_unlock(&p->mutex);
@@ -281,7 +288,7 @@ void kfd_iommu_suspend(struct kfd_dev *kfd)
if (!kfd->use_iommu_v2)
return;
- kfd_unbind_processes_from_device(kfd);
+ kfd_unbind_processes_from_device(kfd->nodes[0]);
amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
@@ -312,7 +319,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev,
iommu_invalid_ppr_cb);
- err = kfd_bind_processes_to_device(kfd);
+ err = kfd_bind_processes_to_device(kfd->nodes[0]);
if (err) {
amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index bcf7bc3302c9..1bea629c49ca 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -38,7 +38,7 @@
/* Initialize a kernel queue, including allocations of GART memory
* needed for the queue.
*/
-static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
struct queue_properties prop;
@@ -75,7 +75,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
if (!kq->mqd_mgr)
return false;
- prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off);
if (!prop.doorbell_ptr) {
pr_err("Failed to initialize doorbell");
@@ -112,7 +112,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
- retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size,
+ retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size,
&kq->wptr_mem);
if (retval != 0)
@@ -189,7 +189,7 @@ err_rptr_allocate_vidmem:
err_eop_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem:
- kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
+ kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr);
err_get_kernel_doorbell:
return false;
@@ -220,7 +220,7 @@ static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
kfd_gtt_sa_free(kq->dev, kq->eop_mem);
kfd_gtt_sa_free(kq->dev, kq->pq);
- kfd_release_kernel_doorbell(kq->dev,
+ kfd_release_kernel_doorbell(kq->dev->kfd,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
}
@@ -298,7 +298,7 @@ void kq_submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
- if (kq->dev->device_info.doorbell_size == 8) {
+ if (kq->dev->kfd->device_info.doorbell_size == 8) {
*kq->wptr64_kernel = kq->pending_wptr64;
write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
kq->pending_wptr64);
@@ -311,7 +311,7 @@ void kq_submit_packet(struct kernel_queue *kq)
void kq_rollback_packet(struct kernel_queue *kq)
{
- if (kq->dev->device_info.doorbell_size == 8) {
+ if (kq->dev->kfd->device_info.doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
kq->pending_wptr = *kq->wptr_kernel %
(kq->queue->properties.queue_size / 4);
@@ -320,7 +320,7 @@ void kq_rollback_packet(struct kernel_queue *kq)
}
}
-struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type)
{
struct kernel_queue *kq;
@@ -345,7 +345,7 @@ void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
}
/* FIXME: Can this test be removed? */
-static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
+static __attribute__((unused)) void test_kq(struct kfd_node *dev)
{
struct kernel_queue *kq;
uint32_t *buffer, i;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 383202fd1ea2..9a6244430845 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -53,7 +53,7 @@ void kq_rollback_packet(struct kernel_queue *kq);
struct kernel_queue {
/* data */
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct mqd_manager *mqd_mgr;
struct queue *queue;
uint64_t pending_wptr64;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 54933903bcb8..709ac885ca6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -64,7 +64,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = npages * 8;
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
@@ -206,7 +206,7 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
- return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
+ return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
}
static void
@@ -236,7 +236,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
unsigned long addr;
addr = page_to_pfn(page) << PAGE_SHIFT;
- return (addr - adev->kfd.dev->pgmap.range.start);
+ return (addr - adev->kfd.pgmap.range.start);
}
static struct page *
@@ -287,11 +287,12 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
}
static int
-svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
- uint64_t npages = migrate->npages;
+ uint64_t npages = migrate->cpages;
+ struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
dma_addr_t *src;
@@ -321,7 +322,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
if (r) {
- dev_err(adev->dev, "%s: fail %d dma_map_page\n",
+ dev_err(dev, "%s: fail %d dma_map_page\n",
__func__, r);
goto out_free_vram_pages;
}
@@ -390,12 +391,13 @@ out_free_vram_pages:
}
static long
-svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
+ struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
@@ -421,9 +423,9 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.dst = migrate.src + npages;
scratch = (dma_addr_t *)(migrate.dst + npages);
- kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_start(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- 0, adev->kfd.dev->id, prange->prefetch_loc,
+ 0, node->id, prange->prefetch_loc,
prange->preferred_loc, trigger);
r = migrate_vma_setup(&migrate);
@@ -445,7 +447,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
else
pr_debug("0x%lx pages migrated\n", cpages);
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
+ r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@@ -454,9 +456,9 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- 0, adev->kfd.dev->id, trigger);
+ 0, node->id, trigger);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
svm_range_free_dma_mappings(prange);
@@ -465,7 +467,7 @@ out_free:
kvfree(buf);
out:
if (!r && cpages) {
- pdd = svm_range_get_pdd_by_adev(prange, adev);
+ pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
@@ -492,8 +494,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- struct amdgpu_device *adev;
uint64_t ttm_res_offset;
+ struct kfd_node *node;
unsigned long cpages = 0;
long r = 0;
@@ -503,9 +505,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
return 0;
}
- adev = svm_range_get_adev_by_id(prange, best_loc);
- if (!adev) {
- pr_debug("failed to get device by id 0x%x\n", best_loc);
+ node = svm_range_get_node_by_id(prange, best_loc);
+ if (!node) {
+ pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
return -ENODEV;
}
@@ -515,9 +517,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
- r = svm_range_vram_node_new(adev, prange, true);
+ r = svm_range_vram_node_new(node, prange, true);
if (r) {
- dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+ dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
return r;
}
ttm_res_offset = prange->offset << PAGE_SHIFT;
@@ -530,7 +532,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
+ r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
@@ -649,11 +651,13 @@ out_oom:
/**
* svm_migrate_vma_to_ram - migrate range inside one vma from device to system
*
- * @adev: amdgpu device to migrate from
* @prange: svm range structure
* @vma: vm_area_struct that range [start, end] belongs to
* @start: range start virtual address in pages
* @end: range end virtual address in pages
+ * @node: kfd node device to migrate from
+ * @trigger: reason for the migration
+ * @fault_page: the faulting page (vmf->page) when called from svm_migrate_to_ram(), the CPU page fault callback
*
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
@@ -663,7 +667,7 @@ out_oom:
* positive values - partial migration, number of pages not migrated
*/
static long
-svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
+svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
uint32_t trigger, struct page *fault_page)
{
@@ -671,6 +675,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
unsigned long upages = npages;
unsigned long cpages = 0;
+ struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
@@ -699,9 +704,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.fault_page = fault_page;
scratch = (dma_addr_t *)(migrate.dst + npages);
- kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_start(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- adev->kfd.dev->id, 0, prange->prefetch_loc,
+ node->id, 0, prange->prefetch_loc,
prange->preferred_loc, trigger);
r = migrate_vma_setup(&migrate);
@@ -735,9 +740,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- adev->kfd.dev->id, 0, trigger);
+ node->id, 0, trigger);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
@@ -745,7 +750,7 @@ out_free:
kvfree(buf);
out:
if (!r && cpages) {
- pdd = svm_range_get_pdd_by_adev(prange, adev);
+ pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
}
@@ -757,6 +762,7 @@ out:
* @prange: range structure
* @mm: process mm, use current->mm if NULL
* @trigger: reason for the migration
+ * @fault_page: the faulting page (vmf->page) when called from svm_migrate_to_ram(), the CPU page fault callback
*
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
@@ -766,7 +772,7 @@ out:
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
uint32_t trigger, struct page *fault_page)
{
- struct amdgpu_device *adev;
+ struct kfd_node *node;
struct vm_area_struct *vma;
unsigned long addr;
unsigned long start;
@@ -780,13 +786,11 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
return 0;
}
- adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
- if (!adev) {
- pr_debug("failed to get device by id 0x%x\n",
- prange->actual_loc);
+ node = svm_range_get_node_by_id(prange, prange->actual_loc);
+ if (!node) {
+ pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
-
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
prange->svms, prange, prange->start, prange->last,
prange->actual_loc);
@@ -805,7 +809,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
+ r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
fault_page);
if (r < 0) {
pr_debug("failed %ld to migrate prange %p\n", r, prange);
@@ -987,18 +991,21 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
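/* e.g. (illustrative): 16 GB of VRAM with 4 KiB pages and a 64-byte
* struct page costs 16G / 4K * 64 = 256 MB of system memory
*/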
-int svm_migrate_init(struct amdgpu_device *adev)
+int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
- struct kfd_dev *kfddev = adev->kfd.dev;
+ struct amdgpu_kfd_dev *kfddev = &adev->kfd;
struct dev_pagemap *pgmap;
struct resource *res = NULL;
unsigned long size;
void *r;
- /* Page migration works on Vega10 or newer */
- if (!KFD_IS_SOC15(kfddev))
+ /* Page migration works on gfx9 or newer */
+ if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
return -EINVAL;
+ if (adev->gmc.is_app_apu)
+ return 0;
+
pgmap = &kfddev->pgmap;
memset(pgmap, 0, sizeof(*pgmap));
@@ -1041,8 +1048,6 @@ int svm_migrate_init(struct amdgpu_device *adev)
amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));
- svm_range_set_max_pages(adev);
-
pr_info("HMM registered %ldMB device memory\n", size >> 20);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index a5d7e6d22264..487f26368164 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -47,15 +47,6 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
-int svm_migrate_init(struct amdgpu_device *adev);
-
-#else
-
-static inline int svm_migrate_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
#endif /* KFD_MIGRATE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 623ccd227b7d..863cf060af48 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -46,7 +46,7 @@ int pipe_priority_map[] = {
KFD_PIPE_PRIORITY_CS_HIGH
};
-struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q)
+struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj = NULL;
@@ -61,7 +61,7 @@ struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_propertie
return mqd_mem_obj;
}
-struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
+struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj = NULL;
@@ -72,11 +72,12 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
return NULL;
offset = (q->sdma_engine_id *
- dev->device_info.num_sdma_queues_per_engine +
+ dev->kfd->device_info.num_sdma_queues_per_engine +
q->sdma_queue_id) *
dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
- offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
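+ /* SDMA MQDs follow one HIQ MQD per XCC instance in the shared
+ * hiq_sdma buffer, hence the NUM_XCC() scaling.
+ */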
+ offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
+ NUM_XCC(dev->xcc_mask);
mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
+ offset);
@@ -189,7 +190,7 @@ int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
- queue_id, p->doorbell_off);
+ queue_id, p->doorbell_off, 0);
}
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
@@ -197,7 +198,7 @@ int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
- pipe_id, queue_id);
+ pipe_id, queue_id, 0);
}
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
@@ -216,7 +217,7 @@ bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
- pipe_id, queue_id);
+ pipe_id, queue_id, 0);
}
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -246,3 +247,28 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
{
return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
+
+uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev)
+{
+ return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+}
+
+void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj,
+ uint32_t virtual_xcc_id)
+{
+ uint64_t offset;
+
+ offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id;
+
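+ /* Keep the GTT handle only on the first XCC's MQD so the shared
+ * allocation is released exactly once (assumption).
+ */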
+ mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ?
+ dev->dqm->hiq_sdma_mqd.gtt_mem : NULL;
+ mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
+ mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)
+ dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
+}
+
+uint64_t kfd_mqd_stride(struct mqd_manager *mm,
+ struct queue_properties *q)
+{
+ return mm->mqd_size;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 57f900ccaa10..23158db7da03 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -68,7 +68,7 @@
*/
extern int pipe_priority_map[];
struct mqd_manager {
- struct kfd_mem_obj* (*allocate_mqd)(struct kfd_dev *kfd,
+ struct kfd_mem_obj* (*allocate_mqd)(struct kfd_node *kfd,
struct queue_properties *q);
void (*init_mqd)(struct mqd_manager *mm, void **mqd,
@@ -97,6 +97,7 @@ struct mqd_manager {
uint32_t queue_id);
int (*get_wave_state)(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
@@ -119,16 +120,18 @@ struct mqd_manager {
int (*debugfs_show_mqd)(struct seq_file *m, void *data);
#endif
uint32_t (*read_doorbell_id)(void *mqd);
+ uint64_t (*mqd_stride)(struct mqd_manager *mm,
+ struct queue_properties *p);
struct mutex mqd_mutex;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
uint32_t mqd_size;
};
-struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev,
+struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev,
struct queue_properties *q);
-struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
+struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
struct queue_properties *q);
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj);
@@ -164,4 +167,10 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id);
+void kfd_get_hiq_xcc_mqd(struct kfd_node *dev,
+ struct kfd_mem_obj *mqd_mem_obj, uint32_t virtual_xcc_id);
+
+uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev);
+uint64_t kfd_mqd_stride(struct mqd_manager *mm,
+ struct queue_properties *q);
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4889865c725c..65c9f01a1f86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct cik_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -74,7 +73,7 @@ static void set_priority(struct cik_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -167,7 +166,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, wptr_mask, mms);
+ wptr_shift, wptr_mask, mms, 0);
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
@@ -390,7 +389,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -428,6 +427,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct cik_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -442,6 +442,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct cik_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -457,6 +458,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct cik_sdma_rlc_registers);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
@@ -470,7 +472,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
}
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index d3e2b6a599a4..94c0fc2e57b7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct v10_compute_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -74,7 +73,7 @@ static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -117,12 +116,17 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+ /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
+ * DISPATCH_PTR. This is required for the kfd debugger
+ */
+ m->cp_hqd_hq_scheduler0 = 1 << 14;
+
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
}
- if (mm->dev->cwsr_enabled) {
+ if (mm->dev->kfd->cwsr_enabled) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -151,7 +155,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, 0, mms);
+ wptr_shift, 0, mms, 0);
return r;
}
@@ -210,7 +214,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_doorbell_control |=
1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
- if (mm->dev->cwsr_enabled)
+ if (mm->dev->kfd->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
@@ -227,11 +231,13 @@ static uint32_t read_doorbell_id(void *mqd)
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v10_compute_mqd *m;
+ struct kfd_context_save_area_header header;
m = get_mqd(mqd);
@@ -250,6 +256,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
* accessible to user mode
*/
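+ /* A context save area header with the control stack and wave
+ * state sizes/offsets is written to the start of the user's
+ * ctl_stack buffer.
+ */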
+ header.wave_state.control_stack_size = *ctl_stack_used_size;
+ header.wave_state.wave_state_size = *save_area_used_size;
+
+ header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+ header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+
+ if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
+ return -EFAULT;
+
return 0;
}
@@ -405,7 +420,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -432,6 +447,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->get_wave_state = get_wave_state;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -447,6 +463,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -478,6 +495,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct v10_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 5aa75f72caa1..31fec5e70d13 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -46,15 +46,33 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
{
struct v11_compute_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
+ bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE |
+ UPDATE_FLAG_DBG_WA_DISABLE));
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr))
return;
+ m = get_mqd(mqd);
+
+ if (has_wa_flag) {
+ uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
+ 0xffff : 0xffffffff;
+
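+ /* While the debug workaround is enabled, limit each SE's static
+ * CU mask to the lower 16 CUs (assumption based on the 0xffff
+ * mask); restore the full mask when it is disabled.
+ */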
+ m->compute_static_thread_mgmt_se0 = wa_mask;
+ m->compute_static_thread_mgmt_se1 = wa_mask;
+ m->compute_static_thread_mgmt_se2 = wa_mask;
+ m->compute_static_thread_mgmt_se3 = wa_mask;
+ m->compute_static_thread_mgmt_se4 = wa_mask;
+ m->compute_static_thread_mgmt_se5 = wa_mask;
+ m->compute_static_thread_mgmt_se6 = wa_mask;
+ m->compute_static_thread_mgmt_se7 = wa_mask;
+
+ return;
+ }
+
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
- m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
@@ -81,7 +99,7 @@ static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -91,12 +109,12 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
* MES writes to areas beyond the MQD size, so allocate
* one full PAGE_SIZE for the MQD when MES is enabled.
*/
- if (kfd->shared_resources.enable_mes)
+ if (node->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_compute_mqd);
- if (kfd_gtt_sa_allocate(kfd, size, &mqd_mem_obj))
+ if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
return NULL;
return mqd_mem_obj;
@@ -109,11 +127,12 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
uint64_t addr;
struct v11_compute_mqd *m;
int size;
+ uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;
m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
- if (mm->dev->shared_resources.enable_mes)
+ if (mm->dev->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_compute_mqd);
@@ -122,14 +141,15 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
+
+ m->compute_static_thread_mgmt_se0 = wa_mask;
+ m->compute_static_thread_mgmt_se1 = wa_mask;
+ m->compute_static_thread_mgmt_se2 = wa_mask;
+ m->compute_static_thread_mgmt_se3 = wa_mask;
+ m->compute_static_thread_mgmt_se4 = wa_mask;
+ m->compute_static_thread_mgmt_se5 = wa_mask;
+ m->compute_static_thread_mgmt_se6 = wa_mask;
+ m->compute_static_thread_mgmt_se7 = wa_mask;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -143,6 +163,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+ /* Set cp_hqd_hq_status0 bit 14 to 1 to have the CP set up the
+ * DISPATCH_PTR. This is required for the kfd debugger
+ */
+ m->cp_hqd_hq_status0 = 1 << 14;
+
/*
* GFX11 RS64 CPFW version >= 509 supports acknowledgment of PCIe
* atomics support.
@@ -155,7 +180,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
}
- if (mm->dev->cwsr_enabled) {
+ if (mm->dev->kfd->cwsr_enabled) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -184,7 +209,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, 0, mms);
+ wptr_shift, 0, mms, 0);
return r;
}
@@ -243,7 +268,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_doorbell_control |=
1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
- if (mm->dev->cwsr_enabled)
+ if (mm->dev->kfd->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
@@ -260,12 +285,13 @@ static uint32_t read_doorbell_id(void *mqd)
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v11_compute_mqd *m;
- /*struct mqd_user_context_save_area_header header;*/
+ struct kfd_context_save_area_header header;
m = get_mqd(mqd);
@@ -283,16 +309,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
* it's part of the context save area that is already
* accessible to user mode
*/
-/*
- header.control_stack_size = *ctl_stack_used_size;
- header.wave_state_size = *save_area_used_size;
+ header.wave_state.control_stack_size = *ctl_stack_used_size;
+ header.wave_state.wave_state_size = *save_area_used_size;
- header.wave_state_offset = m->cp_hqd_wg_state_offset;
- header.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+ header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+ header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
- if (copy_to_user(ctl_stack, &header, sizeof(header)))
+ if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
return -EFAULT;
-*/
+
return 0;
}
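For orientation, the header that get_wave_state() now fills is the leading
member of the context save area that user mode already maps; a minimal
sketch of its layout, with field names assumed to match the uapi definition
in include/uapi/linux/kfd_ioctl.h:

/* Sketch of the context save area header consumed by user mode; layout
 * assumed from the uapi header. get_wave_state() above fills and copies
 * only the leading wave_state sub-struct.
 */
struct kfd_context_save_area_header {
	struct {
		__u32 control_stack_offset;	/* from start of the area */
		__u32 control_stack_size;	/* bytes actually used */
		__u32 wave_state_offset;
		__u32 wave_state_size;
	} wave_state;
	__u32 debug_offset;
	__u32 debug_size;
	__u64 err_payload_addr;
	__u32 err_event_id;
	__u32 reserved1;
};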
@@ -319,7 +344,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;
- if (mm->dev->shared_resources.enable_mes)
+ if (mm->dev->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_sdma_mqd);
@@ -387,7 +412,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -463,7 +488,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
* To allocate SDMA MQDs by generic functions
* when MES is enabled.
*/
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
mqd->allocate_mqd = allocate_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index fdbfd725841f..601bb9f68048 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -32,6 +32,22 @@
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
+#include "kfd_device_queue_manager.h"
+
+static void update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
+ struct mqd_update_info *minfo);
+
+static uint64_t mqd_stride_v9(struct mqd_manager *mm,
+ struct queue_properties *q)
+{
+ if (mm->dev->kfd->cwsr_enabled &&
+ q->type == KFD_QUEUE_TYPE_COMPUTE)
+ return ALIGN(q->ctl_stack_size, PAGE_SIZE) +
+ ALIGN(sizeof(struct v9_mqd), PAGE_SIZE);
+
+ return mm->mqd_size;
+}
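As a worked example of the stride above (illustrative numbers, not taken
from any specific ASIC): with 4 KiB pages, an 8 KiB control stack and a v9
MQD that fits in one page, each compute queue occupies three pages per XCC,
so the nth XCC's copy sits at a fixed 0x3000-byte offset:

/* Illustrative only: per-XCC MQD offset implied by mqd_stride_v9(),
 * assuming PAGE_SIZE == 4096 and q->ctl_stack_size == 0x2000.
 */
uint64_t stride = ALIGN(0x2000, 4096) +			/* 0x2000 */
		  ALIGN(sizeof(struct v9_mqd), 4096);	/* 0x1000 */
uint64_t xcc2_mqd = mqd_mem_obj->gpu_addr + 2 * stride;	/* +0x6000 */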
static inline struct v9_mqd *get_mqd(void *mqd)
{
@@ -49,8 +65,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -83,7 +98,7 @@ static void set_priority(struct v9_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
struct queue_properties *q)
{
int retval;
@@ -105,28 +120,30 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
* pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
* amdgpu memory functions to do so.
*/
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
+ if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
+ retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev,
+ (ALIGN(q->ctl_stack_size, PAGE_SIZE) +
+ ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) *
+ NUM_XCC(node->xcc_mask),
&(mqd_mem_obj->gtt_mem),
&(mqd_mem_obj->gpu_addr),
(void *)&(mqd_mem_obj->cpu_ptr), true);
+
+ if (retval) {
+ kfree(mqd_mem_obj);
+ return NULL;
+ }
} else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
+ retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd),
&mqd_mem_obj);
- }
-
- if (retval) {
- kfree(mqd_mem_obj);
- return NULL;
+ if (retval)
+ return NULL;
}
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -135,7 +152,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
{
uint64_t addr;
struct v9_mqd *m;
- struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev;
m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
@@ -165,31 +181,21 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
- if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ /* Set cp_hqd_hq_status0 bit 14 to 1 to have the CP set up the
+ * DISPATCH_PTR. This is required for the kfd debugger.
+ */
+ m->cp_hqd_hq_status0 = 1 << 14;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
- /* On GC 9.4.3, DW 41 is re-purposed as
- * compute_tg_chunk_size.
- * TODO: review this setting when active CUs in the
- * partition play a role
- */
- m->compute_static_thread_mgmt_se6 = 1;
- }
- } else {
- /* PM4 queue */
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
- m->compute_static_thread_mgmt_se6 = 0;
- /* TODO: program pm4_target_xcc */
- }
- }
if (q->tba_addr) {
m->compute_pgm_rsrc2 |=
(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -205,7 +211,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q, NULL);
+ update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -217,14 +223,13 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, 0, mms);
+ wptr_shift, 0, mms, 0);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev;
struct v9_mqd *m;
m = get_mqd(mqd);
@@ -257,9 +262,14 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
* Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
* more than (EOP entry count - 1) so a queue size of 0x800 dwords
* is safe, giving a maximum field value of 0xA.
+ *
+ * Also, only do the calculation if the EOP is used (size > 0);
+ * otherwise the order_base_2 calculation gives an incorrect result.
*/
- m->cp_hqd_eop_control = min(0xA,
- order_base_2(q->eop_ring_buffer_size / 4) - 1);
+ m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
+ min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0;
+
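To make the capped encoding above concrete (illustrative, not driver code):
a 4 KiB EOP buffer holds 1024 dwords, order_base_2(1024) == 10, so the
field becomes min(0xA, 10 - 1) == 9, while a zero-sized buffer now
short-circuits to 0 instead of feeding 0 into order_base_2():

/* Hypothetical stand-in for the kernel's order_base_2() (ilog2() of
 * the value rounded up to a power of two), for illustration only.
 */
static unsigned int order_base_2_demo(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;	/* order_base_2_demo(1024) == 10 */
}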
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
@@ -270,17 +280,14 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
- m->cp_hqd_pq_control |=
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
- if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
- m->cp_hqd_pq_control |=
- CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
m->cp_hqd_pq_doorbell_control |= 1 <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
@@ -298,11 +305,13 @@ static uint32_t read_doorbell_id(void *mqd)
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v9_mqd *m;
+ struct kfd_context_save_area_header header;
/* Control stack is located one page after MQD. */
void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
@@ -314,7 +323,18 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
- if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
+ header.wave_state.control_stack_size = *ctl_stack_used_size;
+ header.wave_state.wave_state_size = *save_area_used_size;
+
+ header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+ header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+
+ if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
+ return -EFAULT;
+
+ if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset,
+ mqd_ctl_stack + m->cp_hqd_cntl_stack_offset,
+ *ctl_stack_used_size))
return -EFAULT;
return 0;
@@ -467,6 +487,288 @@ static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
qp->is_active = 0;
}
+static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ int xcc = 0;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ uint64_t xcc_gart_addr = 0;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);
+
+ init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
+
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+ m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
+ if (xcc == 0) {
+ /* Set no_update_rptr = 0 in Master XCC */
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = m;
+ *gart_addr = xcc_gart_addr;
+ }
+ }
+}
+
+static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ void *xcc_mqd;
+ uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
+
+ for_each_inst(xcc_id, xcc_mask) {
+ xcc_mqd = mqd + hiq_mqd_size * inst;
+ err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
+ pipe_id, queue_id,
+ p->doorbell_off, xcc_id);
+ if (err) {
+ pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
+static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ void *xcc_mqd;
+ uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
+
+ for_each_inst(xcc_id, xcc_mask) {
+ xcc_mqd = mqd + hiq_mqd_size * inst;
+ err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
+ type, timeout, pipe_id,
+ queue_id, xcc_id);
+ if (err) {
+ pr_debug("Destroy MQD failed for xcc: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
+static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
+ struct kfd_mem_obj *xcc_mqd_mem_obj,
+ uint64_t offset)
+{
+ xcc_mqd_mem_obj->gtt_mem = (offset == 0) ?
+ mqd_mem_obj->gtt_mem : NULL;
+ xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset;
+ xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr
+ + offset);
+}
+
+static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ int xcc = 0;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ uint64_t xcc_gart_addr = 0;
+ uint64_t xcc_ctx_save_restore_area_address;
+ uint64_t offset = mm->mqd_stride(mm, q);
+ uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);
+
+ init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
+
+ m->cp_mqd_stride_size = offset;
+
+ /*
+ * Update the CWSR address for each XCC if CWSR is enabled
+ * and the CWSR area is allocated by the Thunk
+ */
+ if (mm->dev->kfd->cwsr_enabled &&
+ q->ctx_save_restore_area_address) {
+ xcc_ctx_save_restore_area_address =
+ q->ctx_save_restore_area_address +
+ (xcc * q->ctx_save_restore_area_size);
+
+ m->cp_hqd_ctx_save_base_addr_lo =
+ lower_32_bits(xcc_ctx_save_restore_area_address);
+ m->cp_hqd_ctx_save_base_addr_hi =
+ upper_32_bits(xcc_ctx_save_restore_area_address);
+ }
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ m->compute_tg_chunk_size = 1;
+ m->compute_current_logic_xcc_id =
+ (local_xcc_start + xcc) %
+ NUM_XCC(mm->dev->xcc_mask);
+
+ switch (xcc) {
+ case 0:
+ /* Master XCC */
+ m->cp_hqd_pq_control &=
+ ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* PM4 Queue */
+ m->compute_current_logic_xcc_id = 0;
+ m->compute_tg_chunk_size = 0;
+ m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
+ }
+
+ if (xcc == 0) {
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = m;
+ *gart_addr = xcc_gart_addr;
+ }
+ }
+}
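The per-XCC CWSR placement above is a plain linear stride over the Thunk's
allocation; a worked example with assumed values:

/* Illustrative only: per-XCC CWSR bases, assuming the Thunk reserved
 * ctx_save_restore_area_size == 16 MiB per XCC at a hypothetical VA.
 */
uint64_t base = 0x7f0000000000ull;	/* assumed Thunk VA */
uint64_t size = 16ull << 20;
uint64_t xcc1 = base + 1 * size;	/* 0x7f0001000000 */
uint64_t xcc2 = base + 2 * size;	/* 0x7f0002000000 */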
+
+static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q, struct mqd_update_info *minfo)
+{
+ struct v9_mqd *m;
+ int xcc = 0;
+ uint64_t size = mm->mqd_stride(mm, q);
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+ update_mqd(mm, m, q, minfo);
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ switch (xcc) {
+ case 0:
+ /* Master XCC */
+ m->cp_hqd_pq_control &=
+ ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
+ break;
+ default:
+ break;
+ }
+ m->compute_tg_chunk_size = 1;
+ } else {
+ /* PM4 Queue */
+ m->compute_current_logic_xcc_id = 0;
+ m->compute_tg_chunk_size = 0;
+ m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
+ }
+ }
+}
+
+static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ void *xcc_mqd;
+ struct v9_mqd *m;
+ uint64_t mqd_offset;
+
+ m = get_mqd(mqd);
+ mqd_offset = m->cp_mqd_stride_size;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ xcc_mqd = mqd + mqd_offset * inst;
+ err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
+ type, timeout, pipe_id,
+ queue_id, xcc_id);
+ if (err) {
+ pr_debug("Destroy MQD failed for xcc: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
+static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ void *xcc_mqd;
+ uint64_t mqd_stride_size = mm->mqd_stride(mm, p);
+
+ for_each_inst(xcc_id, xcc_mask) {
+ xcc_mqd = mqd + mqd_stride_size * inst;
+ err = mm->dev->kfd2kgd->hqd_load(
+ mm->dev->adev, xcc_mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms,
+ xcc_id);
+ if (err) {
+ pr_debug("Load MQD failed for xcc: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
+static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+{
+ int xcc, err = 0;
+ void *xcc_mqd;
+ void __user *xcc_ctl_stack;
+ uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
+ u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ xcc_mqd = mqd + mqd_stride_size * xcc;
+ xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack +
+ q->ctx_save_restore_area_size * xcc);
+
+ err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
+ &tmp_ctl_stack_used_size,
+ &tmp_save_area_used_size);
+ if (err)
+ break;
+
+ /*
+ * Report the ctl_stack_used_size and save_area_used_size of
+ * XCC 0 when passing the info to user space.
+ * For multi-XCC, user space has to look at the header of each
+ * control stack area to determine the control stack size and
+ * save area actually used.
+ */
+ if (xcc == 0) {
+ *ctl_stack_used_size = tmp_ctl_stack_used_size;
+ *save_area_used_size = tmp_save_area_used_size;
+ }
+ }
+
+ return err;
+}
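Per the comment above, only XCC 0's sizes are reported back, so a
multi-XCC aware debugger has to walk the per-XCC headers itself; a hedged
user-space sketch (the helper name is an assumption, the header layout is
the one sketched earlier for the v11 get_wave_state()):

/* Hypothetical user-space walk over the per-XCC save areas. */
void consume_waves(struct kfd_context_save_area_header *hdr,
		   __u32 ctl_size, __u32 wave_size);	/* assumed helper */

void walk_ctl_stacks(char *ctl_stack, unsigned int num_xcc,
		     size_t area_size)
{
	unsigned int xcc;

	for (xcc = 0; xcc < num_xcc; xcc++) {
		struct kfd_context_save_area_header *hdr =
			(void *)(ctl_stack + xcc * area_size);

		consume_waves(hdr, hdr->wave_state.control_stack_size,
			      hdr->wave_state.wave_state_size);
	}
}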
+
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
@@ -486,7 +788,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -502,34 +804,50 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
mqd->allocate_mqd = allocate_mqd;
- mqd->init_mqd = init_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
- mqd->get_wave_state = get_wave_state;
mqd->get_checkpoint_info = get_checkpoint_info;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
+ mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
+ mqd->init_mqd = init_mqd_v9_4_3;
+ mqd->load_mqd = load_mqd_v9_4_3;
+ mqd->update_mqd = update_mqd_v9_4_3;
+ mqd->destroy_mqd = destroy_mqd_v9_4_3;
+ mqd->get_wave_state = get_wave_state_v9_4_3;
+ } else {
+ mqd->init_mqd = init_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->get_wave_state = get_wave_state;
+ }
break;
case KFD_MQD_TYPE_HIQ:
mqd->allocate_mqd = allocate_hiq_mqd;
- mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v9_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->read_doorbell_id = read_doorbell_id;
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
+ mqd->init_mqd = init_mqd_hiq_v9_4_3;
+ mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
+ mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
+ } else {
+ mqd->init_mqd = init_mqd_hiq;
+ mqd->load_mqd = kfd_hiq_load_mqd_kiq;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ }
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
@@ -555,6 +873,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct v9_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 530ba6f5b57e..d1e962da51dd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -51,8 +51,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct vi_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -77,7 +76,7 @@ static void set_priority(struct vi_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -136,7 +135,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -165,7 +164,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, wptr_mask, mms);
+ wptr_shift, wptr_mask, mms, 0);
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
@@ -227,7 +226,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control =
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
@@ -261,6 +260,7 @@ static void update_mqd_tonga(struct mqd_manager *mm, void *mqd,
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
@@ -446,7 +446,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -486,6 +486,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct vi_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -500,6 +501,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct vi_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -515,6 +517,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct vi_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
@@ -528,7 +531,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
}
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index f612325241aa..401096c103b2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -45,7 +45,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
unsigned int max_proc_per_quantum = 1;
- struct kfd_dev *dev = pm->dqm->dev;
+ struct kfd_node *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->active_queue_count;
@@ -370,6 +370,38 @@ out:
return retval;
}
+int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
+{
+ int retval = 0;
+ uint32_t *buffer, size;
+
+ size = pm->pmf->set_grace_period_size;
+
+ mutex_lock(&pm->lock);
+
+ if (size) {
+ kq_acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t),
+ (unsigned int **)&buffer);
+
+ if (!buffer) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
+ if (!retval)
+ kq_submit_packet(pm->priv_queue);
+ else
+ kq_rollback_packet(pm->priv_queue);
+ }
+
+out:
+ mutex_unlock(&pm->lock);
+ return retval;
+}
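A plausible call site for the new helper (a sketch, not lifted from this
series; the packet_mgr field name is assumed): the debug path would shorten
the preemption grace period while suspending queues, then pass
USE_DEFAULT_GRACE_PERIOD to restore the firmware default cached in
dqm->wait_times:

/* Sketch, assuming dqm embeds its packet manager as 'packet_mgr'. */
static void dbg_set_default_grace(struct device_queue_manager *dqm)
{
	if (pm_update_grace_period(&dqm->packet_mgr,
				   USE_DEFAULT_GRACE_PERIOD))
		pr_debug("failed to restore grace period\n");
}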
+
int pm_send_unmap_queue(struct packet_manager *pm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 18250845a989..29a2d0499b67 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -34,6 +34,9 @@ static int pm_map_process_v9(struct packet_manager *pm,
{
struct pm4_mes_map_process *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
+ struct kfd_node *kfd = pm->dqm->dev;
+ struct kfd_process_device *pdd =
+ container_of(qpd, struct kfd_process_device, qpd);
packet = (struct pm4_mes_map_process *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
@@ -49,6 +52,12 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+ if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&
+ pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
+ packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;
+ packet->bitfields2.new_debug = 1;
+ }
+
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
if (qpd->tba_addr) {
@@ -79,6 +88,10 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
{
struct pm4_mes_map_process_aldebaran *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
+ struct kfd_dev *kfd = pm->dqm->dev->kfd;
+ struct kfd_process_device *pdd =
+ container_of(qpd, struct kfd_process_device, qpd);
+ int i;
packet = (struct pm4_mes_map_process_aldebaran *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
@@ -93,6 +106,16 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+ packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |
+ pdd->spi_dbg_launch_mode;
+
+ if (pdd->process->debug_trap_enabled) {
+ for (i = 0; i < kfd->device_info.num_of_watch_points; i++)
+ packet->tcp_watch_cntl[i] = pdd->watch_points[i];
+
+ packet->bitfields2.single_memops =
+ !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);
+ }
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
@@ -119,7 +142,7 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
struct pm4_mes_runlist *packet;
int concurrent_proc_cnt = 0;
- struct kfd_dev *kfd = pm->dqm->dev;
+ struct kfd_node *kfd = pm->dqm->dev;
/* Determine the number of processes to map together to HW:
* it can not exceed the number of VMIDs available to the
@@ -220,13 +243,24 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
use_static = false; /* no static queues under SDMA */
- if (q->properties.sdma_engine_id < 2 && !pm_use_ext_eng(q->device))
+ if (q->properties.sdma_engine_id < 2 &&
+ !pm_use_ext_eng(q->device->kfd))
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
else {
- packet->bitfields2.extended_engine_sel =
- extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
- packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
+ /*
+ * For GFX9.4.3, the SDMA engine id can be 8 or greater.
+ * For such cases, set extended_engine_sel to 2 and
+ * ensure engine_sel lies between 0-7.
+ */
+ if (q->properties.sdma_engine_id >= 8)
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__sdma8_to_15_sel;
+ else
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
+
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id % 8;
}
break;
default:
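A concrete mapping for the widened SDMA encoding above (illustrative
values): engine id 10 selects the new sdma8_to_15 range with
engine_sel = 10 % 8 = 2, while engine id 5 keeps the sdma0_to_7 range with
engine_sel = 5:

/* Illustrative restatement of the engine-id split above. */
unsigned int id = 10;
unsigned int ext_sel = (id >= 8) ?
	extended_engine_sel__mes_map_queues__sdma8_to_15_sel :	/* 2 */
	extended_engine_sel__mes_map_queues__sdma0_to_7_sel;	/* 1 */
unsigned int engine_sel = id % 8;				/* 2 */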
@@ -251,6 +285,41 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
+static int pm_set_grace_period_v9(struct packet_manager *pm,
+ uint32_t *buffer,
+ uint32_t grace_period)
+{
+ struct pm4_mec_write_data_mmio *packet;
+ uint32_t reg_offset = 0;
+ uint32_t reg_data = 0;
+
+ pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
+ pm->dqm->dev->adev,
+ pm->dqm->wait_times,
+ grace_period,
+ &reg_offset,
+ &reg_data);
+
+ if (grace_period == USE_DEFAULT_GRACE_PERIOD)
+ reg_data = pm->dqm->wait_times;
+
+ packet = (struct pm4_mec_write_data_mmio *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio));
+
+ packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA,
+ sizeof(struct pm4_mec_write_data_mmio));
+
+ packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register;
+ packet->bitfields2.addr_incr =
+ addr_incr___write_data__do_not_increment_address;
+
+ packet->bitfields3.dst_mmreg_addr = reg_offset;
+
+ packet->data = reg_data;
+
+ return 0;
+}
+
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset)
@@ -263,7 +332,8 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues));
- packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ?
+ packet->bitfields2.extended_engine_sel =
+ pm_use_ext_eng(pm->dqm->dev->kfd) ?
extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel :
extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
@@ -333,6 +403,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
+ .set_grace_period = pm_set_grace_period_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -340,6 +411,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+ .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
@@ -350,6 +422,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
+ .set_grace_period = pm_set_grace_period_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process_aldebaran),
@@ -357,6 +430,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+ .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 4f951eaa6ee8..c1199d06d131 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -77,7 +77,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
{
struct pm4_mes_runlist *packet;
int concurrent_proc_cnt = 0;
- struct kfd_dev *kfd = pm->dqm->dev;
+ struct kfd_node *kfd = pm->dqm->dev;
if (WARN_ON(!ib))
return -EFAULT;
@@ -303,6 +303,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources = pm_set_resources_vi,
.map_queues = pm_map_queues_vi,
.unmap_queues = pm_unmap_queues_vi,
+ .set_grace_period = NULL,
.query_status = pm_query_status_vi,
.release_mem = pm_release_mem_vi,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -310,6 +311,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+ .set_grace_period_size = 0,
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index a666710ed403..8b6b2bd5c148 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -146,7 +146,10 @@ struct pm4_mes_map_process {
union {
struct {
uint32_t pasid:16;
- uint32_t reserved1:8;
+ uint32_t reserved1:2;
+ uint32_t debug_vmid:4;
+ uint32_t new_debug:1;
+ uint32_t reserved2:1;
uint32_t diq_enable:1;
uint32_t process_quantum:7;
} bitfields2;
@@ -263,7 +266,8 @@ enum mes_map_queues_engine_sel_enum {
enum mes_map_queues_extended_engine_sel_enum {
extended_engine_sel__mes_map_queues__legacy_engine_sel = 0,
- extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1,
+ extended_engine_sel__mes_map_queues__sdma8_to_15_sel = 2
};
struct pm4_mes_map_queues {
@@ -583,6 +587,71 @@ struct pm4_mec_release_mem {
#endif
+#ifndef PM4_MEC_WRITE_DATA_DEFINED
+#define PM4_MEC_WRITE_DATA_DEFINED
+
+enum WRITE_DATA_dst_sel_enum {
+ dst_sel___write_data__mem_mapped_register = 0,
+ dst_sel___write_data__tc_l2 = 2,
+ dst_sel___write_data__gds = 3,
+ dst_sel___write_data__memory = 5,
+ dst_sel___write_data__memory_mapped_adc_persistent_state = 6,
+};
+
+enum WRITE_DATA_addr_incr_enum {
+ addr_incr___write_data__increment_address = 0,
+ addr_incr___write_data__do_not_increment_address = 1
+};
+
+enum WRITE_DATA_wr_confirm_enum {
+ wr_confirm___write_data__do_not_wait_for_write_confirmation = 0,
+ wr_confirm___write_data__wait_for_write_confirmation = 1
+};
+
+enum WRITE_DATA_cache_policy_enum {
+ cache_policy___write_data__lru = 0,
+ cache_policy___write_data__stream = 1
+};
+
+
+struct pm4_mec_write_data_mmio {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /*header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int reserved1:8;
+ unsigned int dst_sel:4;
+ unsigned int reserved2:4;
+ unsigned int addr_incr:1;
+ unsigned int reserved3:2;
+ unsigned int resume_vf:1;
+ unsigned int wr_confirm:1;
+ unsigned int reserved4:4;
+ unsigned int cache_policy:2;
+ unsigned int reserved5:5;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int dst_mmreg_addr:18;
+ unsigned int reserved6:14;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ uint32_t reserved7;
+
+ uint32_t data;
+
+};
+
+#endif
+
enum {
CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 94a438956868..7364a5d77c6e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -113,6 +113,8 @@
#define KFD_UNMAP_LATENCY_MS (4000)
+#define KFD_MAX_SDMA_QUEUES 128
+
/*
* 512 = 0x200
* The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
@@ -199,6 +201,8 @@ extern int amdgpu_no_queue_eviction_on_vm_fault;
/* Enable eviction debug messages */
extern bool debug_evictions;
+extern struct mutex kfd_processes_mutex;
+
enum cache_policy {
cache_policy_coherent,
cache_policy_noncoherent
@@ -210,11 +214,13 @@ enum cache_policy {
((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))
+struct kfd_node;
+
struct kfd_event_interrupt_class {
- bool (*interrupt_isr)(struct kfd_dev *dev,
+ bool (*interrupt_isr)(struct kfd_node *dev,
const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
bool *patched_flag);
- void (*interrupt_wq)(struct kfd_dev *dev,
+ void (*interrupt_wq)(struct kfd_node *dev,
const uint32_t *ih_ring_entry);
};
@@ -233,11 +239,11 @@ struct kfd_device_info {
uint32_t no_atomic_fw_version;
unsigned int num_sdma_queues_per_engine;
unsigned int num_reserved_sdma_queues_per_engine;
- uint64_t reserved_sdma_queues_bitmap;
+ DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
};
-unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev);
-unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev);
+unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);
struct kfd_mem_obj {
uint32_t range_start;
@@ -253,13 +259,70 @@ struct kfd_vmid_info {
uint32_t vmid_num_kfd;
};
+#define MAX_KFD_NODES 8
+
+struct kfd_dev;
+
+struct kfd_node {
+ unsigned int node_id;
+ struct amdgpu_device *adev; /* Duplicated here along with keeping
+ * a copy in kfd_dev to save a hop
+ */
+ const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
+ * keeping a copy in kfd_dev to
+ * save a hop
+ */
+ struct kfd_vmid_info vm_info;
+ unsigned int id; /* topology stub index */
+ uint32_t xcc_mask; /* Instance mask of XCCs present */
+ struct amdgpu_xcp *xcp;
+
+ /* Interrupts */
+ struct kfifo ih_fifo;
+ struct workqueue_struct *ih_wq;
+ struct work_struct interrupt_work;
+ spinlock_t interrupt_lock;
+
+ /*
+ * Interrupts of interest to KFD are copied
+ * from the HW ring into a SW ring.
+ */
+ bool interrupts_active;
+ uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */
+
+ /* QCM Device instance */
+ struct device_queue_manager *dqm;
+
+ /* Global GWS resource shared between processes */
+ void *gws;
+ bool gws_debug_workaround;
+
+ /* Clients watching SMI events */
+ struct list_head smi_clients;
+ spinlock_t smi_lock;
+ uint32_t reset_seq_num;
+
+ /* SRAM ECC flag */
+ atomic_t sram_ecc_flag;
+
+ /* SPM process id */
+ unsigned int spm_pasid;
+
+ /* Maximum process number mapped to HW scheduler */
+ unsigned int max_proc_per_quantum;
+
+ unsigned int compute_vmid_bitmap;
+
+ struct kfd_local_mem_info local_mem_info;
+
+ struct kfd_dev *kfd;
+};
+
struct kfd_dev {
struct amdgpu_device *adev;
struct kfd_device_info device_info;
- unsigned int id; /* topology stub index */
-
phys_addr_t doorbell_base; /* Start of actual doorbells used by
* KFD. It is aligned for mapping
* into user mode
@@ -274,8 +337,6 @@ struct kfd_dev {
*/
struct kgd2kfd_shared_resources shared_resources;
- struct kfd_vmid_info vm_info;
- struct kfd_local_mem_info local_mem_info;
const struct kfd2kgd_calls *kfd2kgd;
struct mutex doorbell_mutex;
@@ -290,30 +351,13 @@ struct kfd_dev {
unsigned int gtt_sa_chunk_size;
unsigned int gtt_sa_num_of_chunks;
- /* Interrupts */
- struct kfifo ih_fifo;
- struct workqueue_struct *ih_wq;
- struct work_struct interrupt_work;
- spinlock_t interrupt_lock;
-
- /* QCM Device instance */
- struct device_queue_manager *dqm;
-
bool init_complete;
- /*
- * Interrupts of interest to KFD are copied
- * from the HW ring into a SW ring.
- */
- bool interrupts_active;
/* Firmware versions */
uint16_t mec_fw_version;
uint16_t mec2_fw_version;
uint16_t sdma_fw_version;
- /* Maximum process number mapped to HW scheduler */
- unsigned int max_proc_per_quantum;
-
/* CWSR */
bool cwsr_enabled;
const void *cwsr_isa;
@@ -327,28 +371,20 @@ struct kfd_dev {
/* Use IOMMU v2 flag */
bool use_iommu_v2;
- /* SRAM ECC flag */
- atomic_t sram_ecc_flag;
-
/* Compute Profile ref. count */
atomic_t compute_profile;
- /* Global GWS resource shared between processes */
- void *gws;
-
- /* Clients watching SMI events */
- struct list_head smi_clients;
- spinlock_t smi_lock;
-
- uint32_t reset_seq_num;
-
struct ida doorbell_ida;
unsigned int max_doorbell_slices;
int noretry;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
+ struct kfd_node *nodes[MAX_KFD_NODES];
+ unsigned int num_nodes;
+
+ /* Track per device allocated watch points */
+ uint32_t alloc_watch_ids;
+ spinlock_t watch_points_lock;
};
enum kfd_mempool {
@@ -478,8 +514,13 @@ struct queue_properties {
uint32_t doorbell_off;
bool is_interop;
bool is_evicted;
+ bool is_suspended;
+ bool is_being_destroyed;
bool is_active;
bool is_gws;
+ uint32_t pm4_target_xcc;
+ bool is_dbg_wa;
+ bool is_user_cu_masked;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
/* Relevant only for sdma queues*/
@@ -494,15 +535,18 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
+ uint64_t exception_status;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
(q).queue_address != 0 && \
(q).queue_percent > 0 && \
- !(q).is_evicted)
+ !(q).is_evicted && \
+ !(q).is_suspended)
enum mqd_update_flag {
- UPDATE_FLAG_CU_MASK = 0,
+ UPDATE_FLAG_DBG_WA_ENABLE = 1,
+ UPDATE_FLAG_DBG_WA_DISABLE = 2,
};
struct mqd_update_info {
@@ -563,7 +607,7 @@ struct queue {
unsigned int doorbell_id;
struct kfd_process *process;
- struct kfd_dev *device;
+ struct kfd_node *device;
void *gws;
/* procfs */
@@ -697,7 +741,7 @@ enum kfd_pdd_bound {
/* Data that is per-process-per device. */
struct kfd_process_device {
/* The device that owns this data. */
- struct kfd_dev *dev;
+ struct kfd_node *dev;
/* The process that owns this kfd_process_device. */
struct kfd_process *process;
@@ -783,6 +827,18 @@ struct kfd_process_device {
uint64_t faults;
uint64_t page_in;
uint64_t page_out;
+
+ /* Exception code status */
+ uint64_t exception_status;
+ void *vm_fault_exc_data;
+ size_t vm_fault_exc_data_size;
+
+ /* Tracks debug per-vmid request settings */
+ uint32_t spi_dbg_override;
+ uint32_t spi_dbg_launch_mode;
+ uint32_t watch_points[4];
+ uint32_t alloc_watch_ids;
+
/*
* If this process has been checkpointed before, then the user
* application will use the original gpu_id on the
@@ -887,19 +943,57 @@ struct kfd_process {
*/
unsigned long last_restore_timestamp;
+ /* Indicates device process is debug attached with reserved vmid. */
+ bool debug_trap_enabled;
+
+ /* per-process-per device debug event fd file */
+ struct file *dbg_ev_file;
+
+ /* If the process is a kfd debugger, we need to know so we can
+ * clean up at exit time. If a process enables debugging on itself,
+ * it does its own clean-up, so it is not counted here. We track
+ * this by counting the number of processes this process is
+ * debugging.
+ */
+ atomic_t debugged_process_count;
+
+ /* If the process is being debugged, this is the debugger process */
+ struct kfd_process *debugger_process;
+
/* Kobj for our procfs */
struct kobject *kobj;
struct kobject *kobj_queues;
struct attribute attr_pasid;
+ /* Keep track of CWSR init */
+ bool has_cwsr;
+
+ /* Exception code enable mask and status */
+ uint64_t exception_enable_mask;
+ uint64_t exception_status;
+
+ /* Used to drain stale interrupts */
+ wait_queue_head_t wait_irq_drain;
+ bool irq_drain_is_open;
+
/* shared virtual memory registered by this process */
struct svm_range_list svms;
bool xnack_enabled;
+ /* Work area for debugger event writer worker. */
+ struct work_struct debug_event_workarea;
+
+ /* Tracks per-vmid debug flag requests */
+ bool dbg_flags;
+
atomic_t poison;
/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
bool queues_paused;
+
+ /* Tracks runtime enable status */
+ struct semaphore runtime_enable_sema;
+ bool is_runtime_retry;
+ struct kfd_runtime_info runtime_info;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -925,20 +1019,19 @@ struct amdkfd_ioctl_desc {
unsigned int cmd_drv;
const char *name;
};
-bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+bool kfd_dev_is_large_bar(struct kfd_node *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
-struct kfd_process *kfd_create_process(struct file *filep);
+struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
-int kfd_process_gpuid_from_adev(struct kfd_process *p,
- struct amdgpu_device *adev, uint32_t *gpuid,
- uint32_t *gpuidx);
+int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
+ uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
uint32_t gpuidx, uint32_t *gpuid) {
return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
@@ -961,16 +1054,16 @@ int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct file *drm_file);
-struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
struct kfd_process *p);
-struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
struct kfd_process *p);
-struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p);
bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);
-int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma);
/* KFD process API for creating and translating handles */
@@ -994,7 +1087,7 @@ void kfd_pasid_free(u32 pasid);
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
-int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off);
@@ -1012,10 +1105,10 @@ void kfd_free_process_doorbells(struct kfd_dev *kfd,
unsigned int doorbell_index);
/* GTT Sub-Allocator */
-int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
struct kfd_mem_obj **mem_obj);
-int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
+int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);
extern struct device *kfd_device;
@@ -1028,27 +1121,53 @@ void kfd_procfs_del_queue(struct queue *q);
/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
-int kfd_topology_add_device(struct kfd_dev *gpu);
-int kfd_topology_remove_device(struct kfd_dev *gpu);
+int kfd_topology_add_device(struct kfd_node *gpu);
+int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
-struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
-struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev);
-int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
+struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
+struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
+ uint32_t vmid)
+{
+ return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
+ (node->compute_vmid_bitmap & (1 << vmid)) != 0;
+}
+static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
+ uint32_t node_id, uint32_t vmid) {
+ struct kfd_dev *dev = adev->kfd.dev;
+ uint32_t i;
+
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+ return dev->nodes[0];
+
+ for (i = 0; i < dev->num_nodes; i++)
+ if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
+ return dev->nodes[i];
+
+ return NULL;
+}
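Taken together, these helpers let the interrupt path resolve the owning
partition from the (node_id, vmid) pair carried in a GFX 9.4.3 IH ring
entry; a minimal sketch of intended use (extracting node_id and vmid from
the entry is assumed, not shown):

/* Hypothetical ISR fragment: route an IH entry to its node. */
struct kfd_node *node = kfd_node_by_irq_ids(adev, node_id, vmid);

if (!node)	/* not destined for any KFD partition */
	return;
if (enqueue_ih_ring_entry(node, ih_ring_entry))
	queue_work(node->ih_wq, &node->interrupt_work);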
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
/* Interrupts */
-int kfd_interrupt_init(struct kfd_dev *dev);
-void kfd_interrupt_exit(struct kfd_dev *dev);
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool interrupt_is_wanted(struct kfd_dev *dev,
+#define KFD_IRQ_FENCE_CLIENTID 0xff
+#define KFD_IRQ_FENCE_SOURCEID 0xff
+#define KFD_IRQ_IS_FENCE(client, source) \
+ ((client) == KFD_IRQ_FENCE_CLIENTID && \
+ (source) == KFD_IRQ_FENCE_SOURCEID)
+int kfd_interrupt_init(struct kfd_node *dev);
+void kfd_interrupt_exit(struct kfd_node *dev);
+bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre, bool *flag);
+int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
+void kfd_process_close_interrupt_drain(unsigned int pasid);
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);
@@ -1056,6 +1175,11 @@ int kfd_init_apertures(struct kfd_process *process);
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
uint64_t tba_addr,
uint64_t tma_addr);
+void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
+ bool enabled);
+
+/* CWSR initialization */
+int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);
/* CRIU */
/*
@@ -1174,22 +1298,22 @@ void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
-struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
+ struct kfd_node *dev);
+struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
-struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
@@ -1206,7 +1330,7 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
- struct kfd_dev *dev,
+ struct kfd_node *dev,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
@@ -1231,6 +1355,11 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
+int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
+ uint64_t exception_clear_mask,
+ void __user *buf,
+ int *num_qss_entries,
+ uint32_t *entry_size);
int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
uint64_t fence_value,
@@ -1270,6 +1399,8 @@ struct packet_manager_funcs {
int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset);
+ int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
+ uint32_t grace_period);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
@@ -1280,6 +1411,7 @@ struct packet_manager_funcs {
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
+ int set_grace_period_size;
int query_status_size;
int release_mem_size;
};
@@ -1302,6 +1434,8 @@ int pm_send_unmap_queue(struct packet_manager *pm,
void pm_release_ib(struct packet_manager *pm);
+int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
+
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
@@ -1310,6 +1444,7 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
+extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;
extern const struct kfd_device_global_init_class device_global_init_class_cik;
@@ -1323,7 +1458,7 @@ int kfd_wait_on_events(struct kfd_process *p,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
-void kfd_signal_iommu_event(struct kfd_dev *dev,
+void kfd_signal_iommu_event(struct kfd_node *dev,
u32 pasid, unsigned long address,
bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
@@ -1339,32 +1474,36 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
-void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
- struct kfd_vm_fault_info *info);
+void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+ struct kfd_vm_fault_info *info,
+ struct kfd_hsa_memory_exception_data *data);
-void kfd_signal_reset_event(struct kfd_dev *dev);
+void kfd_signal_reset_event(struct kfd_node *dev);
-void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);
+void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
- (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
- dev->adev->sdma.instance[0].fw_version >= 18) ||
+ return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
+int kfd_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int queue_id,
+ uint64_t error_reason);
bool kfd_is_locked(void);
/* Compute profile */
-void kfd_inc_compute_active(struct kfd_dev *dev);
-void kfd_dec_compute_active(struct kfd_dev *dev);
+void kfd_inc_compute_active(struct kfd_node *dev);
+void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
-static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = adev_to_drm(kfd->adev);
@@ -1377,6 +1516,11 @@ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
#endif
}
+static inline bool kfd_is_first_node(struct kfd_node *node)
+{
+ return (node == node->kfd->nodes[0]);
+}
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
@@ -1389,7 +1533,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);
-int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 07a9eaf9b7d8..3d3611705d41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -44,13 +44,14 @@ struct mm_struct;
#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"
+#include "kfd_debug.h"
/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
-static DEFINE_MUTEX(kfd_processes_mutex);
+DEFINE_MUTEX(kfd_processes_mutex);
DEFINE_SRCU(kfd_processes_srcu);
@@ -69,7 +70,6 @@ static struct kfd_process *find_process(const struct task_struct *thread,
bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
@@ -269,7 +269,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
int cu_cnt;
int wave_cnt;
int max_waves_per_cu;
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
struct kfd_process *proc = NULL;
struct kfd_process_device *pdd = NULL;
@@ -290,7 +290,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
wave_cnt = 0;
max_waves_per_cu = 0;
dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
- &max_waves_per_cu);
+ &max_waves_per_cu, 0);
/* Translate wave count to number of compute units */
cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
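[Editor's note: the wave-to-CU translation above is a plain ceiling division, so a partially occupied CU still counts as busy. A minimal standalone sketch of the same rounding; the counts are made up for illustration:]

#include <stdio.h>

int main(void)
{
    /* Hypothetical counts: 700 in-flight waves, 32 waves per CU. */
    int wave_cnt = 700, max_waves_per_cu = 32;

    /* Ceiling division: a partially occupied CU still counts as busy. */
    int cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;

    printf("%d waves -> %d CUs busy\n", wave_cnt, cu_cnt); /* prints 22 */
    return 0;
}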
@@ -691,7 +691,7 @@ void kfd_process_destroy_wq(void)
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_process_device *pdd, void **kptr)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
if (kptr && *kptr) {
amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
@@ -713,7 +713,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
uint64_t gpu_va, uint32_t size,
uint32_t flags, struct kgd_mem **mem, void **kptr)
{
- struct kfd_dev *kdev = pdd->dev;
+ struct kfd_node *kdev = pdd->dev;
int err;
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
@@ -798,18 +798,19 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}
-struct kfd_process *kfd_create_process(struct file *filep)
+struct kfd_process *kfd_create_process(struct task_struct *thread)
{
struct kfd_process *process;
- struct task_struct *thread = current;
int ret;
- if (!thread->mm)
+ if (!(thread->mm && mmget_not_zero(thread->mm)))
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
- if (thread->group_leader->mm != thread->mm)
+ if (thread->group_leader->mm != thread->mm) {
+ mmput(thread->mm);
return ERR_PTR(-EINVAL);
+ }
/*
* take kfd processes mutex before starting of process creation
@@ -818,6 +819,12 @@ struct kfd_process *kfd_create_process(struct file *filep)
*/
mutex_lock(&kfd_processes_mutex);
+ if (kfd_is_locked()) {
+ mutex_unlock(&kfd_processes_mutex);
+ pr_debug("KFD is locked! Cannot create process");
+ return ERR_PTR(-EINVAL);
+ }
+
/* A prior open of /dev/kfd could have already created the process. */
process = find_process(thread, false);
if (process) {
@@ -827,10 +834,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (IS_ERR(process))
goto out;
- ret = kfd_process_init_cwsr_apu(process, filep);
- if (ret)
- goto out_destroy;
-
if (!procfs.kobj)
goto out;
@@ -859,21 +862,16 @@ struct kfd_process *kfd_create_process(struct file *filep)
kfd_procfs_add_sysfs_stats(process);
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+
+ init_waitqueue_head(&process->wait_irq_drain);
}
out:
if (!IS_ERR(process))
kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
+ mmput(thread->mm);
return process;
-
-out_destroy:
- hash_del_rcu(&process->kfd_processes);
- mutex_unlock(&kfd_processes_mutex);
- synchronize_srcu(&kfd_processes_srcu);
- /* kfd_process_free_notifier will trigger the cleanup */
- mmu_notifier_put(&process->mmu_notifier);
- return ERR_PTR(ret);
}
struct kfd_process *kfd_get_process(const struct task_struct *thread)
@@ -982,7 +980,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
struct kfd_process_device *pdd;
- struct kfd_dev *kdev;
+ struct kfd_node *kdev;
void *mem;
kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
@@ -1040,9 +1038,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
bitmap_free(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
- kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
+ kfd_free_process_doorbells(pdd->dev->kfd, pdd->doorbell_index);
- if (pdd->dev->shared_resources.enable_mes)
+ if (pdd->dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
pdd->proc_ctx_bo);
/*
@@ -1169,11 +1167,40 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
+ int i;
+
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ /* Re-enable GFXOFF; runtime enable with ttmp setup had disabled it. */
+ if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+ }
+
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
+ kfd_dbg_trap_disable(p);
+
+ if (atomic_read(&p->debugged_process_count) > 0) {
+ struct kfd_process *target;
+ unsigned int temp;
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
+ if (target->debugger_process && target->debugger_process == p) {
+ mutex_lock_nested(&target->mutex, 1);
+ kfd_dbg_trap_disable(target);
+ mutex_unlock(&target->mutex);
+ if (atomic_read(&p->debugged_process_count) == 0)
+ break;
+ }
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ }
mmu_notifier_put(&p->mmu_notifier);
}
@@ -1253,16 +1280,19 @@ void kfd_cleanup_processes(void)
mmu_notifier_synchronize();
}
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
int i;
+ if (p->has_cwsr)
+ return 0;
+
for (i = 0; i < p->n_pdds; i++) {
- struct kfd_dev *dev = p->pdds[i]->dev;
+ struct kfd_node *dev = p->pdds[i]->dev;
struct qcm_process_device *qpd = &p->pdds[i]->qpd;
- if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
@@ -1279,19 +1309,23 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
return err;
}
- memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+ memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
+
+ kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
}
+ p->has_cwsr = true;
+
return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
@@ -1300,7 +1334,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
void *kaddr;
int ret;
- if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
return 0;
/* cwsr_base is only set for dGPU */
@@ -1313,7 +1347,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
qpd->cwsr_kaddr = kaddr;
qpd->tba_addr = qpd->cwsr_base;
- memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+ memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
+
+ kfd_process_set_trap_debug_flag(&pdd->qpd,
+ pdd->process->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
@@ -1324,10 +1361,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
- if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
return;
kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
@@ -1371,7 +1408,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* support retry.
*/
for (i = 0; i < p->n_pdds; i++) {
- struct kfd_dev *dev = p->pdds[i]->dev;
+ struct kfd_node *dev = p->pdds[i]->dev;
/* Only consider GFXv9 and higher GPUs. Older GPUs don't
* support the SVM APIs and don't need to be considered
@@ -1394,13 +1431,23 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
return false;
- if (dev->noretry)
+ if (dev->kfd->noretry)
return false;
}
return true;
}
+void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
+ bool enabled)
+{
+ if (qpd->cwsr_kaddr) {
+ uint64_t *tma =
+ (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+ tma[2] = enabled;
+ }
+}
+
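[Editor's note: kfd_process_set_trap_debug_flag() above publishes the debug-trap state to the trap handler by writing the third 64-bit slot of the TMA region inside the CWSR buffer. A minimal userspace sketch of that layout; the offset and buffer size below are stand-ins, not the real KFD_CWSR_TMA_OFFSET or CWSR allocation size:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CWSR_TMA_OFFSET 0x800 /* stand-in for the real KFD_CWSR_TMA_OFFSET */

static uint64_t cwsr_area[0x1000 / sizeof(uint64_t)]; /* stand-in for qpd->cwsr_kaddr */

static void set_trap_debug_flag(bool enabled)
{
    uint64_t *tma = cwsr_area + CWSR_TMA_OFFSET / sizeof(uint64_t);

    tma[2] = enabled; /* slot 2 is the flag the trap handler reads */
}

int main(void)
{
    set_trap_debug_flag(true);
    printf("debug flag: %llu\n",
           (unsigned long long)cwsr_area[CWSR_TMA_OFFSET / 8 + 2]);
    return 0;
}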
/*
* On return the kfd_process is fully operational and will be freed when the
* mm is released
@@ -1428,6 +1475,11 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err)
goto err_event_init;
process->is_32bit_user_mode = in_compat_syscall();
+ process->debug_trap_enabled = false;
+ process->debugger_process = NULL;
+ process->exception_enable_mask = 0;
+ atomic_set(&process->debugged_process_count, 0);
+ sema_init(&process->runtime_enable_sema, 0);
process->pasid = kfd_pasid_alloc();
if (process->pasid == 0) {
@@ -1475,6 +1527,8 @@ static struct kfd_process *create_process(const struct task_struct *thread)
kfd_unref_process(process);
get_task_struct(process->lead_thread);
+ INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
+
return process;
err_register_notifier:
@@ -1528,7 +1582,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
return 0;
}
-struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
int i;
@@ -1540,7 +1594,7 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
return NULL;
}
-struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
@@ -1552,7 +1606,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
if (!pdd)
return NULL;
- if (init_doorbell_bitmap(&pdd->qpd, dev)) {
+ if (init_doorbell_bitmap(&pdd->qpd, dev->kfd)) {
pr_err("Failed to init doorbell for process\n");
goto err_free_pdd;
}
@@ -1573,7 +1627,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_PROC_CTX_SIZE,
&pdd->proc_ctx_bo,
@@ -1588,6 +1642,11 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
}
p->pdds[p->n_pdds++] = pdd;
+ if (kfd_dbg_is_per_vmid_supported(pdd->dev))
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
+ pdd->dev->adev,
+ false,
+ 0);
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
@@ -1619,7 +1678,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
struct kfd_process *p;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int ret;
if (!drm_file)
@@ -1679,7 +1738,7 @@ err_reserve_ib_mem:
*
* Assumes that the process lock is held.
*/
-struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd;
@@ -1885,13 +1944,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
}
int
-kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
- uint32_t *gpuid, uint32_t *gpuidx)
+kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
+ uint32_t *gpuid, uint32_t *gpuidx)
{
int i;
for (i = 0; i < p->n_pdds; i++)
- if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
+ if (p->pdds[i] && p->pdds[i]->dev == node) {
*gpuid = p->pdds[i]->user_gpu_id;
*gpuidx = i;
return 0;
@@ -1961,8 +2020,10 @@ static void restore_process_worker(struct work_struct *work)
*/
p->last_restore_timestamp = get_jiffies_64();
- ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
- &p->ef);
+ /* VMs may not have been acquired yet during debugging. */
+ if (p->kgd_process_info)
+ ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
+ &p->ef);
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
@@ -1988,7 +2049,7 @@ void kfd_suspend_all_processes(void)
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
+ flush_delayed_work(&p->restore_work);
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
pr_err("Failed to suspend process 0x%x\n", p->pasid);
@@ -2016,7 +2077,7 @@ int kfd_resume_all_processes(void)
return ret;
}
-int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
struct kfd_process_device *pdd;
@@ -2051,7 +2112,9 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
+ uint32_t xcc_mask = dev->xcc_mask;
+ int xcc = 0;
/*
* It can be that we race and lose here, but that is extremely unlikely
@@ -2069,11 +2132,126 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
pdd->qpd.vmid);
} else {
- amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
- pdd->process->pasid, type);
+ for_each_inst(xcc, xcc_mask)
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(
+ dev->adev, pdd->process->pasid, type, xcc);
}
}
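[Editor's note: with partitioned GPUs the PASID TLB flush above is issued once per XCC; for_each_inst() visits each set bit of xcc_mask. A runnable userspace analogue of that iteration:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t xcc_mask = 0x0b; /* hypothetical mask: instances 0, 1 and 3 */
    uint32_t m;

    /* Equivalent of for_each_inst(xcc, xcc_mask): visit each set bit. */
    for (m = xcc_mask; m; m &= m - 1)
        printf("flush TLB for PASID on XCC %d\n", __builtin_ctz(m));
    return 0;
}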
+/* Assumes the caller holds the process lock. */
+int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
+{
+ uint32_t irq_drain_fence[8];
+ int r = 0;
+
+ if (!KFD_IS_SOC15(pdd->dev))
+ return 0;
+
+ pdd->process->irq_drain_is_open = true;
+
+ memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
+ irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
+ KFD_IRQ_FENCE_CLIENTID;
+ irq_drain_fence[3] = pdd->process->pasid;
+
+ /* Ensure stale irqs have been scheduled as KFD interrupts, then send the drain fence. */
+ if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
+ irq_drain_fence)) {
+ pdd->process->irq_drain_is_open = false;
+ return 0;
+ }
+
+ r = wait_event_interruptible(pdd->process->wait_irq_drain,
+ !READ_ONCE(pdd->process->irq_drain_is_open));
+ if (r)
+ pdd->process->irq_drain_is_open = false;
+
+ return r;
+}
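[Editor's note: the drain fence built in kfd_process_drain_interrupts() is shaped like an IH ring entry: dword 0 carries the source and client IDs and dword 3 the PASID, so the fence is routed back to the driver like any other interrupt. A sketch of the packing; the ID values are assumptions, the real KFD_IRQ_FENCE_* constants live elsewhere in the driver:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed values; the real KFD_IRQ_FENCE_* constants are driver-defined. */
#define IRQ_FENCE_SOURCEID 0xff
#define IRQ_FENCE_CLIENTID 0xff

int main(void)
{
    uint32_t fence[8];
    uint32_t pasid = 0x8001; /* hypothetical process address space ID */

    memset(fence, 0, sizeof(fence));
    fence[0] = (IRQ_FENCE_SOURCEID << 8) | IRQ_FENCE_CLIENTID;
    fence[3] = pasid;

    printf("dw0=0x%08x dw3=0x%08x\n", fence[0], fence[3]);
    return 0;
}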
+
+void kfd_process_close_interrupt_drain(unsigned int pasid)
+{
+ struct kfd_process *p;
+
+ p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return;
+
+ WRITE_ONCE(p->irq_drain_is_open, false);
+ wake_up_all(&p->wait_irq_drain);
+ kfd_unref_process(p);
+}
+
+struct send_exception_work_handler_workarea {
+ struct work_struct work;
+ struct kfd_process *p;
+ unsigned int queue_id;
+ uint64_t error_reason;
+};
+
+static void send_exception_work_handler(struct work_struct *work)
+{
+ struct send_exception_work_handler_workarea *workarea;
+ struct kfd_process *p;
+ struct queue *q;
+ struct mm_struct *mm;
+ struct kfd_context_save_area_header __user *csa_header;
+ uint64_t __user *err_payload_ptr;
+ uint64_t cur_err;
+ uint32_t ev_id;
+
+ workarea = container_of(work,
+ struct send_exception_work_handler_workarea,
+ work);
+ p = workarea->p;
+
+ mm = get_task_mm(p->lead_thread);
+
+ if (!mm)
+ return;
+
+ kthread_use_mm(mm);
+
+ q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
+
+ if (!q)
+ goto out;
+
+ csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
+
+ get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
+ get_user(cur_err, err_payload_ptr);
+ cur_err |= workarea->error_reason;
+ put_user(cur_err, err_payload_ptr);
+ get_user(ev_id, &csa_header->err_event_id);
+
+ kfd_set_event(p, ev_id);
+
+out:
+ kthread_unuse_mm(mm);
+ mmput(mm);
+}
+
+int kfd_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int queue_id,
+ uint64_t error_reason)
+{
+ struct send_exception_work_handler_workarea worker;
+
+ INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
+
+ worker.p = p;
+ worker.queue_id = queue_id;
+ worker.error_reason = error_reason;
+
+ schedule_work(&worker.work);
+ flush_work(&worker.work);
+ destroy_work_on_stack(&worker.work);
+
+ return 0;
+}
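[Editor's note: kfd_send_exception_to_runtime() uses the on-stack work pattern: the workarea may live on the caller's stack only because flush_work() blocks until the handler has finished with it. A userspace analogue of the same hand-off, using POSIX threads rather than the kernel workqueue API:]

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct workarea {
    unsigned int queue_id;
    uint64_t error_reason;
};

static void *handler(void *arg)
{
    struct workarea *w = arg; /* safe: the caller waits before unwinding */

    printf("queue %u: reason 0x%llx\n", w->queue_id,
           (unsigned long long)w->error_reason);
    return NULL;
}

int main(void)
{
    struct workarea w = { .queue_id = 3, .error_reason = 0x4 };
    pthread_t t;

    pthread_create(&t, NULL, handler, &w);
    pthread_join(&t, NULL); /* plays the role of flush_work() */
    return 0;
}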
+
struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
int i;
@@ -2133,4 +2311,3 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
}
#endif
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 4236539d9f93..9ad1a2186a24 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -81,7 +81,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
if (pdd->already_dequeued)
return;
@@ -93,7 +93,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
struct kgd_mem *mem = NULL;
@@ -178,7 +178,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
}
static int init_user_queue(struct process_queue_manager *pqm,
- struct kfd_dev *dev, struct queue **q,
+ struct kfd_node *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, struct amdgpu_bo *wptr_bo,
unsigned int qid)
@@ -187,6 +187,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
/* Doorbell initialized in user space */
q_properties->doorbell_ptr = NULL;
+ q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);
/* let DQM handle it */
q_properties->vmid = 0;
@@ -199,7 +200,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
(*q)->device = dev;
(*q)->process = pqm->process;
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_GANG_CTX_SIZE,
&(*q)->gang_ctx_bo,
@@ -224,7 +225,7 @@ cleanup:
}
int pqm_create_queue(struct process_queue_manager *pqm,
- struct kfd_dev *dev,
+ struct kfd_node *dev,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
@@ -242,6 +243,13 @@ int pqm_create_queue(struct process_queue_manager *pqm,
enum kfd_queue_type type = properties->type;
unsigned int max_queues = 127; /* HWS limit */
+ /*
+ * GFX 9.4.3 has no HWS queue limit, so raise the number
+ * of queues that can be created there to 255.
+ */
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))
+ max_queues = 255;
+
q = NULL;
kq = NULL;
@@ -258,7 +266,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* Hence we also check the type as well
*/
if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
- max_queues = dev->device_info.max_no_of_hqd/2;
+ max_queues = dev->kfd->device_info.max_no_of_hqd/2;
if (pdd->qpd.queue_count >= max_queues)
return -ENOSPC;
@@ -330,6 +338,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq->queue->properties.queue_id = *qid;
pqn->kq = kq;
pqn->q = NULL;
+ retval = kfd_process_drain_interrupts(pdd);
+ if (retval)
+ break;
+
retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
kq, &pdd->qpd);
break;
@@ -354,7 +366,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
*/
*p_doorbell_offset_in_process =
(q->properties.doorbell_off * sizeof(uint32_t)) &
- (kfd_doorbell_process_slice(dev) - 1);
+ (kfd_doorbell_process_slice(dev->kfd) - 1);
pr_debug("PQM After DQM create queue\n");
@@ -387,7 +399,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
struct device_queue_manager *dqm;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int retval;
dqm = NULL;
@@ -439,7 +451,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
pdd->qpd.num_gws = 0;
}
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev,
pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo)
@@ -477,6 +489,7 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
pqn->q->properties.queue_size = p->queue_size;
pqn->q->properties.queue_percent = p->queue_percent;
pqn->q->properties.priority = p->priority;
+ pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
@@ -498,8 +511,12 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
return -EFAULT;
}
+ /* CUs are already masked for debugger requirements, so deny the user CU mask */
+ if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
+ return -EBUSY;
+
/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
- if (minfo && minfo->update_flag == UPDATE_FLAG_CU_MASK && minfo->cu_mask.ptr &&
+ if (minfo && minfo->cu_mask.ptr &&
KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
int i;
@@ -518,6 +535,9 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
if (retval != 0)
return retval;
+ if (minfo && minfo->cu_mask.ptr)
+ pqn->q->properties.is_user_cu_masked = true;
+
return 0;
}
@@ -565,6 +585,46 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
save_area_used_size);
}
+int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
+ uint64_t exception_clear_mask,
+ void __user *buf,
+ int *num_qss_entries,
+ uint32_t *entry_size)
+{
+ struct process_queue_node *pqn;
+ struct kfd_queue_snapshot_entry src;
+ uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
+ int r = 0;
+
+ *num_qss_entries = 0;
+ if (!(*entry_size))
+ return -EINVAL;
+
+ *entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
+ mutex_lock(&pqm->process->event_mutex);
+
+ memset(&src, 0, sizeof(src));
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (!pqn->q)
+ continue;
+
+ if (*num_qss_entries < tmp_qss_entries) {
+ set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);
+
+ if (copy_to_user(buf, &src, *entry_size)) {
+ r = -EFAULT;
+ break;
+ }
+ buf += tmp_entry_size;
+ }
+ *num_qss_entries += 1;
+ }
+
+ mutex_unlock(&pqm->process->event_mutex);
+ return r;
+}
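[Editor's note: pqm_get_queue_snapshot() follows a common snapshot-ABI contract: per entry it copies at most min(user entry_size, kernel struct size) bytes, fills no more slots than the caller provided, but still returns the total queue count so the caller can resize and retry. A userspace model of that contract; the sizes are illustrative:]

#include <stdio.h>
#include <string.h>

#define KERNEL_ENTRY_SIZE 64 /* stand-in for sizeof(struct kfd_queue_snapshot_entry) */

static int snapshot(int n_queues, char *buf, int *num_entries, int *entry_size)
{
    int capacity = *num_entries;
    int stride = *entry_size; /* the caller's buffer stride is preserved */
    int q;

    if (!stride)
        return -1;
    if (*entry_size > KERNEL_ENTRY_SIZE)
        *entry_size = KERNEL_ENTRY_SIZE; /* copy only what both sides know */

    *num_entries = 0;
    for (q = 0; q < n_queues; q++) {
        if (*num_entries < capacity) {
            memset(buf, 0, *entry_size); /* stands in for copy_to_user() */
            buf += stride;
        }
        (*num_entries)++; /* counted even when not copied */
    }
    return 0;
}

int main(void)
{
    char buf[256];
    int n = 2, sz = 128; /* room for two oversized entries */

    snapshot(5, buf, &n, &sz);
    printf("copied %d-byte entries; %d queues exist\n", sz, n);
    return 0;
}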
+
static int get_queue_data_sizes(struct kfd_process_device *pdd,
struct queue *q,
uint32_t *mqd_size,
@@ -859,7 +919,7 @@ int kfd_criu_restore_queue(struct kfd_process *p,
}
if (!pdd->doorbell_index &&
- kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) {
ret = -ENOMEM;
goto exit;
}
@@ -927,7 +987,9 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
struct queue *q;
enum KFD_MQD_TYPE mqd_type;
struct mqd_manager *mqd_mgr;
- int r = 0;
+ int r = 0, xcc, num_xccs = 1;
+ void *mqd;
+ uint64_t size = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if (pqn->q) {
@@ -943,6 +1005,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
seq_printf(m, " Compute queue on device %x\n",
q->device->id);
mqd_type = KFD_MQD_TYPE_CP;
+ num_xccs = NUM_XCC(q->device->xcc_mask);
break;
default:
seq_printf(m,
@@ -951,6 +1014,8 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
+ size = mqd_mgr->mqd_stride(mqd_mgr,
+ &q->properties);
} else if (pqn->kq) {
q = pqn->kq->queue;
mqd_mgr = pqn->kq->mqd_mgr;
@@ -972,9 +1037,12 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
- r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
- if (r != 0)
- break;
+ for (xcc = 0; xcc < num_xccs; xcc++) {
+ mqd = q->mqd + size * xcc;
+ r = mqd_mgr->debugfs_show_mqd(m, mqd);
+ if (r != 0)
+ break;
+ }
}
return r;
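[Editor's note: the debugfs dump now prints one MQD per XCC: on multi-XCC parts the per-queue MQDs sit back to back, mqd_stride() bytes apart, so the xcc-th copy is at q->mqd + size * xcc. A sketch of that address arithmetic with assumed values:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mqd_base = 0x100000; /* hypothetical base of the MQD array */
    uint64_t stride = 0x200;      /* assumed mqd_stride() result */
    int num_xccs = 4, xcc;        /* e.g. NUM_XCC(xcc_mask) on a 4-XCC part */

    for (xcc = 0; xcc < num_xccs; xcc++)
        printf("XCC %d MQD at 0x%llx\n", xcc,
               (unsigned long long)(mqd_base + stride * xcc));
    return 0;
}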
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 0472b56de245..d9953c2b2661 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -36,7 +36,7 @@ struct kfd_smi_client {
wait_queue_head_t wait_queue;
/* events enabled */
uint64_t events;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
spinlock_t lock;
struct rcu_head rcu;
pid_t pid;
@@ -149,7 +149,7 @@ static void kfd_smi_ev_client_free(struct rcu_head *p)
static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
{
struct kfd_smi_client *client = filep->private_data;
- struct kfd_dev *dev = client->dev;
+ struct kfd_node *dev = client->dev;
spin_lock(&dev->smi_lock);
list_del_rcu(&client->list);
@@ -171,7 +171,7 @@ static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
}
-static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev,
+static void add_event_to_kfifo(pid_t pid, struct kfd_node *dev,
unsigned int smi_event, char *event_msg, int len)
{
struct kfd_smi_client *client;
@@ -196,7 +196,7 @@ static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev,
}
__printf(4, 5)
-static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev,
+static void kfd_smi_event_add(pid_t pid, struct kfd_node *dev,
unsigned int event, char *fmt, ...)
{
char fifo_in[KFD_SMI_EVENT_MSG_SIZE];
@@ -215,7 +215,7 @@ static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev,
add_event_to_kfifo(pid, dev, event, fifo_in, len);
}
-void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
+void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset)
{
unsigned int event;
@@ -228,7 +228,7 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
kfd_smi_event_add(0, dev, event, "%x\n", dev->reset_seq_num);
}
-void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
+void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
uint64_t throttle_bitmask)
{
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n",
@@ -236,7 +236,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
}
-void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
+void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
{
struct amdgpu_task_info task_info;
@@ -250,58 +250,58 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
task_info.pid, task_info.task_name);
}
-void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
unsigned long address, bool write_fault,
ktime_t ts)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_START,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START,
"%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid,
- address, dev->id, write_fault ? 'W' : 'R');
+ address, node->id, write_fault ? 'W' : 'R');
}
-void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
unsigned long address, bool migration)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_END,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END,
"%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(),
- pid, address, dev->id, migration ? 'M' : 'U');
+ pid, address, node->id, migration ? 'M' : 'U');
}
-void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to,
uint32_t prefetch_loc, uint32_t preferred_loc,
uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_START,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START,
"%lld -%d @%lx(%lx) %x->%x %x:%x %d\n",
ktime_get_boottime_ns(), pid, start, end - start,
from, to, prefetch_loc, preferred_loc, trigger);
}
-void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to, uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_END,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
"%lld -%d @%lx(%lx) %x->%x %d\n",
ktime_get_boottime_ns(), pid, start, end - start,
from, to, trigger);
}
-void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_EVICTION,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION,
"%lld -%d %x %d\n", ktime_get_boottime_ns(), pid,
- dev->id, trigger);
+ node->id, trigger);
}
-void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid)
+void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_RESTORE,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
"%lld -%d %x\n", ktime_get_boottime_ns(), pid,
- dev->id);
+ node->id);
}
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
@@ -324,16 +324,16 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
kfd_unref_process(p);
}
-void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_UNMAP_FROM_GPU,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
"%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(),
- pid, address, last - address + 1, dev->id, trigger);
+ pid, address, last - address + 1, node->id, trigger);
}
-int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
+int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
int ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 76fe4e0ec2d2..fa95c2dfd587 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -24,29 +24,29 @@
#ifndef KFD_SMI_EVENTS_H_INCLUDED
#define KFD_SMI_EVENTS_H_INCLUDED
-int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd);
-void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid);
-void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
+int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd);
+void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid);
+void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
uint64_t throttle_bitmask);
-void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset);
-void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset);
+void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
unsigned long address, bool write_fault,
ktime_t ts);
-void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
unsigned long address, bool migration);
-void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to,
uint32_t prefetch_loc, uint32_t preferred_loc,
uint32_t trigger);
-void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to, uint32_t trigger);
-void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger);
-void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid);
+void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid);
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
-void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 96a138a39515..5ff1a5a89d96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -170,12 +170,11 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
page = hmm_pfn_to_page(hmm_pfns[i]);
if (is_zone_device_page(page)) {
- struct amdgpu_device *bo_adev =
- amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+ struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
bo_adev->vm_manager.vram_base_offset -
- bo_adev->kfd.dev->pgmap.range.start;
+ bo_adev->kfd.pgmap.range.start;
addr[i] |= SVM_RANGE_VRAM_DOMAIN;
pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
continue;
@@ -281,7 +280,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
if (update_mem_usage && !p->xnack_enabled) {
pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
}
mutex_destroy(&prange->lock);
mutex_destroy(&prange->migrate_mutex);
@@ -314,7 +313,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
p = container_of(svms, struct kfd_process, svms);
if (!p->xnack_enabled && update_mem_usage &&
amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
pr_info("SVM mapping failed, exceeds resident system memory limit\n");
kfree(prange);
return NULL;
@@ -424,10 +423,8 @@ static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
}
static bool
-svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
+svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
{
- struct amdgpu_device *bo_adev;
-
mutex_lock(&prange->lock);
if (!prange->svm_bo) {
mutex_unlock(&prange->lock);
@@ -440,12 +437,11 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
}
if (svm_bo_ref_unless_zero(prange->svm_bo)) {
/*
- * Migrate from GPU to GPU, remove range from source bo_adev
- * svm_bo range list, and return false to allocate svm_bo from
- * destination adev.
+ * Migrate from GPU to GPU, remove range from source svm_bo->node
+ * range list, and return false to allocate svm_bo from destination
+ * node.
*/
- bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
- if (bo_adev != adev) {
+ if (prange->svm_bo->node != node) {
mutex_unlock(&prange->lock);
spin_lock(&prange->svm_bo->list_lock);
@@ -513,7 +509,7 @@ static struct svm_range_bo *svm_range_bo_new(void)
}
int
-svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
+svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
struct amdgpu_bo_param bp;
@@ -528,7 +524,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
prange->start, prange->last);
- if (svm_range_validate_svm_bo(adev, prange))
+ if (svm_range_validate_svm_bo(node, prange))
return 0;
svm_bo = svm_range_bo_new();
@@ -542,6 +538,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
+ svm_bo->node = node;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -558,13 +555,20 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
bp.type = ttm_bo_type_device;
bp.resv = NULL;
+ if (node->xcp)
+ bp.xcp_id_plus1 = node->xcp->id + 1;
- r = amdgpu_bo_create_user(adev, &bp, &ubo);
+ r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
if (r) {
pr_debug("failed %d to create bo\n", r);
goto create_bo_failed;
}
bo = &ubo->bo;
+
+ pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
+ bo->tbo.resource->start << PAGE_SHIFT, bp.size,
+ bp.xcp_id_plus1 - 1);
+
r = amdgpu_bo_reserve(bo, true);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
@@ -617,45 +621,30 @@ void svm_range_vram_node_free(struct svm_range *prange)
prange->ttm_res = NULL;
}
-struct amdgpu_device *
-svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
+struct kfd_node *
+svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
{
- struct kfd_process_device *pdd;
struct kfd_process *p;
- int32_t gpu_idx;
+ struct kfd_process_device *pdd;
p = container_of(prange->svms, struct kfd_process, svms);
-
- gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
- if (gpu_idx < 0) {
- pr_debug("failed to get device by id 0x%x\n", gpu_id);
- return NULL;
- }
- pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
+ pdd = kfd_process_device_data_by_id(p, gpu_id);
if (!pdd) {
- pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
+ pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
return NULL;
}
- return pdd->dev->adev;
+ return pdd->dev;
}
struct kfd_process_device *
-svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
+svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
{
struct kfd_process *p;
- int32_t gpu_idx, gpuid;
- int r;
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
- if (r) {
- pr_debug("failed to get device id by adev %p\n", adev);
- return NULL;
- }
-
- return kfd_process_device_from_gpuidx(p, gpu_idx);
+ return kfd_get_process_device_data(node, p);
}
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
@@ -735,7 +724,9 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
- *update_mapping = true;
+ if (!p->xnack_enabled)
+ *update_mapping = true;
+
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value);
if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
@@ -818,7 +809,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
}
}
- return true;
+ return !prange->is_error_flag;
}
/**
@@ -1146,31 +1137,39 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
}
return 0;
}
+static bool
+svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
+{
+ return (node_a->adev == node_b->adev ||
+ amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
+}
static uint64_t
-svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
- int domain)
+svm_range_get_pte_flags(struct kfd_node *node,
+ struct svm_range *prange, int domain)
{
- struct amdgpu_device *bo_adev;
+ struct kfd_node *bo_node;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
uint64_t pte_flags;
bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
+ bool uncached = false; /* flags & KFD_IOCTL_SVM_FLAG_UNCACHED; */
+ unsigned int mtype_local;
if (domain == SVM_RANGE_VRAM_DOMAIN)
- bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+ bo_node = prange->svm_bo->node;
- switch (KFD_GC_VERSION(adev->kfd.dev)) {
+ switch (node->adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_adev == adev) {
+ if (bo_node == node) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ if (svm_nodes_in_same_hive(node, bo_node))
snoop = true;
}
} else {
@@ -1180,15 +1179,15 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
break;
case IP_VERSION(9, 4, 2):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_adev == adev) {
+ if (bo_node == node) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (node->adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ if (svm_nodes_in_same_hive(node, bo_node))
snoop = true;
}
} else {
@@ -1196,6 +1195,37 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
+ case IP_VERSION(9, 4, 3):
+ mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
+ (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW);
+ snoop = true;
+ if (uncached) {
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else if (domain == SVM_RANGE_VRAM_DOMAIN) {
+ /* local HBM region close to partition */
+ if (bo_node->adev == node->adev &&
+ (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
+ mapping_flags |= mtype_local;
+ /* local HBM region far from partition or remote XGMI GPU */
+ else if (svm_nodes_in_same_hive(bo_node, node))
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ /* PCIe P2P */
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ /* system memory accessed by the APU */
+ } else if (node->adev->flags & AMD_IS_APU) {
+ /* On NUMA systems, locality is determined per-page
+ * in amdgpu_gmc_override_vm_pte_flags
+ */
+ if (num_possible_nodes() <= 1)
+ mapping_flags |= mtype_local;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ /* system memory accessed by the dGPU */
+ } else {
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ }
+ break;
default:
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
@@ -1212,7 +1242,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
- pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
+ pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
return pte_flags;
}
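[Editor's note: the new GFX 9.4.3 branch above picks MTYPEs from a small decision table: the amdgpu_mtype_local module parameter selects the local type (1 maps to NC, 2 to CC, anything else to RW), and the BO's placement relative to the faulting partition selects local, NC, or UC. A compact model of just the parameter mapping:]

#include <stdio.h>

enum mtype { MTYPE_RW, MTYPE_NC, MTYPE_CC, MTYPE_UC };

static enum mtype mtype_local_from_param(int amdgpu_mtype_local)
{
    /* mirrors the ternary in svm_range_get_pte_flags() */
    return amdgpu_mtype_local == 1 ? MTYPE_NC :
           (amdgpu_mtype_local == 2 ? MTYPE_CC : MTYPE_RW);
}

int main(void)
{
    int p;

    for (p = 0; p <= 2; p++)
        printf("amdgpu_mtype_local=%d -> mtype %d\n", p,
               mtype_local_from_param(p));
    return 0;
}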
@@ -1319,7 +1349,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1328,6 +1358,10 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
(last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
pte_flags);
+ /* In dGPU mode the same vm_manager allocates VRAM for
+ * different memory partitions based on fpfn/lpfn, so the
+ * same vm_manager.vram_base_offset applies to every partition.
+ */
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
last_start, prange->start + i,
pte_flags,
@@ -1365,16 +1399,14 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
unsigned long *bitmap, bool wait, bool flush_tlb)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *bo_adev;
+ struct amdgpu_device *bo_adev = NULL;
struct kfd_process *p;
struct dma_fence *fence = NULL;
uint32_t gpuidx;
int r = 0;
if (prange->svm_bo && prange->ttm_res)
- bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
- else
- bo_adev = NULL;
+ bo_adev = prange->svm_bo->node->adev;
p = container_of(prange->svms, struct kfd_process, svms);
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
@@ -1522,48 +1554,54 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
struct svm_range *prange, int32_t gpuidx,
bool intr, bool wait, bool flush_tlb)
{
- struct svm_validate_context ctx;
+ struct svm_validate_context *ctx;
unsigned long start, end, addr;
struct kfd_process *p;
void *owner;
int32_t idx;
int r = 0;
- ctx.process = container_of(prange->svms, struct kfd_process, svms);
- ctx.prange = prange;
- ctx.intr = intr;
+ ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->process = container_of(prange->svms, struct kfd_process, svms);
+ ctx->prange = prange;
+ ctx->intr = intr;
if (gpuidx < MAX_GPU_INSTANCE) {
- bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
- bitmap_set(ctx.bitmap, gpuidx, 1);
- } else if (ctx.process->xnack_enabled) {
- bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
+ bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
+ bitmap_set(ctx->bitmap, gpuidx, 1);
+ } else if (ctx->process->xnack_enabled) {
+ bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
/* If prefetch range to GPU, or GPU retry fault migrate range to
* GPU, which has ACCESS attribute to the range, create mapping
* on that GPU.
*/
if (prange->actual_loc) {
- gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
+ gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
prange->actual_loc);
if (gpuidx < 0) {
WARN_ONCE(1, "failed get device by id 0x%x\n",
prange->actual_loc);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_ctx;
}
if (test_bit(gpuidx, prange->bitmap_access))
- bitmap_set(ctx.bitmap, gpuidx, 1);
+ bitmap_set(ctx->bitmap, gpuidx, 1);
}
} else {
- bitmap_or(ctx.bitmap, prange->bitmap_access,
+ bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
- if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
- if (!prange->mapped_to_gpu)
- return 0;
+ if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
+ if (!prange->mapped_to_gpu) {
+ r = 0;
+ goto free_ctx;
+ }
- bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+ bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
}
if (prange->actual_loc && !prange->ttm_res) {
@@ -1571,15 +1609,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
* svm_migrate_ram_to_vram after allocating a BO.
*/
WARN_ONCE(1, "VRAM BO missing during validation\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto free_ctx;
}
- svm_range_reserve_bos(&ctx);
+ svm_range_reserve_bos(ctx);
p = container_of(prange->svms, struct kfd_process, svms);
- owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
+ owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
MAX_GPU_INSTANCE));
- for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
+ for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
if (kfd_svm_page_owner(p, idx) != owner) {
owner = NULL;
break;
@@ -1616,7 +1655,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
offset = (addr - start) >> PAGE_SHIFT;
- r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
+ r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns);
if (r) {
pr_debug("failed %d to dma map range\n", r);
@@ -1636,7 +1675,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
r = svm_range_map_to_gpus(prange, offset, npages, readonly,
- ctx.bitmap, wait, flush_tlb);
+ ctx->bitmap, wait, flush_tlb);
unlock_out:
svm_range_unlock(prange);
@@ -1650,11 +1689,15 @@ unlock_out:
}
unreserve_out:
- svm_range_unreserve_bos(&ctx);
+ svm_range_unreserve_bos(ctx);
+ prange->is_error_flag = !!r;
if (!r)
prange->validate_timestamp = ktime_get_boottime();
+free_ctx:
+ kfree(ctx);
+
return r;
}
@@ -1783,6 +1826,7 @@ out_reschedule:
* @mm: current process mm_struct
* @start: first page number of the range to evict
* @last: last page number of the range to evict
+ * @event: mmu notifier event when range is evicted or migrated
*
* Stop all queues of the process to ensure GPU doesn't access the memory, then
* return to let the CPU evict the buffer and proceed with the page table update.
@@ -1906,14 +1950,23 @@ void svm_range_set_max_pages(struct amdgpu_device *adev)
{
uint64_t max_pages;
uint64_t pages, _pages;
+ uint64_t min_pages = 0;
+ int i, id;
+
+ for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
+ if (adev->kfd.dev->nodes[i]->xcp)
+ id = adev->kfd.dev->nodes[i]->xcp->id;
+ else
+ id = -1;
+ pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
+ pages = clamp(pages, 1ULL << 9, 1ULL << 18);
+ pages = rounddown_pow_of_two(pages);
+ min_pages = min_not_zero(min_pages, pages);
+ }
- /* 1/32 VRAM size in pages */
- pages = adev->gmc.real_vram_size >> 17;
- pages = clamp(pages, 1ULL << 9, 1ULL << 18);
- pages = rounddown_pow_of_two(pages);
do {
max_pages = READ_ONCE(max_svm_range_pages);
- _pages = min_not_zero(max_pages, pages);
+ _pages = min_not_zero(max_pages, min_pages);
} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
}
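[Editor's note: svm_range_set_max_pages() keeps the old heuristic per partition (memory size >> 17, i.e. 1/32 of the partition in 4 KiB pages, clamped to [2^9, 2^18] and rounded down to a power of two) and now takes the minimum across partitions, published through a lock-free cmpxchg loop. A userspace sketch of both steps using GCC atomics; the partition size is assumed:]

#include <stdint.h>
#include <stdio.h>

static uint64_t max_svm_range_pages; /* 0 == unset, as in the driver */

static uint64_t heuristic(uint64_t mem_bytes)
{
    uint64_t pages = mem_bytes >> 17; /* 1/32 of memory in 4 KiB pages */

    if (pages < (1ULL << 9))
        pages = 1ULL << 9;
    if (pages > (1ULL << 18))
        pages = 1ULL << 18;
    while (pages & (pages - 1)) /* round down to a power of two */
        pages &= pages - 1;
    return pages;
}

int main(void)
{
    uint64_t pages = heuristic(24ULL << 30); /* assumed 24 GiB partition */
    uint64_t old, val;

    do {
        old = __atomic_load_n(&max_svm_range_pages, __ATOMIC_RELAXED);
        val = (old && old < pages) ? old : pages; /* min_not_zero() */
    } while (!__atomic_compare_exchange_n(&max_svm_range_pages, &old, val,
                                          0, __ATOMIC_RELAXED,
                                          __ATOMIC_RELAXED));

    printf("max SVM range pages: %llu\n",
           (unsigned long long)max_svm_range_pages);
    return 0;
}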
@@ -2507,29 +2560,31 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
*/
static int32_t
svm_range_best_restore_location(struct svm_range *prange,
- struct amdgpu_device *adev,
+ struct kfd_node *node,
int32_t *gpuidx)
{
- struct amdgpu_device *bo_adev, *preferred_adev;
+ struct kfd_node *bo_node, *preferred_node;
struct kfd_process *p;
uint32_t gpuid;
int r;
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
+ r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
if (r < 0) {
pr_debug("failed to get gpuid from kgd\n");
return -1;
}
+ if (node->adev->gmc.is_app_apu)
+ return 0;
+
if (prange->preferred_loc == gpuid ||
prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
return prange->preferred_loc;
} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
- preferred_adev = svm_range_get_adev_by_id(prange,
- prange->preferred_loc);
- if (amdgpu_xgmi_same_hive(adev, preferred_adev))
+ preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
+ if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
return prange->preferred_loc;
/* fall through */
}
@@ -2541,8 +2596,8 @@ svm_range_best_restore_location(struct svm_range *prange,
if (!prange->actual_loc)
return 0;
- bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
- if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
+ if (bo_node && svm_nodes_in_same_hive(node, bo_node))
return prange->actual_loc;
else
return 0;
@@ -2659,7 +2714,7 @@ svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
}
static struct
-svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
+svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
struct kfd_process *p,
struct mm_struct *mm,
int64_t addr)
@@ -2694,7 +2749,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
return NULL;
}
- if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
+ if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
pr_debug("failed to get gpuid from kgd\n");
svm_range_free(prange, true);
return NULL;
@@ -2748,7 +2803,7 @@ static bool svm_range_skip_recover(struct svm_range *prange)
}
static void
-svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
+svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
int32_t gpuidx)
{
struct kfd_process_device *pdd;
@@ -2761,7 +2816,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
uint32_t gpuid;
int r;
- r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
+ r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
if (r < 0)
return;
}
@@ -2789,6 +2844,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
struct mm_struct *mm = NULL;
@@ -2796,6 +2852,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
struct svm_range *prange;
struct kfd_process *p;
ktime_t timestamp = ktime_get_boottime();
+ struct kfd_node *node;
int32_t best_loc;
int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
@@ -2803,7 +2860,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
bool migration = false;
int r = 0;
- if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
+ if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
pr_debug("device does not support SVM\n");
return -EFAULT;
}
@@ -2839,6 +2896,13 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
goto out;
}
+ node = kfd_node_by_irq_ids(adev, node_id, vmid);
+ if (!node) {
+ pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
+ vmid);
+ r = -EFAULT;
+ goto out;
+ }
mmap_read_lock(mm);
retry_write_locked:
mutex_lock(&svms->lock);
@@ -2857,7 +2921,7 @@ retry_write_locked:
write_locked = true;
goto retry_write_locked;
}
- prange = svm_range_create_unregistered_range(adev, p, mm, addr);
+ prange = svm_range_create_unregistered_range(node, p, mm, addr);
if (!prange) {
pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
svms, addr);
@@ -2872,7 +2936,7 @@ retry_write_locked:
mutex_lock(&prange->migrate_mutex);
if (svm_range_skip_recover(prange)) {
- amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+ amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
r = 0;
goto out_unlock_range;
}
@@ -2903,7 +2967,7 @@ retry_write_locked:
goto out_unlock_range;
}
- best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
+ best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
if (best_loc == -1) {
pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
svms, prange->start, prange->last);
@@ -2915,7 +2979,7 @@ retry_write_locked:
svms, prange->start, prange->last, best_loc,
prange->actual_loc);
- kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr,
+ kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
write_fault, timestamp);
if (prange->actual_loc != best_loc) {
@@ -2953,7 +3017,7 @@ retry_write_locked:
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
r, svms, prange->start, prange->last);
- kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr,
+ kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
out_unlock_range:
@@ -2962,7 +3026,7 @@ out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
- svm_range_count_fault(adev, p, gpuidx);
+ svm_range_count_fault(node, p, gpuidx);
mmput(mm);
out:
@@ -2970,7 +3034,7 @@ out:
if (r == -EAGAIN) {
pr_debug("recover vm fault later\n");
- amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+ amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
r = 0;
}
return r;
@@ -2994,10 +3058,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
if (xnack_enabled) {
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
} else {
r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
if (r)
goto out_unlock;
reserved_size += size;
@@ -3007,10 +3071,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
size = (prange->last - prange->start + 1) << PAGE_SHIFT;
if (xnack_enabled) {
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
} else {
r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
if (r)
goto out_unlock;
reserved_size += size;
@@ -3023,7 +3087,7 @@ out_unlock:
if (r)
amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
else
/* Changing the xnack mode must happen while holding the svms lock, to
 * avoid racing with svm_range_deferred_list_work unreserving memory in
 * parallel.
 */
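The two hunks above follow a reserve-and-roll-back accounting idiom: each range is reserved individually, the running total is tracked in reserved_size, and any failure unreserves the whole total at out_unlock. A condensed sketch of the idiom, with reserve()/unreserve() standing in for amdgpu_amdkfd_reserve_mem_limit()/amdgpu_amdkfd_unreserve_mem_limit() (whose new trailing argument, 0 throughout this diff, appears to select the memory partition to account against):

    /* Hedged sketch -- reserve()/unreserve() are stand-ins. */
    int reserve(uint64_t size);
    void unreserve(uint64_t size);

    static int reserve_all(const uint64_t *sizes, int n)
    {
            uint64_t reserved = 0;
            int i, r;

            for (i = 0; i < n; i++) {
                    r = reserve(sizes[i]);
                    if (r) {
                            unreserve(reserved); /* roll back the running total */
                            return r;
                    }
                    reserved += sizes[i];
            }
            return 0;
    }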
@@ -3081,7 +3145,7 @@ int svm_range_list_init(struct kfd_process *p)
spin_lock_init(&svms->deferred_list_lock);
for (i = 0; i < p->n_pdds; i++)
- if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
+ if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
bitmap_set(svms->bitmap_supported, i, 1);
return 0;
@@ -3212,7 +3276,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
uint32_t best_loc = prange->prefetch_loc;
struct kfd_process_device *pdd;
- struct amdgpu_device *bo_adev;
+ struct kfd_node *bo_node;
struct kfd_process *p;
uint32_t gpuidx;
@@ -3221,9 +3285,14 @@ svm_range_best_prefetch_location(struct svm_range *prange)
if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
goto out;
- bo_adev = svm_range_get_adev_by_id(prange, best_loc);
- if (!bo_adev) {
- WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
+ bo_node = svm_range_get_node_by_id(prange, best_loc);
+ if (!bo_node) {
+ WARN_ONCE(1, "failed to get valid kfd node at id 0x%x\n", best_loc);
+ best_loc = 0;
+ goto out;
+ }
+
+ if (bo_node->adev->gmc.is_app_apu) {
best_loc = 0;
goto out;
}
@@ -3241,10 +3310,10 @@ svm_range_best_prefetch_location(struct svm_range *prange)
continue;
}
- if (pdd->dev->adev == bo_adev)
+ if (pdd->dev->adev == bo_node->adev)
continue;
- if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
+ if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
best_loc = 0;
break;
}
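The prefetch-location logic now reasons about kfd_node rather than amdgpu_device, and forces best_loc to 0 (system memory) on app APUs, which have no dedicated VRAM to prefetch into. A plausible shape for svm_nodes_in_same_hive(), assuming it short-circuits for partitions of one physical device and otherwise defers to the XGMI hive check the old code used directly; the real helper is defined elsewhere in this patch:

    /* Hedged sketch -- the actual helper may differ. */
    static bool nodes_in_same_hive(struct kfd_node *a, struct kfd_node *b)
    {
            if (a->adev == b->adev)
                    return true; /* partitions of one ASIC share a hive */

            return amdgpu_xgmi_same_hive(a->adev, b->adev);
    }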
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 7a33b93f9df6..21b14510882b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -48,6 +48,7 @@ struct svm_range_bo {
struct work_struct eviction_work;
uint32_t evicting;
struct work_struct release_work;
+ struct kfd_node *node;
};
enum svm_work_list_ops {
@@ -133,6 +134,7 @@ struct svm_range {
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
bool validated_once;
bool mapped_to_gpu;
+ bool is_error_flag;
};
static inline void svm_range_lock(struct svm_range *prange)
@@ -163,16 +165,17 @@ int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
unsigned long addr,
struct svm_range **parent);
-struct amdgpu_device *svm_range_get_adev_by_id(struct svm_range *prange,
- uint32_t id);
-int svm_range_vram_node_new(struct amdgpu_device *adev,
- struct svm_range *prange, bool clear);
+struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
+ uint32_t gpu_id);
+int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
+ bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
unsigned long addr, struct svm_range *parent,
struct svm_range *prange);
-int svm_range_restore_pages(struct amdgpu_device *adev,
- unsigned int pasid, uint64_t addr, bool write_fault);
+int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ uint32_t vmid, uint32_t node_id, uint64_t addr,
+ bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
struct svm_range *prange, struct mm_struct *mm,
@@ -192,13 +195,14 @@ int kfd_criu_restore_svm(struct kfd_process *p,
uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
-svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
+svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);
/* The SVM API and HMM page migration work together: the device memory
 * type is initialized to a non-zero value when page migration registers
 * device memory.
 */
-#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
+#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
+ (adev)->gmc.is_app_apu)
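The support predicate is now keyed off the amdgpu_device and admits two cases: a registered HMM pgmap (classic device-memory migration) or an app APU, which shares host memory and needs no pgmap. A hedged usage sketch:

    /* Hedged sketch -- 'adev' is any struct amdgpu_device pointer. */
    if (!KFD_IS_SVM_API_SUPPORTED(adev))
            return -EFAULT; /* no pgmap registered and not an app APU */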
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
@@ -219,8 +223,9 @@ static inline void svm_range_list_fini(struct kfd_process *p)
}
static inline int svm_range_restore_pages(struct amdgpu_device *adev,
- unsigned int pasid, uint64_t addr,
- bool write_fault)
+ unsigned int pasid,
+ uint32_t vmid, uint32_t node_id,
+ uint64_t addr, bool write_fault)
{
return -EFAULT;
}
@@ -261,6 +266,10 @@ static inline int kfd_criu_resume_svm(struct kfd_process *p)
return 0;
}
+static inline void svm_range_set_max_pages(struct amdgpu_device *adev)
+{
+}
+
#define KFD_IS_SVM_API_SUPPORTED(dev) false
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
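The CONFIG_HSA_AMD_SVM=n half of this header illustrates the standard kernel pattern for compile-time optional subsystems: every entry point gets a static inline stub with an identical signature, so call sites build unchanged without their own #ifdef. A generic sketch of the pattern with illustrative names:

    #if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
    int example_do_work(struct example *e);
    #else
    /* Hedged sketch -- the stub keeps callers ifdef-free. */
    static inline int example_do_work(struct example *e)
    {
            return -EFAULT;
    }
    #endif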
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 8e4124dcb6e4..90b86a6ac7bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -96,7 +96,7 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
return ret;
}
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_node *kfd_device_by_id(uint32_t gpu_id)
{
struct kfd_topology_device *top_dev;
@@ -107,10 +107,10 @@ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
return top_dev->gpu;
}
-struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
+ struct kfd_node *device = NULL;
down_read(&topology_lock);
@@ -125,24 +125,6 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
return device;
}
-struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev)
-{
- struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
-
- down_read(&topology_lock);
-
- list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu && top_dev->gpu->adev == adev) {
- device = top_dev->gpu;
- break;
- }
-
- up_read(&topology_lock);
-
- return device;
-}
-
/* Called with write topology_lock acquired */
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
@@ -468,7 +450,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count",
- dev->gpu ? dev->node_props.simd_count : 0);
+ dev->gpu ? (dev->node_props.simd_count *
+ NUM_XCC(dev->gpu->xcc_mask)) : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count",
@@ -492,7 +475,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "wave_front_size",
dev->node_props.wave_front_size);
sysfs_show_32bit_prop(buffer, offs, "array_count",
- dev->node_props.array_count);
+ dev->gpu ? (dev->node_props.array_count *
+ NUM_XCC(dev->gpu->xcc_mask)) : 0);
sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine",
dev->node_props.simd_arrays_per_engine);
sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array",
@@ -526,7 +510,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (dev->gpu) {
log_max_watch_addr =
- __ilog2_u32(dev->gpu->device_info.num_of_watch_points);
+ __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points);
if (log_max_watch_addr) {
dev->node_props.capability |=
@@ -548,14 +532,17 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL);
sysfs_show_32bit_prop(buffer, offs, "fw_version",
- dev->gpu->mec_fw_version);
+ dev->gpu->kfd->mec_fw_version);
sysfs_show_32bit_prop(buffer, offs, "capability",
dev->node_props.capability);
+ sysfs_show_64bit_prop(buffer, offs, "debug_prop",
+ dev->node_props.debug_prop);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
- dev->gpu->sdma_fw_version);
+ dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
dev->gpu->adev->unique_id);
-
+ sysfs_show_32bit_prop(buffer, offs, "num_xcc",
+ NUM_XCC(dev->gpu->xcc_mask));
}
return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
@@ -1157,10 +1144,10 @@ void kfd_topology_shutdown(void)
up_write(&topology_lock);
}
-static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
+static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu)
{
uint32_t hashout;
- uint32_t buf[7];
+ uint32_t buf[8];
uint64_t local_mem_size;
int i;
@@ -1177,8 +1164,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
buf[4] = gpu->adev->pdev->bus->number;
buf[5] = lower_32_bits(local_mem_size);
buf[6] = upper_32_bits(local_mem_size);
+ buf[7] = (ffs(gpu->xcc_mask) - 1) | (NUM_XCC(gpu->xcc_mask) << 16);
- for (i = 0, hashout = 0; i < 7; i++)
+ for (i = 0, hashout = 0; i < 8; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
return hashout;
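The gpu_id hash gains an eighth input so two spatial partitions of the same ASIC, which share PCI identity and memory size, still hash to distinct IDs. The packing mirrors the hunk above: the lowest XCC index goes in the low half-word and the XCC count in the high half-word:

    /* Hedged sketch of the extra hash word introduced above. */
    static uint32_t xcc_hash_word(uint32_t xcc_mask)
    {
            /* ffs() is 1-based, so subtract 1 for the first XCC index. */
            return (ffs(xcc_mask) - 1) | (NUM_XCC(xcc_mask) << 16);
    }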
@@ -1188,7 +1176,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* list then return NULL. This means a new topology device has to
* be created for this GPU.
*/
-static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
@@ -1201,7 +1189,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
/* Discrete GPUs need their own topology device list
* entries. Don't assign them to CPU/APU nodes.
*/
- if (!gpu->use_iommu_v2 &&
+ if (!gpu->kfd->use_iommu_v2 &&
dev->node_props.cpu_cores_count)
continue;
@@ -1248,7 +1236,8 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
* for APUs - If CRAT from ACPI reports more than one bank, then
* all the banks will report the same mem_clk_max information
*/
- amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info,
+ dev->gpu->xcp);
list_for_each_entry(mem, &dev->mem_props, list)
mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1275,7 +1264,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
/* set gpu (dev) flags. */
} else {
- if (!dev->gpu->pci_atomic_requested ||
+ if (!dev->gpu->kfd->pci_atomic_requested ||
dev->gpu->adev->asic_type == CHIP_HAWAII)
link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
@@ -1323,10 +1312,16 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
continue;
/* Include the CPU peer in GPU hive if connected over xGMI. */
- if (!peer_dev->gpu && !peer_dev->node_props.hive_id &&
- dev->node_props.hive_id &&
- dev->gpu->adev->gmc.xgmi.connected_to_cpu)
+ if (!peer_dev->gpu &&
+ link->iolink_type == CRAT_IOLINK_TYPE_XGMI) {
+ /*
+ * If the GPU is not part of a GPU hive, use its pci
+ * device location as the hive ID to bind with the CPU.
+ */
+ if (!dev->node_props.hive_id)
+ dev->node_props.hive_id = pci_dev_id(dev->gpu->adev->pdev);
peer_dev->node_props.hive_id = dev->node_props.hive_id;
+ }
list_for_each_entry(inbound_link, &peer_dev->io_link_props,
list) {
@@ -1569,8 +1564,8 @@ static int kfd_dev_create_p2p_links(void)
if (dev == new_dev)
break;
if (!dev->gpu || !dev->gpu->adev ||
- (dev->gpu->hive_id &&
- dev->gpu->hive_id == new_dev->gpu->hive_id))
+ (dev->gpu->kfd->hive_id &&
+ dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))
goto next;
/* check if node(s) is/are peer accessible in one direction or bi-direction */
@@ -1590,7 +1585,6 @@ out:
return ret;
}
-
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
@@ -1723,7 +1717,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
/* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info
* tables
*/
-static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev)
+static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
int i, j, k;
@@ -1805,7 +1799,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
pr_debug("Added [%d] GPU cache entries\n", num_of_entries);
}
-static int kfd_topology_add_device_locked(struct kfd_dev *gpu, uint32_t gpu_id,
+static int kfd_topology_add_device_locked(struct kfd_node *gpu, uint32_t gpu_id,
struct kfd_topology_device **dev)
{
int proximity_domain = ++topology_crat_proximity_domain;
@@ -1865,7 +1859,103 @@ err:
return res;
}
-int kfd_topology_add_device(struct kfd_dev *gpu)
+static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *dev)
+{
+ bool firmware_supported = true;
+
+ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) {
+ uint32_t mes_api_rev = (dev->gpu->adev->mes.sched_version &
+ AMDGPU_MES_API_VERSION_MASK) >>
+ AMDGPU_MES_API_VERSION_SHIFT;
+ uint32_t mes_rev = dev->gpu->adev->mes.sched_version &
+ AMDGPU_MES_VERSION_MASK;
+
+ firmware_supported = (mes_api_rev >= 14) && (mes_rev >= 64);
+ goto out;
+ }
+
+ /*
+ * Note: Any unlisted devices here are assumed to support exception handling.
+ * Add additional checks here as needed.
+ */
+ switch (KFD_GC_VERSION(dev->gpu)) {
+ case IP_VERSION(9, 0, 1):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 459;
+ break;
+ case IP_VERSION(9, 4, 1):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 60;
+ break;
+ case IP_VERSION(9, 4, 2):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 51;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 144;
+ break;
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 89;
+ break;
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 3):
+ firmware_supported = false;
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (firmware_supported)
+ dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED;
+}
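For GFX11, trap-debug firmware support is derived from the MES scheduler version word, which packs an API revision and a firmware revision into a single register. Restating the unpack from the function above with the same AMDGPU_MES_* masks:

    /* Hedged sketch -- same logic as the GFX11 branch above. */
    static bool mes_debug_fw_supported(uint32_t sched_version)
    {
            uint32_t api_rev = (sched_version & AMDGPU_MES_API_VERSION_MASK) >>
                               AMDGPU_MES_API_VERSION_SHIFT;
            uint32_t fw_rev = sched_version & AMDGPU_MES_VERSION_MASK;

            return api_rev >= 14 && fw_rev >= 64;
    }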
+
+static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
+{
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+
+ dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT |
+ HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED |
+ HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED;
+
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
+ dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 |
+ HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
+
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 4, 2))
+ dev->node_props.debug_prop |=
+ HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
+ else
+ dev->node_props.capability |=
+ HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
+ } else {
+ dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
+ HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
+
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(11, 0, 0))
+ dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
+ else
+ dev->node_props.capability |=
+ HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
+ }
+
+ kfd_topology_set_dbg_firmware_support(dev);
+}
+
+int kfd_topology_add_device(struct kfd_node *gpu)
{
uint32_t gpu_id;
struct kfd_topology_device *dev;
@@ -1916,28 +2006,37 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
- dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version;
+ dev->node_props.gfx_target_version =
+ gpu->kfd->device_info.gfx_target_version;
dev->node_props.vendor_id = gpu->adev->pdev->vendor;
dev->node_props.device_id = gpu->adev->pdev->device;
dev->node_props.capability |=
((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
HSA_CAP_ASIC_REVISION_MASK);
+
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
+ if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
+ dev->node_props.location_id |= dev->gpu->node_id;
+
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
dev->node_props.max_engine_clk_ccompute =
cpufreq_quick_get_max(0) / 1000;
- dev->node_props.drm_render_minor =
- gpu->shared_resources.drm_render_minor;
- dev->node_props.hive_id = gpu->hive_id;
+ if (gpu->xcp)
+ dev->node_props.drm_render_minor = gpu->xcp->ddev->render->index;
+ else
+ dev->node_props.drm_render_minor =
+ gpu->kfd->shared_resources.drm_render_minor;
+
+ dev->node_props.hive_id = gpu->kfd->hive_id;
dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu);
dev->node_props.num_sdma_xgmi_engines =
kfd_get_num_xgmi_sdma_engines(gpu);
dev->node_props.num_sdma_queues_per_engine =
- gpu->device_info.num_sdma_queues_per_engine -
- gpu->device_info.num_reserved_sdma_queues_per_engine;
+ gpu->kfd->device_info.num_sdma_queues_per_engine -
+ gpu->kfd->device_info.num_reserved_sdma_queues_per_engine;
dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
dev->gpu->adev->gds.gws_size : 0;
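The render-minor selection above is the user-visible side of partitioning: a spatial partition (gpu->xcp set) reports the render node of its own DRM device, while an unpartitioned device keeps the shared one. The same logic restated as a helper (hypothetical; the patch open-codes it):

    /* Hedged sketch -- the patch open-codes this selection. */
    static int node_drm_render_minor(struct kfd_node *gpu)
    {
            if (gpu->xcp)
                    return gpu->xcp->ddev->render->index;

            return gpu->kfd->shared_resources.drm_render_minor;
    }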
@@ -1966,20 +2065,18 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
default:
- if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1))
- dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
- else
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1))
WARN(1, "Unexpected ASIC family %u",
dev->gpu->adev->asic_type);
+ else
+ kfd_topology_set_capabilities(dev);
}
/*
* Overwrite ATS capability according to needs_iommu_device to fix
* potential missing corresponding bit in CRAT of BIOS.
*/
- if (dev->gpu->use_iommu_v2)
+ if (dev->gpu->kfd->use_iommu_v2)
dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
else
dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
@@ -2007,7 +2104,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
HSA_CAP_RASEVENTNOTIFY : 0;
- if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
+ if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
kfd_debug_print_topology();
@@ -2079,7 +2176,7 @@ static void kfd_topology_update_io_links(int proximity_domain)
}
}
-int kfd_topology_remove_device(struct kfd_dev *gpu)
+int kfd_topology_remove_device(struct kfd_node *gpu)
{
struct kfd_topology_device *dev, *tmp;
uint32_t gpu_id;
@@ -2119,7 +2216,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
* Return - 0: On success (@kdev will be NULL for non-GPU nodes)
* -1: If end of list
*/
-int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev)
{
struct kfd_topology_device *top_dev;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index fca30d00a9bb..cba2cd5ed9d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -31,6 +31,11 @@
#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 6
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 7
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT \
+ (29 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT)
+
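These macros advertise which bits of an address-watch mask the hardware honours: valid bits run from the LO bit (6 on GFX9, 7 on GFX10 and later) up to bit 29, the value encoded into debug_prop through HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT. Assuming that encoding, a consumer could derive the usable mask as follows (GENMASK is the standard kernel helper):

    /* Hedged sketch -- assumes the HI bit is 29, as encoded above. */
    static uint32_t usable_watch_addr_mask(unsigned int lo_bit)
    {
            return GENMASK(29, lo_bit); /* bits [29:lo_bit] are honoured */
    }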
struct kfd_node_properties {
uint64_t hive_id;
uint32_t cpu_cores_count;
@@ -42,6 +47,7 @@ struct kfd_node_properties {
uint32_t cpu_core_id_base;
uint32_t simd_id_base;
uint32_t capability;
+ uint64_t debug_prop;
uint32_t max_waves_per_simd;
uint32_t lds_size_in_kb;
uint32_t gds_size_in_kb;
@@ -75,7 +81,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -93,7 +99,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CACHE_SIBLINGMAP_SIZE];
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj;
struct attribute attr;
uint32_t sibling_map_size;
@@ -113,7 +119,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -135,7 +141,7 @@ struct kfd_topology_device {
struct list_head io_link_props;
struct list_head p2p_link_props;
struct list_head perf_props;
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj_node;
struct kobject *kobj_mem;
struct kobject *kobj_cache;