author     Shiwu Zhang <shiwu.zhang@amd.com>          2022-06-03 06:08:12 +0300
committer  Alex Deucher <alexander.deucher@amd.com>   2023-06-09 16:42:08 +0300
commit     147862d00bcf7e23e0a125f910f5db224f7b6722 (patch)
tree       67285e91d625e738a22d06b4fe9eb6f64aa5410f /drivers/gpu/drm/amd
parent     89cf4549a949b4ba3ce771163b75285979c95602 (diff)
download   linux-147862d00bcf7e23e0a125f910f5db224f7b6722.tar.xz
drm/amdgpu: enable the ring and IB test for slave kcq
With the mec FW updated to utilize the mqd base set by the driver for
kcq mapping, the slave kcq ring test and IB test can be re-enabled.

Signed-off-by: Shiwu Zhang <shiwu.zhang@amd.com>
Reviewed-by: Le Ma <Le.Ma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c   59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c     5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c   12
3 files changed, 32 insertions(+), 44 deletions(-)
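The bookkeeping the patch relies on throughout is the flat per-XCC indexing j = i + xcc_id * adev->gfx.num_compute_rings, which maps a local compute-ring index on one XCC to a slot in the shared compute_ring[] / mqd_backup[] arrays. The standalone C sketch below only illustrates that arithmetic; the ring and XCC counts are made-up example values, not taken from the patch.

#include <stdio.h>

/* Illustrative values only; the real counts come from adev->gfx at runtime. */
#define NUM_COMPUTE_RINGS 8
#define NUM_XCC           2

int main(void)
{
        int xcc_id, i;

        for (xcc_id = 0; xcc_id < NUM_XCC; xcc_id++)
                for (i = 0; i < NUM_COMPUTE_RINGS; i++)
                        /* same formula as j = i + xcc_id * adev->gfx.num_compute_rings */
                        printf("xcc %d, local ring %d -> global slot %d\n",
                               xcc_id, i, i + xcc_id * NUM_COMPUTE_RINGS);
        return 0;
}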
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 4a4d71ff9b95..682c157f2d8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -449,8 +449,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
                         ring->mqd_size = mqd_size;
                         /* prepare MQD backup */
-                        adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
-                        if (!adev->gfx.mec.mqd_backup[j])
+                        adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL);
+                        if (!adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings])
                                 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
                 }
         }
@@ -502,22 +502,20 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
                 return -EINVAL;
         spin_lock(&kiq->ring_lock);
-        if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
-                if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
-                                      adev->gfx.num_compute_rings)) {
-                        spin_unlock(&kiq->ring_lock);
-                        return -ENOMEM;
-                }
+        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+                              adev->gfx.num_compute_rings)) {
+                spin_unlock(&kiq->ring_lock);
+                return -ENOMEM;
+        }
-                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-                        j = i + xcc_id * adev->gfx.num_compute_rings;
-                        kiq->pmf->kiq_unmap_queues(kiq_ring,
-                                                   &adev->gfx.compute_ring[i],
-                                                   RESET_QUEUES, 0, 0);
-                }
+        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                j = i + xcc_id * adev->gfx.num_compute_rings;
+                kiq->pmf->kiq_unmap_queues(kiq_ring,
+                                           &adev->gfx.compute_ring[i],
+                                           RESET_QUEUES, 0, 0);
         }
-        if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
+        if (kiq_ring->sched.ready && !adev->job_hang)
                 r = amdgpu_ring_test_helper(kiq_ring);
         spin_unlock(&kiq->ring_lock);
@@ -598,26 +596,23 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
         DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
                  kiq_ring->queue);
         spin_lock(&kiq->ring_lock);
-        /* No need to map kcq on the slave */
-        if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
-                r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
-                                      adev->gfx.num_compute_rings +
-                                      kiq->pmf->set_resources_size);
-                if (r) {
-                        DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-                        spin_unlock(&kiq->ring_lock);
-                        return r;
-                }
+        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+                              adev->gfx.num_compute_rings +
+                              kiq->pmf->set_resources_size);
+        if (r) {
+                DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+                spin_unlock(&kiq->ring_lock);
+                return r;
+        }
-                if (adev->enable_mes)
-                        queue_mask = ~0ULL;
+        if (adev->enable_mes)
+                queue_mask = ~0ULL;
-                kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
-                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-                        j = i + xcc_id * adev->gfx.num_compute_rings;
+        kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                j = i + xcc_id * adev->gfx.num_compute_rings;
                 kiq->pmf->kiq_map_queues(kiq_ring,
-                                         &adev->gfx.compute_ring[i]);
-                }
+                                         &adev->gfx.compute_ring[j]);
         }
         r = amdgpu_ring_test_helper(kiq_ring);
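With the master/slave gate removed, amdgpu_gfx_enable_kcq() now reserves KIQ ring space on every XCC: one map_queues packet per compute ring plus a single set_resources packet. The sketch below only mirrors that budget arithmetic; the dword sizes are hypothetical placeholders, since the real values come from the ASIC's KIQ packet-function table (kiq->pmf).

#include <stdio.h>

/* Hypothetical dword sizes; real values come from kiq->pmf for the ASIC. */
#define MAP_QUEUES_SIZE    7
#define SET_RESOURCES_SIZE 8
#define NUM_COMPUTE_RINGS  8

int main(void)
{
        /* Mirrors the amdgpu_ring_alloc() budget in amdgpu_gfx_enable_kcq():
         * one map_queues packet per kcq plus one set_resources packet. */
        unsigned int ndw = MAP_QUEUES_SIZE * NUM_COMPUTE_RINGS + SET_RESOURCES_SIZE;

        printf("KIQ ring allocation: %u dwords\n", ndw);
        return 0;
}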
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index c955c3f060cd..b27ac48ca123 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -433,11 +433,6 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                 else
                         tmo = tmo_gfx;
-                /* skip ib test on the slave kcq */
-                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
-                    !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id))
-                        continue;
-
                 r = amdgpu_ring_test_ib(ring, tmo);
                 if (!r) {
                         DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 4ef39837e4c7..ec273a217666 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1956,13 +1956,11 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
                 if (r)
                         return r;
-                /* skip ring test on slave kcq */
-                if (amdgpu_gfx_is_master_xcc(adev, i)) {
-                        for (j = 0; j < adev->gfx.num_compute_rings; j++) {
-                                ring = &adev->gfx.compute_ring[j +
-                                        i * adev->gfx.num_compute_rings];
-                                amdgpu_ring_test_helper(ring);
-                        }
+                for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+                        ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings];
+                        r = amdgpu_ring_test_helper(ring);
+                        if (r)
+                                return r;
                 }
                 gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i);
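After this change, gfx_v9_4_3_cp_resume() runs the ring test on every compute ring of every XCC and propagates the first failure instead of discarding the helper's return value. A self-contained sketch of that control flow follows; the stub ring_test() stands in for amdgpu_ring_test_helper() and the ring/XCC counts are example values only.

#include <stdio.h>

#define NUM_COMPUTE_RINGS 8   /* example value */
#define NUM_XCC           2   /* example value */

/* Stub for amdgpu_ring_test_helper(); returns 0 on success. */
static int ring_test(int slot)
{
        (void)slot;
        return 0;
}

int main(void)
{
        int i, j, r;

        for (i = 0; i < NUM_XCC; i++) {
                for (j = 0; j < NUM_COMPUTE_RINGS; j++) {
                        /* same slot math as the patch: contiguous compute_ring[]
                         * with NUM_COMPUTE_RINGS entries per XCC */
                        r = ring_test(j + i * NUM_COMPUTE_RINGS);
                        if (r) {
                                printf("ring test failed: xcc %d ring %d (%d)\n", i, j, r);
                                return r;   /* propagate the failure, as the patch now does */
                        }
                }
        }
        printf("all %d compute rings passed\n", NUM_XCC * NUM_COMPUTE_RINGS);
        return 0;
}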