author	Graham Sider <Graham.Sider@amd.com>	2023-03-30 20:47:05 +0300
committer	Alex Deucher <alexander.deucher@amd.com>	2023-04-12 01:03:45 +0300
commit	27488686cb1835f1c69d3efb0eedeb411f675d73 (patch)
tree	e844237c8b128aea0736a63def3785cf6338f8a6 /drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
parent	00fa40353bf3894adb495f8cce10a8bce43cd375 (diff)
download	linux-27488686cb1835f1c69d3efb0eedeb411f675d73.tar.xz
drm/amdgpu: Enable GFX11 SDMA context empty interrupt
Enable SDMA queue empty context switching. SDMA context switching due to quantum programming is no longer done here (as of SDMA v6), so rename sdma_v6_0_ctx_switch_enable to sdma_v6_0_ctxempty_int_enable to reflect this.

Also program SDMAx_QUEUEx_SCHEDULE_CNTL for context switching due to quantum in KFD. Set it to amdgpu_sdma_phase_quantum (defaults to 32, i.e. 3200us).

Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
Reviewed-by: Stanley Yang <Stanley.Yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
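As a side note on the numbers above: the commit message equates the default amdgpu_sdma_phase_quantum of 32 with 3200us, which implies a 100us-per-unit granularity. The following minimal, self-contained sketch only illustrates that arithmetic; sdma_quantum_to_us() is a hypothetical helper for this page, not kernel code.

#include <stdio.h>

/* Hypothetical helper, not kernel code: converts the amdgpu_sdma_phase_quantum
 * module parameter into microseconds, assuming the 100 us per-unit granularity
 * implied by the commit message ("defaults to 32 i.e. 3200us"). */
static unsigned int sdma_quantum_to_us(unsigned int phase_quantum)
{
	return phase_quantum * 100;
}

int main(void)
{
	unsigned int phase_quantum = 32;	/* module parameter default */

	printf("%u us\n", sdma_quantum_to_us(phase_quantum));	/* prints "3200 us" */
	return 0;
}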
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c	28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index fdc6dfd60621..fc0f14ed93d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -403,15 +403,26 @@ static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts
*
* @adev: amdgpu_device pointer
- * @enable: enable/disable the DMA MEs context switch.
+ * @enable: enable/disable context switching due to queue empty conditions
*
- * Halt or unhalt the async dma engines context switch.
+ * Enable or disable the async dma engines queue empty context switch.
*/
-static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable)
{
+ u32 f32_cntl;
+ int i;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ CTXEMPTY_INT_ENABLE, enable ? 1 : 0);
+ WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl);
+ }
+ }
}
/**
@@ -579,10 +590,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
ring->sched.ready = true;
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v6_0_ctx_switch_enable(adev, true);
+ if (amdgpu_sriov_vf(adev))
sdma_v6_0_enable(adev, true);
- }
r = amdgpu_ring_test_helper(ring);
if (r) {
@@ -778,7 +787,6 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
int r = 0;
if (amdgpu_sriov_vf(adev)) {
- sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
/* set RB registers */
@@ -799,7 +807,7 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
/* unhalt the MEs */
sdma_v6_0_enable(adev, true);
/* enable sdma ring preemption */
- sdma_v6_0_ctx_switch_enable(adev, true);
+ sdma_v6_0_ctxempty_int_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = sdma_v6_0_gfx_resume(adev);
@@ -1340,7 +1348,7 @@ static int sdma_v6_0_hw_fini(void *handle)
return 0;
}
- sdma_v6_0_ctx_switch_enable(adev, false);
+ sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
return 0;
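For readers unfamiliar with the RREG32/REG_SET_FIELD/WREG32 idiom used in the new sdma_v6_0_ctxempty_int_enable() above, the standalone sketch below reproduces the same read-modify-write pattern with a mock register and an illustrative bit position; the real SDMA0_CNTL field layout lives in the amdgpu register headers and is not reproduced here. Note that the real driver skips this programming entirely under SR-IOV (amdgpu_sriov_vf), presumably leaving it to the host side.

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout -- NOT the real SDMA0_CNTL definition. */
#define CTXEMPTY_INT_ENABLE_SHIFT	3
#define CTXEMPTY_INT_ENABLE_MASK	(1u << CTXEMPTY_INT_ENABLE_SHIFT)

static uint32_t mock_sdma0_cntl = 0x000000a5;	/* stands in for the MMIO register */

/* Mock register accessors standing in for the kernel's RREG32()/WREG32(). */
static uint32_t rreg32(void)		{ return mock_sdma0_cntl; }
static void     wreg32(uint32_t val)	{ mock_sdma0_cntl = val; }

/* Same read-modify-write pattern as sdma_v6_0_ctxempty_int_enable():
 * only the CTXEMPTY_INT_ENABLE field changes, all other bits are preserved. */
static void ctxempty_int_enable(int enable)
{
	uint32_t f32_cntl = rreg32();

	f32_cntl &= ~CTXEMPTY_INT_ENABLE_MASK;
	if (enable)
		f32_cntl |= CTXEMPTY_INT_ENABLE_MASK;
	wreg32(f32_cntl);
}

int main(void)
{
	ctxempty_int_enable(1);
	printf("after enable:  0x%08x\n", mock_sdma0_cntl);	/* 0x000000ad */
	ctxempty_int_enable(0);
	printf("after disable: 0x%08x\n", mock_sdma0_cntl);	/* 0x000000a5 */
	return 0;
}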