author		James Zhu <James.Zhu@amd.com>	2020-06-18 06:58:04 +0300
committer	Alex Deucher <alexander.deucher@amd.com>	2020-07-01 08:59:24 +0300
commit		450da2ef41d6551bf5bc2fd50c00ec3bcfe1047a (patch)
tree		d8540711f6e2d0b57419b9fb3fa98b5858cced07 /drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
parent		a20ace1b02cc80bab4a47fbb81d68db4b234450a (diff)
download	linux-450da2ef41d6551bf5bc2fd50c00ec3bcfe1047a.tar.xz
drm/amdgpu: fix unused variable
SOC15_WAIT_ON_RREG's return value need not always be handled by the
caller. The new design has the macro return its status instead of
writing it into a caller-supplied variable, which fixes this kind of
unused-variable warning.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reported-by: kernel test robot <lkp@intel.com>
Acked-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
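For context, the hunks below switch call sites from passing a status variable into SOC15_WAIT_ON_RREG to optionally consuming its return value. A minimal sketch of such a returning poll helper is shown here; the function name, timeout policy and comments are illustrative assumptions, not the actual soc15_common.h macro:

/*
 * Illustrative sketch only: a register poll that reports its status
 * through the return value, so callers that do not care can simply
 * drop it without declaring a dummy variable.
 */
static int example_wait_on_rreg(struct amdgpu_device *adev, uint32_t reg,
				uint32_t expected, uint32_t mask)
{
	uint32_t loop = adev->usec_timeout;

	while (loop--) {
		if ((RREG32(reg) & mask) == expected)
			return 0;	/* condition met */
		udelay(1);		/* poll roughly once per microsecond */
	}

	return -ETIMEDOUT;		/* register never reached the expected value */
}

Callers that need the status assign the return value (r = SOC15_WAIT_ON_RREG(...); if (r) return r;), while fire-and-forget sites invoke it as a plain statement; that is exactly the pattern the diff below follows.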
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 34 ++++++++++++++++------------------
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index ba1c4fbc19dc..e99bef6e2354 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -549,7 +549,6 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
- int ret = 0;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -589,7 +588,7 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
- SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
@@ -1302,25 +1301,24 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
- int ret_code = 0;
uint32_t tmp;
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* wait for read ptr to be equal to write ptr */
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
- SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
- SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
@@ -1343,7 +1341,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
}
/* wait for vcn idle */
- SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
if (r)
return r;
@@ -1351,7 +1349,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
UVD_LMI_STATUS__READ_CLEAN_MASK |
UVD_LMI_STATUS__WRITE_CLEAN_MASK |
UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
if (r)
return r;
@@ -1362,7 +1360,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
if (r)
return r;
@@ -1412,8 +1410,8 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
- SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
@@ -1425,7 +1423,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
/* wait for ACK */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
- UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
/* Stall DPG before WPTR/RPTR reset */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
@@ -1458,13 +1456,13 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
- UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
} else {
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
@@ -1701,8 +1699,8 @@ static int vcn_v2_5_wait_for_idle(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
- UVD_STATUS__IDLE, ret);
+ ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE);
if (ret)
return ret;
}