Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/drm_accel.c | 2
-rw-r--r--  drivers/accel/habanalabs/common/command_submission.c | 3
-rw-r--r--  drivers/accel/habanalabs/common/debugfs.c | 18
-rw-r--r--  drivers/accel/habanalabs/common/device.c | 55
-rw-r--r--  drivers/accel/habanalabs/common/firmware_if.c | 25
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs.h | 43
-rw-r--r--  drivers/accel/habanalabs/common/hw_queue.c | 17
-rw-r--r--  drivers/accel/habanalabs/common/hwmon.c | 29
-rw-r--r--  drivers/accel/habanalabs/common/mmu/Makefile | 2
-rw-r--r--  drivers/accel/habanalabs/common/mmu/mmu.c | 223
-rw-r--r--  drivers/accel/habanalabs/common/mmu/mmu_v1.c | 354
-rw-r--r--  drivers/accel/habanalabs/common/mmu/mmu_v2.c | 338
-rw-r--r--  drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c | 24
-rw-r--r--  drivers/accel/habanalabs/common/security.c | 33
-rw-r--r--  drivers/accel/habanalabs/common/security.h | 3
-rw-r--r--  drivers/accel/habanalabs/gaudi/gaudi.c | 9
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2.c | 308
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2P.h | 15
-rw-r--r--  drivers/accel/habanalabs/goya/goya.c | 12
-rw-r--r--  drivers/accel/habanalabs/goya/goya_coresight.c | 3
-rw-r--r--  drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h | 2
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c | 32
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 12
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h | 7
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 49
-rw-r--r--  drivers/accel/ivpu/ivpu_fw_log.c | 6
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.c | 70
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.h | 6
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c | 10
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c | 10
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c | 12
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c | 20
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 10
-rw-r--r--  drivers/accel/ivpu/vpu_boot_api.h | 46
-rw-r--r--  drivers/accel/ivpu/vpu_jsm_api.h | 32
-rw-r--r--  drivers/accel/qaic/mhi_controller.c | 4
-rw-r--r--  drivers/accel/qaic/qaic.h | 3
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 59
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 140
-rw-r--r--  drivers/char/agp/agp.h | 1
-rw-r--r--  drivers/dma-buf/dma-fence.c | 8
-rw-r--r--  drivers/dma-buf/dma-resv.c | 4
-rw-r--r--  drivers/firmware/Kconfig | 1
-rw-r--r--  drivers/firmware/sysfb.c | 51
-rw-r--r--  drivers/firmware/sysfb_simplefb.c | 5
-rw-r--r--  drivers/gpu/drm/Kconfig | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 879
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 202
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 251
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 653
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 155
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c | 122
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/clearstate_si.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c | 142
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 767
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.h | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 310
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 570
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c | 121
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 672
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v14_0.h | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 262
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 1339
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h | 37
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 17
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/TODO | 110
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 170
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 72
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c | 119
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 114
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 293
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 70
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 106
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 97
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 109
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 167
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/audio.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 257
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 372
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 145
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 132
-rw-r--r--  drivers/gpu/drm/amd/display/include/audio_types.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h | 9
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/arct_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h | 287
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h | 1348
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h | 10
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h | 24
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h | 65
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h | 219
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h | 735
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h | 388
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h | 1411
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h | 468
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h | 692
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h | 279
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h | 1029
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h | 1672
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h | 7627
-rw-r--r--  drivers/gpu/drm/amd/include/atom-bits.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/beige_goby_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 23
-rw-r--r--  drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/dm_pp_interface.h | 9
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/navi12_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/navi14_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/pptable.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/renoir_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/v10_structs.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/vangogh_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/vega10_ip_offset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/vega20_ip_offset.h | 78
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 42
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 40
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 306
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 10
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 69
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 30
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 28
-rw-r--r--  drivers/gpu/drm/bridge/chrontel-ch7033.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/imx/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/bridge/imx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c | 207
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c | 154
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c | 21
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 16
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 20
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611.c | 9
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 19
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 18
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 22
-rw-r--r--  drivers/gpu/drm/bridge/samsung-dsim.c | 18
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 38
-rw-r--r--  drivers/gpu/drm/bridge/simple-bridge.c | 17
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 45
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 195
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 18
-rw-r--r--  drivers/gpu/drm/ci/build.sh | 1
-rw-r--r--  drivers/gpu/drm/ci/gitlab-ci.yml | 14
-rw-r--r--  drivers/gpu/drm/ci/test.yml | 31
-rw-r--r--  drivers/gpu/drm/ci/testlist.txt | 1937
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt | 3
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt | 2
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt | 30
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt | 17
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt | 7
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt | 18
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt | 2
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt | 18
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt | 2
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt | 5
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt | 28
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt | 7
-rw-r--r--  drivers/gpu/drm/display/drm_dp_aux_bus.c | 2
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 17
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 23
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 17
-rw-r--r--  drivers/gpu/drm/drm_bridge_connector.c | 16
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 25
-rw-r--r--  drivers/gpu/drm/drm_exec.c | 2
-rw-r--r--  drivers/gpu/drm/drm_file.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 4
-rw-r--r--  drivers/gpu/drm/drm_managed.c | 39
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 2
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 22
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 2
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 12
-rw-r--r--  drivers/gpu/drm/drm_print.c | 29
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 36
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 7
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 30
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 36
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 375
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 128
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 229
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 67
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 103
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs_params.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 168
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 38
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 192
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 83
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev_fb.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.c | 137
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 78
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_regs.h | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 165
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 176
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.h | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 255
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 202
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr_regs.h | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c | 130
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 54
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 50
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_tlb.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 126
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 64
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_syncmap.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c | 4
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_pch.c | 16
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_pch.h | 6
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-blkctl.c | 13
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-ctxld.c | 14
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dev.c | 19
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dev.h | 1
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dpr.c | 21
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-drv.c | 12
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dtg.c | 26
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-scaler.c | 21
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-ss.c | 12
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-ldb.c | 2
-rw-r--r--  drivers/gpu/drm/ingenic/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/lima/lima_ctx.c | 2
-rw-r--r--  drivers/gpu/drm/lima/lima_ctx.h | 1
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 23
-rw-r--r--  drivers/gpu/drm/lima/lima_gp.c | 39
-rw-r--r--  drivers/gpu/drm/lima/lima_l2_cache.c | 6
-rw-r--r--  drivers/gpu/drm/lima/lima_mmu.c | 18
-rw-r--r--  drivers/gpu/drm/lima/lima_pmu.c | 3
-rw-r--r--  drivers/gpu/drm/lima/lima_pp.c | 37
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.c | 38
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.h | 3
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_drv.c | 2
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/mcde/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c | 31
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 26
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 6
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.c | 24
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.h | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_dsi.c | 23
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_dsi.h | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.c | 35
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 14
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioc32.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/outp.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 22
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 12
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 231
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-himax8279d.c | 18
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c | 322
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 116
-rw-r--r--  drivers/gpu/drm/panel/panel-himax-hx83112a.c | 372
-rw-r--r--  drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c | 23
-rw-r--r--  drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c | 265
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt35510.c | 424
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c | 8
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36672e.c | 643
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 81
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7703.c | 104
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-r66451.c | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-vtdr6130.c | 1
-rw-r--r--  drivers/gpu/drm/pl111/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom-bits.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 90
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_cayman.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_ci.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_smc.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/kv_smc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/nislands_smc.h | 51
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770_smc.h | 27
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 63
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 132
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.h | 21
-rw-r--r--  drivers/gpu/drm/radeon/smu7.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/smu7_discrete.h | 51
-rw-r--r--  drivers/gpu/drm/radeon/smu7_fusion.h | 42
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/renesas/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/renesas/Makefile | 1
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/Makefile | 8
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c | 422
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h | 89
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c | 175
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h | 78
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c | 72
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h | 32
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c | 371
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h | 43
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c | 349
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h | 82
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c | 549
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.h | 5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 13
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.h | 3
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 11
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x-spi.c | 7
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 370
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.h | 5
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 14
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 59
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 21
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 17
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 18
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 1
-rw-r--r--  drivers/gpu/drm/tests/drm_managed_test.c | 77
-rw-r--r--  drivers/gpu/drm/tests/drm_mm_test.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 19
-rw-r--r--  drivers/gpu/drm/ttm/tests/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_test.c | 622
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c | 48
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h | 3
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_pool_test.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_resource_test.c | 335
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_tt_test.c | 295
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 76
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 15
-rw-r--r--  drivers/gpu/drm/tve200/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_debugfs.c | 15
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 10
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_submit.c | 6
-rw-r--r--  drivers/gpu/drm/vkms/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/vkms/vkms_composer.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 33
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 300
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 32
-rw-r--r--  drivers/gpu/drm/xe/.kunitconfig | 5
-rw-r--r--  drivers/gpu/drm/xe/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 40
-rw-r--r--  drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h | 44
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h | 174
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h | 3
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_messages_abi.h | 2
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h | 79
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h | 118
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 10
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h | 3
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c (renamed from drivers/gpu/drm/xe/xe_display.c) | 0
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.h (renamed from drivers/gpu/drm/xe/xe_display.h) | 0
-rw-r--r--  drivers/gpu/drm/xe/display/xe_plane_initial.c | 67
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_mi_commands.h | 3
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h | 6
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h | 27
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_lrc_layout.h | 9
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_pcode_regs.h | 21
-rw-r--r--  drivers/gpu/drm/xe/tests/Makefile | 7
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c | 201
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_guc_relay_test.c | 522
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_kunit_helpers.c | 90
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_kunit_helpers.h | 17
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs.c | 36
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs_test.c | 1
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs_test.h | 1
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci.c | 3
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci_test.c | 5
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci_test.h | 2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_rtp_test.c | 10
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_test_mod.c | 10
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_wa_test.c | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 134
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_bo_types.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump.c | 55
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump_types.h | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 75
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 166
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 42
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 133
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h | 55
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.c | 81
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc.c | 71
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_proxy.c | 537
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_proxy.h | 20
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_submit.c | 20
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_submit.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_types.h | 33
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 89
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.c | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 38
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_printk.h | 44
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_printk.h | 34
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_topology.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_types.h | 118
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.c | 115
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ads.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c | 255
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.h | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct_types.h | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_db_mgr.c | 266
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_db_mgr.h | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_fwif.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_hwconfig.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_hxg_helpers.h | 108
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_log.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.c | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_relay.c | 941
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_relay.h | 37
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_relay_types.h | 36
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 88
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit_types.h | 18
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_types.h | 47
-rw-r--r--  drivers/gpu/drm/xe/xe_heci_gsc.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_huc.c | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_huc.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c | 144
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 38
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_types.h | 82
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.c | 32
-rw-r--r--  drivers/gpu/drm/xe/xe_irq.c | 136
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.c | 38
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc_types.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_memirq.c | 430
-rw-r--r--  drivers/gpu/drm/xe/xe_memirq.h | 26
-rw-r--r--  drivers/gpu/drm/xe/xe_memirq_types.h | 37
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_mocs.c | 27
-rw-r--r--  drivers/gpu/drm/xe/xe_pat.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_pcode_api.h | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c | 38
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_query.c | 50
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_sr.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_whitelist.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_ring_ops.c | 60
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job.c | 38
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job.h | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job_types.h | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov.c | 32
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_types.h | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_tile_sysfs.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_trace.h | 55
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_tuning.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_uc.c | 33
-rw-r--r--  drivers/gpu/drm/xe/xe_uc.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_uc_fw.c | 60
-rw-r--r--  drivers/gpu/drm/xe/xe_uc_fw_types.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 202
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.h | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_vram_freq.c | 128
-rw-r--r--  drivers/gpu/drm/xe/xe_vram_freq.h | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c | 191
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_wait_user_fence.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_wopcm_types.h | 4
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.c | 2
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dp.c | 22
-rw-r--r--  drivers/gpu/host1x/bus.c | 2
-rw-r--r--  drivers/gpu/host1x/bus.h | 2
-rw-r--r--  drivers/gpu/host1x/cdma.c | 3
-rw-r--r--  drivers/media/i2c/tc358743.c | 7
-rw-r--r--  drivers/staging/sm750fb/Kconfig | 1
-rw-r--r--  drivers/video/Kconfig | 9
-rw-r--r--  drivers/video/Makefile | 7
-rw-r--r--  drivers/video/cmdline.c | 2
-rw-r--r--  drivers/video/fbdev/Kconfig | 35
-rw-r--r--  drivers/video/fbdev/core/Kconfig | 2
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 2
-rw-r--r--  drivers/video/fbdev/efifb.c | 225
-rw-r--r--  drivers/video/fbdev/geode/Kconfig | 3
-rw-r--r--  drivers/video/fbdev/simplefb.c | 2
-rw-r--r--  drivers/video/fbdev/vesafb.c | 78
-rw-r--r--  drivers/video/screen_info_generic.c | 146
-rw-r--r--  drivers/video/screen_info_pci.c | 136
859 files changed, 47191 insertions, 8906 deletions
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 24cac4c0274b..16c3edb8c46e 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -23,7 +23,7 @@ static struct idr accel_minors_idr;
static struct dentry *accel_debugfs_root;
-static struct device_type accel_sysfs_device_minor = {
+static const struct device_type accel_sysfs_device_minor = {
.name = "accel_minor"
};
diff --git a/drivers/accel/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index 3aa6eeef443b..39e23d625a3c 100644
--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -1360,9 +1360,8 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
return -EINVAL;
}
- if (!hl_device_operational(hdev, &status)) {
+ if (!hl_device_operational(hdev, &status))
return -EBUSY;
- }
if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
!hdev->supports_staged_submission) {
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 01f071d52570..b1c88d1837d9 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -484,7 +484,7 @@ static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- char kbuf[MMU_KBUF_SIZE];
+ char kbuf[MMU_KBUF_SIZE] = {0};
char *c;
ssize_t rc;
@@ -546,7 +546,7 @@ static ssize_t mmu_ack_error_value_write(struct file *file,
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- char kbuf[MMU_KBUF_SIZE];
+ char kbuf[MMU_KBUF_SIZE] = {0};
ssize_t rc;
if (count > sizeof(kbuf) - 1)
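The two hunks above zero-initialize the on-stack buffer, so whatever is copied in from userspace is always followed by a NUL byte before the string is parsed. A minimal sketch of the same pattern; the buffer size, names and surrounding handler here are illustrative, not the driver's actual code:

	char kbuf[64] = {0};			/* every byte starts as '\0' */

	if (count > sizeof(kbuf) - 1)		/* keep room for the terminator */
		return -EINVAL;
	if (copy_from_user(kbuf, buf, count))	/* kbuf[count..] stays zeroed */
		return -EFAULT;
	/* kbuf is now a valid C string even if userspace sent no '\0' */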
@@ -1643,19 +1643,19 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hl_data64b_fops);
debugfs_create_file("set_power_state",
- 0200,
+ 0644,
root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
- 0200,
+ 0644,
root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
- 0200,
+ 0644,
root,
dev_entry,
&hl_clk_gate_fops);
@@ -1667,13 +1667,13 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
- 0644,
+ 0400,
root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
- 0644,
+ 0400,
root,
dev_entry,
&hl_razwi_check_fops);
@@ -1706,7 +1706,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
- 0600,
+ 0644,
root,
dev_entry,
&hl_state_dump_fops);
@@ -1724,7 +1724,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
- 0444,
+ 0644,
root,
entry,
&hl_debugfs_fops);
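The mode arguments in this file are plain octal permission bits: 0400 is owner read-only, 0200 owner write-only, and 0644 owner read/write plus group/other read. A hedged sketch of creating one such entry; the entry name, parent directory and fops here are placeholders, not taken from this patch:

	/* Owner may read and write; everyone else may only read (0644). */
	debugfs_create_file("state_dump", 0644, parent_dir, priv, &my_fops);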
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index a73bd4be94b1..8f92445c5a90 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -55,7 +55,8 @@ static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_regi
if (is_power_of_2(prop->dram_pci_bar_size))
bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
else
- bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
+ bar_base_addr = region->region_base +
+ div64_u64((addr - region->region_base), prop->dram_pci_bar_size) *
prop->dram_pci_bar_size;
old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
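For a power-of-two BAR size the base is found by masking; for other sizes, the hunk above now rounds addr down to a multiple of the BAR size measured from the region base rather than from address zero, so the chosen window always starts inside the DRAM region and still covers addr. A sketch of just the arithmetic, assuming the standard kernel helpers is_power_of_2() and div64_u64():

	static u64 pick_bar_base(u64 addr, u64 region_base, u64 bar_size)
	{
		if (is_power_of_2(bar_size))
			return addr & ~(bar_size - 1ull);
		/* round down relative to the region base, not to zero */
		return region_base +
		       div64_u64(addr - region_base, bar_size) * bar_size;
	}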
@@ -1034,14 +1035,14 @@ static void device_early_fini(struct hl_device *hdev)
static bool is_pci_link_healthy(struct hl_device *hdev)
{
- u16 vendor_id;
+ u16 device_id;
if (!hdev->pdev)
return false;
- pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
+ pci_read_config_word(hdev->pdev, PCI_DEVICE_ID, &device_id);
- return (vendor_id == PCI_VENDOR_ID_HABANALABS);
+ return (device_id == hdev->pdev->device);
}
static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
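The health check above relies on a PCIe property: configuration reads over a dead link return all ones, so the read-back word can never equal a valid ID. Comparing the device ID against the value cached at probe time therefore works for every ASIC the driver binds to, where the old vendor-ID compare only matched one constant. The idea in isolation, as a sketch rather than the driver's exact code:

	static bool pci_link_alive(struct pci_dev *pdev)
	{
		u16 device_id;

		/* a dead link yields 0xffff, which never matches */
		pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
		return device_id == pdev->device;
	}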
@@ -1768,14 +1769,16 @@ kill_processes:
hdev->device_cpu_disabled = false;
hdev->reset_info.hard_reset_pending = false;
+ /*
+ * Put the device in an unusable state if there are 2 back to back resets due to
+ * fatal errors.
+ */
if (hdev->reset_info.reset_trigger_repeated &&
- (hdev->reset_info.prev_reset_trigger ==
- HL_DRV_RESET_FW_FATAL_ERR)) {
- /* if there 2 back to back resets from FW,
- * ensure driver puts the driver in a unusable state
- */
+ (hdev->reset_info.prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR ||
+ hdev->reset_info.prev_reset_trigger ==
+ HL_DRV_RESET_HEARTBEAT)) {
dev_crit(hdev->dev,
- "%s Consecutive FW fatal errors received, stopping hard reset\n",
+ "%s Consecutive fatal errors, stopping hard reset\n",
dev_name(&(hdev)->pdev->dev));
rc = -EIO;
goto out_err;
@@ -2801,3 +2804,35 @@ void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
captured_err_info->undef_opcode.write_enable = true;
}
+
+void hl_init_cpu_for_irq(struct hl_device *hdev)
+{
+#ifdef CONFIG_NUMA
+ struct cpumask *available_mask = &hdev->irq_affinity_mask;
+ int numa_node = hdev->pdev->dev.numa_node, i;
+ static struct cpumask cpu_mask;
+
+ if (numa_node < 0)
+ return;
+
+ if (!cpumask_and(&cpu_mask, cpumask_of_node(numa_node), cpu_online_mask)) {
+ dev_err(hdev->dev, "No available affinities in current numa node\n");
+ return;
+ }
+
+ /* Remove HT siblings */
+ for_each_cpu(i, &cpu_mask)
+ cpumask_set_cpu(cpumask_first(topology_sibling_cpumask(i)), available_mask);
+#endif
+}
+
+void hl_set_irq_affinity(struct hl_device *hdev, int irq)
+{
+ if (cpumask_empty(&hdev->irq_affinity_mask)) {
+ dev_dbg(hdev->dev, "affinity mask is empty\n");
+ return;
+ }
+
+ if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))
+ dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);
+}
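hl_init_cpu_for_irq() builds the affinity mask once: it intersects the device's NUMA-node CPUs with the online mask, then keeps only the first CPU of each hyper-thread sibling group. hl_set_irq_affinity() later applies that mask to individual interrupts. A hypothetical call order during interrupt setup; the loop and the pci_irq_vector() lookup are assumptions, not shown in this hunk:

	hl_init_cpu_for_irq(hdev);	/* build the NUMA-local mask once */
	for (i = 0 ; i < irq_count ; i++)
		hl_set_irq_affinity(hdev, pci_irq_vector(hdev->pdev, i));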
diff --git a/drivers/accel/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index 3558a6a8e192..4bd02778a970 100644
--- a/drivers/accel/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -501,7 +501,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
0, &result);
if (rc)
- dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
+ dev_err(hdev->dev, "failed to unmask event %d", event_type);
return rc;
}
@@ -540,7 +540,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
total_pkt_size, 0, &result);
if (rc)
- dev_err(hdev->dev, "failed to unmask IRQ array\n");
+ dev_err(hdev->dev, "failed to unmask event array\n");
kfree(pkt);
@@ -2718,18 +2718,20 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
+ rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, sizeof(struct lkd_msg_comms));
+ if (rc)
+ goto protocol_err;
+
+ if (hdev->asic_prop.support_dynamic_resereved_fw_size)
+ hdev->asic_prop.reserved_fw_mem_size =
+ le32_to_cpu(fw_loader->dynamic_loader.comm_desc.rsvd_mem_size_mb) * SZ_1M;
+
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
struct lkd_fw_binning_info *binning_info;
- rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
- sizeof(struct lkd_msg_comms));
- if (rc)
- goto protocol_err;
-
/* read preboot version */
rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
-
if (rc)
return rc;
@@ -2756,11 +2758,6 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hdev->decoder_binning, hdev->rotator_binning);
}
- if (hdev->asic_prop.support_dynamic_resereved_fw_size) {
- hdev->asic_prop.reserved_fw_mem_size =
- le32_to_cpu(fw_loader->dynamic_loader.comm_desc.rsvd_mem_size_mb);
- }
-
return 0;
}
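Two things change in the hunk above: the communication descriptor is now requested on every path, not only when the driver loads the boot CPU, and the firmware's megabyte-denominated field is converted to bytes at the single point it is read, so reserved_fw_mem_size really holds bytes as its updated kernel-doc says. A one-line sketch of the conversion using SZ_1M (0x100000, from linux/sizes.h); the desc pointer is illustrative:

	/* the descriptor reports the reservation in MB; convert once */
	prop->reserved_fw_mem_size = le32_to_cpu(desc->rsvd_mem_size_mb) * SZ_1M;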
@@ -2795,7 +2792,7 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
if (!(hdev->fw_components & FW_TYPE_LINUX)) {
- dev_info(hdev->dev, "Skip loading Linux F/W\n");
+ dev_dbg(hdev->dev, "Skip loading Linux F/W\n");
return 0;
}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 2a900c9941fe..48f0f3eea1ef 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -443,18 +443,22 @@ enum hl_collective_mode {
* a CB handle can be provided for jobs on this queue.
* Otherwise, a CB address must be provided.
* @collective_mode: collective mode of current queue
+ * @q_dram_bd_address: PQ dram address, used when PQ need to reside in DRAM.
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
* @binned: True if the queue is binned out and should not be used
* @supports_sync_stream: True if queue supports sync stream
+ * @dram_bd: True if the bd should be copied to dram, needed for PQ which has been allocated on dram
*/
struct hw_queue_properties {
enum hl_queue_type type;
enum queue_cb_alloc_flags cb_alloc_flags;
enum hl_collective_mode collective_mode;
+ u64 q_dram_bd_address;
u8 driver_only;
u8 binned;
u8 supports_sync_stream;
+ u8 dram_bd;
};
/**
@@ -590,8 +594,6 @@ struct hl_hints_range {
* we display to the user
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
- * @mmu_hop_table_size: MMU hop table size.
- * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
* @dram_page_size: The DRAM physical page size.
* @cfg_size: configuration space size on SRAM.
* @sram_size: total size of SRAM.
@@ -645,10 +647,10 @@ struct hl_hints_range {
* @num_engine_cores: number of engine cpu cores.
* @max_num_of_engines: maximum number of all engines in the ASIC.
* @num_of_special_blocks: special_blocks array size.
- * @glbl_err_cause_num: global err cause number.
+ * @glbl_err_max_cause_num: global err max cause number.
* @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
* not supported.
- * @reserved_fw_mem_size: size in MB of dram memory reserved for FW.
+ * @reserved_fw_mem_size: size of dram memory reserved for FW.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -743,8 +745,6 @@ struct asic_fixed_properties {
u32 clk_pll_index;
u32 mmu_pgt_size;
u32 mmu_pte_size;
- u32 mmu_hop_table_size;
- u32 mmu_hop0_tables_total_size;
u32 dram_page_size;
u32 cfg_size;
u32 sram_size;
@@ -779,7 +779,7 @@ struct asic_fixed_properties {
u32 num_engine_cores;
u32 max_num_of_engines;
u32 num_of_special_blocks;
- u32 glbl_err_cause_num;
+ u32 glbl_err_max_cause_num;
u32 hbw_flush_reg;
u32 reserved_fw_mem_size;
u16 collective_first_sob;
@@ -1052,6 +1052,8 @@ struct hl_encaps_signals_mgr {
* @collective_mode: collective mode of current queue
* @kernel_address: holds the queue's kernel virtual address.
* @bus_address: holds the queue's DMA address.
+ * @pq_dram_address: holds the DRAM address of the PQ when it is allocated in DRAM, used when
+ *                   dram_bd is true in the queue properties.
* @pi: holds the queue's pi value.
* @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
* @hw_queue_id: the id of the H/W queue.
@@ -1061,6 +1063,7 @@ struct hl_encaps_signals_mgr {
* @valid: is the queue valid (we have array of 32 queues, not all of them
* exist).
* @supports_sync_stream: True if queue supports sync stream
+ * @dram_bd: True if the BD should be copied to DRAM, needed for a PQ that has been allocated in DRAM
*/
struct hl_hw_queue {
struct hl_cs_job **shadow_queue;
@@ -1069,6 +1072,7 @@ struct hl_hw_queue {
enum hl_collective_mode collective_mode;
void *kernel_address;
dma_addr_t bus_address;
+ u64 pq_dram_address;
u32 pi;
atomic_t ci;
u32 hw_queue_id;
@@ -1077,6 +1081,7 @@ struct hl_hw_queue {
u16 int_queue_len;
u8 valid;
u8 supports_sync_stream;
+ u8 dram_bd;
};
/**
@@ -2547,7 +2552,7 @@ struct hl_state_dump_specs {
* DEVICES
*/
-#define HL_STR_MAX 32
+#define HL_STR_MAX 64
#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
@@ -3257,6 +3262,7 @@ struct hl_reset_info {
* @clk_throttling: holds information about current/previous clock throttling events
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
+ * @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
* @fw_inner_minor_ver: the minor of current loaded preboot inner version.
@@ -3446,6 +3452,8 @@ struct hl_device {
struct hl_reset_info reset_info;
+ cpumask_t irq_affinity_mask;
+
u32 *stream_master_qid_arr;
u32 fw_inner_major_ver;
u32 fw_inner_minor_ver;
@@ -3886,6 +3894,7 @@ int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_
struct hl_hr_mmu_funcs *hr_func);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
+void hl_mmu_v2_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
@@ -3893,6 +3902,22 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
+struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr);
+void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr);
+void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info);
+u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx);
+u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx);
+void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val);
+void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val);
+void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr);
+u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
+void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr);
+int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr);
+u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop);
+u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx);
+void hl_mmu_dr_flush(struct hl_ctx *ctx);
+int hl_mmu_dr_init(struct hl_device *hdev);
+void hl_mmu_dr_fini(struct hl_device *hdev);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
@@ -4032,6 +4057,8 @@ void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_
void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info);
void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count);
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info);
+void hl_init_cpu_for_irq(struct hl_device *hdev);
+void hl_set_irq_affinity(struct hl_device *hdev, int irq);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/accel/habanalabs/common/hw_queue.c b/drivers/accel/habanalabs/common/hw_queue.c
index d0087c0ec48c..3d04a7507cce 100644
--- a/drivers/accel/habanalabs/common/hw_queue.c
+++ b/drivers/accel/habanalabs/common/hw_queue.c
@@ -84,6 +84,8 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
+ u64 addr;
+ int i;
bd = q->kernel_address;
bd += hl_pi_2_offset(q->pi);
@@ -91,7 +93,16 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
bd->len = cpu_to_le32(len);
bd->ptr = cpu_to_le64(ptr);
+ if (q->dram_bd)
+ for (i = 0 ; i < 2 ; i++) {
+ addr = q->pq_dram_address +
+ ((hl_pi_2_offset(q->pi) * sizeof(struct hl_bd)) + (i * sizeof(u64)));
+ hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM, addr,
+ (u64 *)(bd) + i, DEBUGFS_WRITE64);
+ }
+
q->pi = hl_queue_inc_ptr(q->pi);
+
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
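Since access_dev_mem() with DEBUGFS_WRITE64 moves one u64 per call, the two-iteration loop above mirrors the whole 16-byte BD (as the loop bound implies) into its DRAM slot. A minimal sketch of the same address arithmetic, with a hypothetical helper name:

static u64 bd_dram_word_addr(struct hl_hw_queue *q, u32 word_idx)
{
	/* base of the DRAM PQ + byte offset of the BD slot + word offset */
	return q->pq_dram_address +
	       (hl_pi_2_offset(q->pi) * sizeof(struct hl_bd)) +
	       (word_idx * sizeof(u64));
}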
@@ -1087,12 +1098,18 @@ int hl_hw_queues_create(struct hl_device *hdev)
q->supports_sync_stream =
asic->hw_queues_props[i].supports_sync_stream;
q->collective_mode = asic->hw_queues_props[i].collective_mode;
+ q->dram_bd = asic->hw_queues_props[i].dram_bd;
+
rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
goto release_queues;
}
+
+ /* Set the queue's DRAM PQ address if the PQ resides in DRAM */
+ if (q->dram_bd)
+ q->pq_dram_address = asic->hw_queues_props[i].q_dram_bd_address;
}
return 0;
diff --git a/drivers/accel/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c
index 1ee2ee07e9ed..36b951b5f503 100644
--- a/drivers/accel/habanalabs/common/hwmon.c
+++ b/drivers/accel/habanalabs/common/hwmon.c
@@ -46,7 +46,7 @@ static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types
break;
default:
- dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ dev_err_ratelimited(hdev->dev, "unsupported h/w sensor type %d\n", type);
flags = cpucp_flags;
break;
}
@@ -134,7 +134,7 @@ static u32 adjust_hwmon_flags(struct hl_device *hdev, enum hwmon_sensor_types ty
break;
default:
- dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ dev_err_ratelimited(hdev->dev, "unsupported h/w sensor type %d\n", type);
flags = cpucp_flags;
break;
}
@@ -162,7 +162,8 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sen
break;
if (type >= HWMON_NR_SENSOR_TYPES) {
- dev_err(hdev->dev, "Got wrong sensor type %d from device\n", type);
+ dev_err_ratelimited(hdev->dev,
+ "Got wrong sensor type %d from device\n", type);
return -EINVAL;
}
@@ -584,7 +585,7 @@ int hl_get_temperature(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -611,7 +612,7 @@ int hl_set_temperature(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set temperature of sensor %d, error %d\n",
sensor_index, rc);
@@ -638,7 +639,7 @@ int hl_get_voltage(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -667,7 +668,7 @@ int hl_get_current(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -696,7 +697,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -725,7 +726,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -752,7 +753,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set pwm info to sensor %d, error %d\n",
sensor_index, rc);
}
@@ -775,7 +776,7 @@ int hl_set_voltage(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set voltage of sensor %d, error %d\n",
sensor_index, rc);
@@ -800,7 +801,7 @@ int hl_set_current(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set current of sensor %d, error %d\n",
sensor_index, rc);
@@ -831,7 +832,7 @@ int hl_set_power(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set power of sensor %d, error %d\n",
sensor_index, rc);
@@ -858,7 +859,7 @@ int hl_get_power(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get power of sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
diff --git a/drivers/accel/habanalabs/common/mmu/Makefile b/drivers/accel/habanalabs/common/mmu/Makefile
index 1806c524e04a..f4b815bf4f7d 100644
--- a/drivers/accel/habanalabs/common/mmu/Makefile
+++ b/drivers/accel/habanalabs/common/mmu/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o \
- common/mmu/mmu_v2_hr.o
+ common/mmu/mmu_v2.o common/mmu/mmu_v2_hr.o
diff --git a/drivers/accel/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index b654302a68fc..d3eaab908457 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -585,6 +585,8 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
if (hdev->mmu_disable)
return 0;
@@ -597,8 +599,9 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI2:
case ASIC_GAUDI2B:
case ASIC_GAUDI2C:
- /* MMUs in Gaudi2 are always host resident */
- hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
+ hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
+ if (prop->pmmu.host_resident)
+ hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@@ -1209,3 +1212,219 @@ int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_
return 0;
}
+struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = NULL;
+
+ hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
+ (unsigned long) hop_addr)
+ if (hop_addr == pgt_info->shadow_addr)
+ break;
+
+ return pgt_info;
+}
+
+void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
+
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+}
+
+void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
+ hdev->asic_prop.dmmu.hop_table_size);
+ hash_del(&pgt_info->node);
+ kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
+ kfree(pgt_info);
+}
+
+u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
+}
+
+u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx)
+{
+ return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
+ (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
+}
+
+u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
+{
+ u64 page_mask = ctx->hdev->asic_prop.dmmu.hop_table_size - 1;
+ u64 shadow_hop_addr = shadow_addr & (~page_mask);
+ u64 pte_offset = shadow_addr & page_mask;
+ u64 phys_hop_addr;
+
+ if (shadow_hop_addr != hl_mmu_dr_get_hop0_addr(ctx))
+ phys_hop_addr = hl_mmu_dr_get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
+ else
+ phys_hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
+
+ return phys_hop_addr + pte_offset;
+}
+
+void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+ u64 phys_val = hl_mmu_dr_get_phys_addr(ctx, val);
+
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr),
+ phys_val);
+
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
+
+void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr), val);
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
+
+void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr)
+{
+ hl_mmu_dr_write_final_pte(ctx, pte_addr, 0);
+}
+
+void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ hl_mmu_dr_get_pgt_info(ctx, hop_addr)->num_of_ptes++;
+}
+
+int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
+ int num_of_ptes_left;
+
+ pgt_info->num_of_ptes--;
+
+ /*
+ * Need to save the number of ptes left because hl_mmu_dr_free_pgt_node() might free
+ * the pgt_info
+ */
+ num_of_ptes_left = pgt_info->num_of_ptes;
+ if (!num_of_ptes_left)
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+
+ return num_of_ptes_left;
+}
+
+u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pgt_info *pgt_info;
+ u64 phys_addr, shadow_addr;
+
+ pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+ if (!pgt_info)
+ return ULLONG_MAX;
+
+ phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
+ prop->dmmu.hop_table_size);
+ if (!phys_addr) {
+ dev_err(hdev->dev, "failed to allocate page\n");
+ goto pool_add_err;
+ }
+
+ shadow_addr = (u64) (uintptr_t) kzalloc(prop->dmmu.hop_table_size,
+ GFP_KERNEL);
+ if (!shadow_addr)
+ goto shadow_err;
+
+ pgt_info->phys_addr = phys_addr;
+ pgt_info->shadow_addr = shadow_addr;
+ pgt_info->ctx = ctx;
+ pgt_info->num_of_ptes = 0;
+ hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
+
+ return shadow_addr;
+
+shadow_err:
+ gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool,
+ phys_addr, prop->dmmu.hop_table_size);
+pool_add_err:
+ kfree(pgt_info);
+
+ return ULLONG_MAX;
+}
+
+u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop)
+{
+ u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+
+ if (hop_addr == ULLONG_MAX) {
+ hop_addr = hl_mmu_dr_alloc_hop(ctx);
+ *is_new_hop = (hop_addr != ULLONG_MAX);
+ }
+
+ return hop_addr;
+}
+
+void hl_mmu_dr_flush(struct hl_ctx *ctx)
+{
+ /* flush all writes from all cores to reach PCI */
+ mb();
+ ctx->hdev->asic_funcs->read_pte(ctx->hdev, hl_mmu_dr_get_phys_hop0_addr(ctx));
+}
+
+int hl_mmu_dr_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ hdev->mmu_priv.dr.mmu_pgt_pool =
+ gen_pool_create(__ffs(prop->dmmu.hop_table_size), -1);
+
+ if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
+ dev_err(hdev->dev, "Failed to create page gen pool\n");
+ return -ENOMEM;
+ }
+
+ rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
+ prop->dmmu.hop0_tables_total_size,
+ prop->dmmu.pgt_size - prop->dmmu.hop0_tables_total_size,
+ -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+ goto err_pool_add;
+ }
+
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid,
+ prop->dmmu.hop_table_size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ rc = -ENOMEM;
+ goto err_pool_add;
+ }
+
+ /* MMU H/W init will be done in device hw_init() */
+
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+
+ return rc;
+}
+
+void hl_mmu_dr_fini(struct hl_device *hdev)
+{
+ /* MMU H/W fini was already done in device hw_fini() */
+
+ if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0))
+ return;
+
+ kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+
+ /* Make sure that if we arrive here again without init being
+ * called we won't cause a kernel panic. This can happen, for
+ * example, if we fail during the hard reset flow at certain points
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+}
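The additions above factor the device-resident (DR) page-table helpers out of mmu_v1.c into common code, so the new mmu_v2.c can reuse them. A minimal usage sketch of the hop life cycle, assuming a caller that already has the parent's shadow PTE address and value (error handling elided; not part of the patch):

	bool is_new = false;
	u64 hop = hl_mmu_dr_get_alloc_next_hop_addr(ctx, parent_pte_val, &is_new);

	if (hop == ULLONG_MAX)
		return -ENOMEM;

	if (is_new) /* link the new hop into its parent */
		hl_mmu_dr_write_pte(ctx, parent_pte_addr,
				(hop & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK);

	hl_mmu_dr_get_pte(ctx, hop);        /* one more PTE in use in this hop */

	/* ... write/clear PTEs inside the hop ... */

	if (!hl_mmu_dr_put_pte(ctx, hop))   /* 0 PTEs left - the hop was freed */
		hl_mmu_dr_clear_pte(ctx, parent_pte_addr);

	hl_mmu_dr_flush(ctx);               /* make the device see the updates */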
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v1.c b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
index d925dc4dd097..845d16aaa637 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
@@ -12,166 +12,6 @@
#define MMU_V1_MAX_HOPS (MMU_HOP4 + 1)
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
-
-static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = NULL;
-
- hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
- (unsigned long) hop_addr)
- if (hop_addr == pgt_info->shadow_addr)
- break;
-
- return pgt_info;
-}
-
-static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
-{
- struct hl_device *hdev = ctx->hdev;
-
- gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
- hdev->asic_prop.mmu_hop_table_size);
- hash_del(&pgt_info->node);
- kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
- kfree(pgt_info);
-}
-
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
-
- _free_hop(ctx, pgt_info);
-}
-
-static u64 alloc_hop(struct hl_ctx *ctx)
-{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct pgt_info *pgt_info;
- u64 phys_addr, shadow_addr;
-
- pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
- if (!pgt_info)
- return ULLONG_MAX;
-
- phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
- prop->mmu_hop_table_size);
- if (!phys_addr) {
- dev_err(hdev->dev, "failed to allocate page\n");
- goto pool_add_err;
- }
-
- shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
- GFP_KERNEL);
- if (!shadow_addr)
- goto shadow_err;
-
- pgt_info->phys_addr = phys_addr;
- pgt_info->shadow_addr = shadow_addr;
- pgt_info->ctx = ctx;
- pgt_info->num_of_ptes = 0;
- hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
-
- return shadow_addr;
-
-shadow_err:
- gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
- prop->mmu_hop_table_size);
-pool_add_err:
- kfree(pgt_info);
-
- return ULLONG_MAX;
-}
-
-static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
-{
- return ctx->hdev->asic_prop.mmu_pgt_addr +
- (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static inline u64 get_hop0_addr(struct hl_ctx *ctx)
-{
- return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
- (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static void flush(struct hl_ctx *ctx)
-{
- /* flush all writes from all cores to reach PCI */
- mb();
- ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
-}
-
-/* transform the value to physical address when writing to H/W */
-static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
-{
- /*
- * The value to write is actually the address of the next shadow hop +
- * flags at the 12 LSBs.
- * Hence in order to get the value to write to the physical PTE, we
- * clear the 12 LSBs and translate the shadow hop to its associated
- * physical hop, and add back the original 12 LSBs.
- */
- u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
- (val & FLAGS_MASK);
-
- ctx->hdev->asic_funcs->write_pte(ctx->hdev,
- get_phys_addr(ctx, shadow_pte_addr),
- phys_val);
-
- *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* do not transform the value to physical address when writing to H/W */
-static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
- u64 val)
-{
- ctx->hdev->asic_funcs->write_pte(ctx->hdev,
- get_phys_addr(ctx, shadow_pte_addr),
- val);
- *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* clear the last and present bits */
-static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
-{
- /* no need to transform the value to physical address */
- write_final_pte(ctx, pte_addr, 0);
-}
-
-static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
- get_pgt_info(ctx, hop_addr)->num_of_ptes++;
-}
-
-/*
- * put_pte - decrement the num of ptes and free the hop if possible
- *
- * @ctx: pointer to the context structure
- * @hop_addr: addr of the hop
- *
- * This function returns the number of ptes left on this hop. If the number is
- * 0, it means the pte was freed.
- */
-static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
- int num_of_ptes_left;
-
- pgt_info->num_of_ptes--;
-
- /*
- * Need to save the number of ptes left because free_hop might free
- * the pgt_info
- */
- num_of_ptes_left = pgt_info->num_of_ptes;
- if (!num_of_ptes_left)
- _free_hop(ctx, pgt_info);
-
- return num_of_ptes_left;
-}
-
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
{
@@ -183,35 +23,6 @@ static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties
ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
-static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
- bool *is_new_hop)
-{
- u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop_addr == ULLONG_MAX) {
- hop_addr = alloc_hop(ctx);
- *is_new_hop = (hop_addr != ULLONG_MAX);
- }
-
- return hop_addr;
-}
-
-/* translates shadow address inside hop to a physical address */
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
-{
- u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
- u64 shadow_hop_addr = shadow_addr & ~page_mask;
- u64 pte_offset = shadow_addr & page_mask;
- u64 phys_hop_addr;
-
- if (shadow_hop_addr != get_hop0_addr(ctx))
- phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
- else
- phys_hop_addr = get_phys_hop0_addr(ctx);
-
- return phys_hop_addr + pte_offset;
-}
-
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -232,13 +43,13 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
- ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
+ ctx->dram_default_hops = kcalloc(total_hops, HL_PTE_SIZE, GFP_KERNEL);
if (!ctx->dram_default_hops)
return -ENOMEM;
- hop0_addr = get_hop0_addr(ctx);
+ hop0_addr = hl_mmu_dr_get_hop0_addr(ctx);
- hop1_addr = alloc_hop(ctx);
+ hop1_addr = hl_mmu_dr_alloc_hop(ctx);
if (hop1_addr == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 1\n");
rc = -ENOMEM;
@@ -247,7 +58,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
ctx->dram_default_hops[total_hops - 1] = hop1_addr;
- hop2_addr = alloc_hop(ctx);
+ hop2_addr = hl_mmu_dr_alloc_hop(ctx);
if (hop2_addr == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 2\n");
rc = -ENOMEM;
@@ -257,7 +68,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
ctx->dram_default_hops[total_hops - 2] = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- ctx->dram_default_hops[i] = alloc_hop(ctx);
+ ctx->dram_default_hops[i] = hl_mmu_dr_alloc_hop(ctx);
if (ctx->dram_default_hops[i] == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
rc = -ENOMEM;
@@ -268,18 +79,18 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
/* need only pte 0 in hops 0 and 1 */
pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop0_addr, pte_val);
+ hl_mmu_dr_write_pte(ctx, hop0_addr, pte_val);
pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop1_addr, pte_val);
- get_pte(ctx, hop1_addr);
+ hl_mmu_dr_write_pte(ctx, hop1_addr, pte_val);
+ hl_mmu_dr_get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
- write_pte(ctx, hop2_pte_addr, pte_val);
- get_pte(ctx, hop2_addr);
+ hl_mmu_dr_write_pte(ctx, hop2_pte_addr, pte_val);
+ hl_mmu_dr_get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
@@ -289,23 +100,23 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
- write_final_pte(ctx, hop3_pte_addr, pte_val);
- get_pte(ctx, ctx->dram_default_hops[i]);
+ hl_mmu_dr_write_final_pte(ctx, hop3_pte_addr, pte_val);
+ hl_mmu_dr_get_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
}
}
- flush(ctx);
+ hl_mmu_dr_flush(ctx);
return 0;
hop3_err:
for (i = 0 ; i < hop3_allocated ; i++)
- free_hop(ctx, ctx->dram_default_hops[i]);
+ hl_mmu_dr_free_hop(ctx, ctx->dram_default_hops[i]);
- free_hop(ctx, hop2_addr);
+ hl_mmu_dr_free_hop(ctx, hop2_addr);
hop2_err:
- free_hop(ctx, hop1_addr);
+ hl_mmu_dr_free_hop(ctx, hop1_addr);
hop1_err:
kfree(ctx->dram_default_hops);
@@ -329,7 +140,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
do_div(num_of_hop3, prop->dram_page_size);
do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
- hop0_addr = get_hop0_addr(ctx);
+ hop0_addr = hl_mmu_dr_get_hop0_addr(ctx);
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
hop1_addr = ctx->dram_default_hops[total_hops - 1];
@@ -338,101 +149,26 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
- clear_pte(ctx, hop3_pte_addr);
- put_pte(ctx, ctx->dram_default_hops[i]);
+ hl_mmu_dr_clear_pte(ctx, hop3_pte_addr);
+ hl_mmu_dr_put_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
}
}
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- clear_pte(ctx, hop2_pte_addr);
- put_pte(ctx, hop2_addr);
+ hl_mmu_dr_clear_pte(ctx, hop2_pte_addr);
+ hl_mmu_dr_put_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
- clear_pte(ctx, hop1_addr);
- put_pte(ctx, hop1_addr);
- clear_pte(ctx, hop0_addr);
+ hl_mmu_dr_clear_pte(ctx, hop1_addr);
+ hl_mmu_dr_put_pte(ctx, hop1_addr);
+ hl_mmu_dr_clear_pte(ctx, hop0_addr);
kfree(ctx->dram_default_hops);
- flush(ctx);
-}
-
-/**
- * hl_mmu_v1_init() - initialize the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Create a pool of pages for pgt_infos.
- * - Create a shadow table for pgt
- *
- * Return: 0 for success, non-zero for failure.
- */
-static int hl_mmu_v1_init(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- int rc;
-
- hdev->mmu_priv.dr.mmu_pgt_pool =
- gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
-
- if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
- dev_err(hdev->dev, "Failed to create page gen pool\n");
- return -ENOMEM;
- }
-
- rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
- prop->mmu_hop0_tables_total_size,
- prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
- -1);
- if (rc) {
- dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
- goto err_pool_add;
- }
-
- hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
- GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
- rc = -ENOMEM;
- goto err_pool_add;
- }
-
- /* MMU H/W init will be done in device hw_init() */
-
- return 0;
-
-err_pool_add:
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-
- return rc;
-}
-
-/**
- * hl_mmu_v1_fini() - release the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Disable MMU in H/W.
- * - Free the pgt_infos pool.
- *
- * All contexts should be freed before calling this function.
- */
-static void hl_mmu_v1_fini(struct hl_device *hdev)
-{
- /* MMU H/W fini was already done in device hw_fini() */
-
- if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
- kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-
- /* Make sure that if we arrive here again without init was
- * called we won't cause kernel panic. This can happen for
- * example if we fail during hard reset code at certain points
- */
- hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
- }
+ hl_mmu_dr_flush(ctx);
}
/**
@@ -476,7 +212,7 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
- _free_hop(ctx, pgt_info);
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
}
}
@@ -495,7 +231,7 @@ static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
if (hop_idx == MMU_HOP0) {
- hop_addr[hop_idx] = get_hop0_addr(ctx);
+ hop_addr[hop_idx] = hl_mmu_dr_get_hop0_addr(ctx);
} else {
hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr[hop_idx] == ULLONG_MAX)
@@ -546,30 +282,30 @@ static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
}
hop_idx = MMU_HOP3;
- write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
- put_pte(ctx, hop_addr[hop_idx]);
+ hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
+ hl_mmu_dr_put_pte(ctx, hop_addr[hop_idx]);
} else {
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
if (hop_addr[MMU_HOP4])
- clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
else
- clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
- if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
+ if (hop_addr[MMU_HOP4] && !hl_mmu_dr_put_pte(ctx, hop_addr[MMU_HOP4]))
clear_hop3 = true;
if (!clear_hop3)
goto mapped;
for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
- clear_pte(ctx, hop_pte_addr[hop_idx]);
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[hop_idx]);
if (hop_idx == MMU_HOP0)
break;
- if (put_pte(ctx, hop_addr[hop_idx]))
+ if (hl_mmu_dr_put_pte(ctx, hop_addr[hop_idx]))
goto mapped;
}
}
@@ -616,10 +352,10 @@ static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
if (hop_idx == MMU_HOP0) {
- hop_addr[hop_idx] = get_hop0_addr(ctx);
+ hop_addr[hop_idx] = hl_mmu_dr_get_hop0_addr(ctx);
} else {
hop_addr[hop_idx] =
- get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
+ hl_mmu_dr_get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
if (hop_addr[hop_idx] == ULLONG_MAX)
goto err;
}
@@ -666,27 +402,27 @@ static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
- write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
+ hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
prev_hop = hop_idx - 1;
if (hop_new[hop_idx]) {
curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
+ hl_mmu_dr_write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
if (hop_idx != MMU_HOP1)
- get_pte(ctx, hop_addr[prev_hop]);
+ hl_mmu_dr_get_pte(ctx, hop_addr[prev_hop]);
}
}
- get_pte(ctx, hop_addr[num_hops - 1]);
+ hl_mmu_dr_get_pte(ctx, hop_addr[num_hops - 1]);
return 0;
err:
for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) {
if (hop_new[hop_idx])
- free_hop(ctx, hop_addr[hop_idx]);
+ hl_mmu_dr_free_hop(ctx, hop_addr[hop_idx]);
}
return rc;
@@ -752,7 +488,7 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
if (is_huge)
used_hops--;
- hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
+ hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
hops->hop_info[0].hop_pte_addr =
hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
hops->hop_info[0].hop_addr, virt_addr);
@@ -801,13 +537,13 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
*/
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
- mmu->init = hl_mmu_v1_init;
- mmu->fini = hl_mmu_v1_fini;
+ mmu->init = hl_mmu_dr_init;
+ mmu->fini = hl_mmu_dr_fini;
mmu->ctx_init = hl_mmu_v1_ctx_init;
mmu->ctx_fini = hl_mmu_v1_ctx_fini;
mmu->map = hl_mmu_v1_map;
mmu->unmap = hl_mmu_v1_unmap;
- mmu->flush = flush;
+ mmu->flush = hl_mmu_dr_flush;
mmu->swap_out = hl_mmu_v1_swap_out;
mmu->swap_in = hl_mmu_v1_swap_in;
mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v2.c b/drivers/accel/habanalabs/common/mmu/mmu_v2.c
new file mode 100644
index 000000000000..4bc0268fff1c
--- /dev/null
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "../habanalabs.h"
+#include "../../include/hw_ip/mmu/mmu_general.h"
+#include "../../include/hw_ip/mmu/mmu_v2_0.h"
+
+#include <linux/slab.h>
+
+/**
+ * hl_mmu_v2_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a hash to hold all page-table hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+static int hl_mmu_v2_ctx_init(struct hl_ctx *ctx)
+{
+ hash_init(ctx->mmu_shadow_hash);
+
+ return 0;
+}
+
+/*
+ * hl_mmu_v2_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ */
+static void hl_mmu_v2_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ struct hlist_node *tmp;
+ int i;
+
+ if (!hash_empty(ctx->mmu_shadow_hash))
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
+
+ hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
+ dev_err_ratelimited(hdev->dev,
+ "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+ pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+ }
+}
+
+static int hl_mmu_v2_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
+{
+ u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 }, curr_pte,
+ scrambled_virt_addr;
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ bool is_huge = false;
+ int i, hop_last;
+
+ /* device-resident page tables in MMU v2 are used only for HMMU (DRAM) addresses */
+ if (!is_dram_addr)
+ return -EINVAL;
+
+ mmu_prop = &prop->dmmu;
+
+ hop_last = mmu_prop->num_hops - 1;
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
+ hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
+ hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+ hop_addr[0], scrambled_virt_addr);
+ if (hop_pte_addr[0] == U64_MAX)
+ return -EFAULT;
+
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
+
+ for (i = 1 ; i < mmu_prop->num_hops ; i++) {
+ hop_addr[i] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+ if (hop_addr[i] == ULLONG_MAX)
+ goto not_mapped;
+
+ hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hop_addr[i], scrambled_virt_addr);
+ if (hop_pte_addr[i] == U64_MAX)
+ return -EFAULT;
+
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
+
+ if ((i <= hop_last) && (curr_pte & mmu_prop->last_mask)) {
+ hop_last = i;
+ is_huge = true;
+ break;
+ }
+ }
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ if (!(curr_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ for (i = hop_last ; i > 0 ; i--) {
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[i]);
+ if (hl_mmu_dr_put_pte(ctx, hop_addr[i]))
+ goto mapped;
+ }
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[0]);
+
+mapped:
+ return 0;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+
+ return -EINVAL;
+}
+
+static int hl_mmu_v2_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+ u32 page_size, bool is_dram_addr)
+{
+ u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 },
+ curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ bool hop_new[MMU_ARCH_6_HOPS] = { false };
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ int rc, i, hop_last;
+
+ /* device-resident page tables in MMU v2 are used only for HMMU (DRAM) addresses */
+ if (!is_dram_addr)
+ return -EINVAL;
+
+ mmu_prop = &prop->dmmu;
+
+ hop_last = mmu_prop->num_hops - 1;
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+ scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);
+
+ /* First hop is preallocated therefore it is treated differently */
+ hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
+ hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+ hop_addr[0], scrambled_virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
+
+ /* Handle hop1 to hop_last */
+ for (i = 1 ; i <= hop_last ; i++) {
+ hop_addr[i] = hl_mmu_dr_get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[i]);
+ if (hop_addr[i] == ULLONG_MAX) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hop_addr[i], scrambled_virt_addr);
+ if (hop_pte_addr[i] == U64_MAX) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (!hop_pte_addr[i]) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
+ }
+
+ if (curr_pte & PAGE_PRESENT_MASK) {
+ dev_err(hdev->dev,
+ "mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+
+ for (i = 0 ; i <= hop_last ; i++)
+ dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
+ i, *(u64 *) (uintptr_t) hop_pte_addr[i],
+ hop_pte_addr[i]);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK)
+ | mmu_prop->last_mask | PAGE_PRESENT_MASK;
+
+ /* Write the PTEs */
+ hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[hop_last], curr_pte);
+
+ /* for each new hop, add its address to the previous hop's table */
+ for (i = 1 ; i <= hop_last ; i++) {
+ if (hop_new[i]) {
+ curr_pte = (hop_addr[i] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ hl_mmu_dr_write_pte(ctx, hop_pte_addr[i - 1], curr_pte);
+
+ if (i - 1)
+ hl_mmu_dr_get_pte(ctx, hop_addr[i - 1]);
+ }
+ }
+ hl_mmu_dr_get_pte(ctx, hop_addr[hop_last]);
+
+ return 0;
+
+err:
+ for (i = 1 ; i <= hop_last ; i++)
+ if (hop_new[i] && (hop_addr[i] != U64_MAX))
+ hl_mmu_dr_free_hop(ctx, hop_addr[i]);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_v2_swap_out - marks all mappings of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_v2_swap_in - marks all mappings of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_swap_in(struct hl_ctx *ctx)
+{
+
+}
+
+static int hl_mmu_v2_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops)
+{
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ bool is_dram_addr;
+ int i;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+
+ /* device-resident page tables in MMU v2 are used only for HMMU (DRAM) addresses */
+ if (!is_dram_addr)
+ return -EINVAL;
+
+ mmu_prop = &prop->dmmu;
+ hops->range_type = HL_VA_RANGE_TYPE_DRAM;
+
+ hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
+ hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
+ hops->hop_info[0].hop_pte_addr = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+ hops->hop_info[0].hop_addr,
+ hops->scrambled_vaddr);
+ if (hops->hop_info[0].hop_pte_addr == U64_MAX)
+ return -EFAULT;
+
+ hops->hop_info[0].hop_pte_val = hdev->asic_funcs->read_pte(hdev,
+ hops->hop_info[0].hop_pte_addr);
+ if (hops->hop_info[0].hop_pte_val == U64_MAX)
+ return -EFAULT;
+
+ for (i = 1 ; i < mmu_prop->num_hops ; i++) {
+ hops->hop_info[i].hop_addr =
+ hl_mmu_get_next_hop_addr(ctx, hops->hop_info[i - 1].hop_pte_val);
+ if (hops->hop_info[i].hop_addr == ULLONG_MAX)
+ return -EFAULT;
+
+ hops->hop_info[i].hop_pte_addr =
+ hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops->hop_info[i].hop_addr,
+ hops->scrambled_vaddr);
+ if (hops->hop_info[i].hop_pte_addr == U64_MAX)
+ return -EFAULT;
+
+ hops->hop_info[i].hop_pte_val =
+ hdev->asic_funcs->read_pte(hdev,
+ hops->hop_info[i].hop_pte_addr);
+
+ if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+ return -EFAULT;
+
+ if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
+ break;
+ }
+
+ /* if we passed over all hops then no last hop was found */
+ if (i == mmu_prop->num_hops)
+ return -EFAULT;
+
+ if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+ return -EFAULT;
+
+ if (hops->scrambled_vaddr != virt_addr)
+ hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
+ (hdev, hops->hop_info[i].hop_pte_val);
+ else
+ hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;
+
+ hops->used_hops = i + 1;
+
+ return 0;
+}
+
+/*
+ * hl_mmu_v2_set_funcs - set the MMU functions for working with MMU v2
+ *
+ * @hdev: pointer to the device structure
+ * @mmu: pointer to the MMU functions structure
+ */
+void hl_mmu_v2_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
+{
+ mmu->init = hl_mmu_dr_init;
+ mmu->fini = hl_mmu_dr_fini;
+ mmu->ctx_init = hl_mmu_v2_ctx_init;
+ mmu->ctx_fini = hl_mmu_v2_ctx_fini;
+ mmu->map = hl_mmu_v2_map;
+ mmu->unmap = hl_mmu_v2_unmap;
+ mmu->flush = hl_mmu_dr_flush;
+ mmu->swap_out = hl_mmu_v2_swap_out;
+ mmu->swap_in = hl_mmu_v2_swap_in;
+ mmu->get_tlb_info = hl_mmu_v2_get_tlb_info;
+}
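With mmu_v2.c in place, hl_mmu_if_set_funcs() (see the mmu.c hunk above) installs the DR function table for all Gaudi2 flavors and adds the HR table only when the PMMU is host-resident. A sketch of that selection, mirroring the hunk:

	hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
	if (hdev->asic_prop.pmmu.host_resident)
		hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);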
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
index afe7ef964f82..31507b2a431b 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
@@ -47,7 +47,7 @@ static inline int hl_mmu_v2_hr_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size,
+ return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->pmmu.hop_table_size,
prop->mmu_pgt_size);
}
@@ -65,7 +65,7 @@ static inline void hl_mmu_v2_hr_fini(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size);
+ hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->pmmu.hop_table_size);
}
/**
@@ -108,7 +108,7 @@ static void hl_mmu_v2_hr_ctx_fini(struct hl_ctx *ctx)
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
hl_mmu_hr_free_hop_remove_pgt(pgt_info, &ctx->hdev->mmu_priv.hr,
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
}
}
@@ -150,7 +150,7 @@ static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
if ((i < hop_last) && (curr_pte & mmu_prop->last_mask)) {
hop_last = i;
@@ -169,14 +169,14 @@ static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
for (i = hop_last ; i > 0 ; i--) {
hl_mmu_hr_clear_pte(ctx, hops_pgt_info[i], hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
if (hl_mmu_hr_put_pte(ctx, hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
- ctx->hdev->asic_prop.mmu_hop_table_size))
+ ctx->hdev->asic_prop.pmmu.hop_table_size))
goto mapped;
}
hl_mmu_hr_clear_pte(ctx, hops_pgt_info[0], hop_pte_phys_addr[0],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
mapped:
return 0;
@@ -255,7 +255,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
scrambled_virt_addr);
curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
}
if (curr_pte & PAGE_PRESENT_MASK) {
@@ -268,7 +268,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
*(u64 *) (uintptr_t)
hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size),
+ ctx->hdev->asic_prop.pmmu.hop_table_size),
hop_pte_phys_addr[i]);
rc = -EINVAL;
goto err;
@@ -279,7 +279,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
/* Write the PTEs */
hl_mmu_hr_write_pte(ctx, hops_pgt_info[hop_last], hop_pte_phys_addr[hop_last], curr_pte,
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
/* for each new hop, add its address to the table of previous-hop */
for (i = 1 ; i <= hop_last ; i++) {
@@ -287,7 +287,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
curr_pte = (hops_pgt_info[i]->phys_addr & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
hl_mmu_hr_write_pte(ctx, hops_pgt_info[i - 1], hop_pte_phys_addr[i - 1],
- curr_pte, ctx->hdev->asic_prop.mmu_hop_table_size);
+ curr_pte, ctx->hdev->asic_prop.pmmu.hop_table_size);
if (i - 1)
hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
hops_pgt_info[i - 1]->phys_addr);
@@ -303,7 +303,7 @@ err:
for (i = 1 ; i <= hop_last ; i++)
if (hop_new[i] && hops_pgt_info[i])
hl_mmu_hr_free_hop_remove_pgt(hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
return rc;
}
diff --git a/drivers/accel/habanalabs/common/security.c b/drivers/accel/habanalabs/common/security.c
index fe913965dbad..5402a3cd0491 100644
--- a/drivers/accel/habanalabs/common/security.c
+++ b/drivers/accel/habanalabs/common/security.c
@@ -7,15 +7,31 @@
#include "habanalabs.h"
-static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
+static const char * const hl_glbl_error_cause[] = {
"Error due to un-priv read",
"Error due to un-secure read",
"Error due to read from unmapped reg",
"Error due to un-priv write",
"Error due to un-secure write",
"Error due to write to unmapped reg",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
"External I/F write sec violation",
"External I/F write to un-mapped reg",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
"Read to write only",
"Write to read only"
};
@@ -671,10 +687,11 @@ static bool hl_check_block_range_exclusion(struct hl_device *hdev,
static int hl_read_glbl_errors(struct hl_device *hdev,
u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
{
- struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_special_block_info *special_blocks = prop->special_blocks;
struct hl_special_block_info *current_block = &special_blocks[blk_idx];
u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
- base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
+ base = current_block->base_addr - lower_32_bits(prop->cfg_base_address);
int i;
block_base = base + major * current_block->major_offset +
@@ -689,13 +706,13 @@ static int hl_read_glbl_errors(struct hl_device *hdev,
glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
addr_val = RREG32(glbl_err_addr);
- for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
+ for (i = 0 ; i <= prop->glbl_err_max_cause_num ; i++) {
if (cause_val & BIT(i))
dev_err_ratelimited(hdev->dev,
- "%s, addr %#llx\n",
- hl_glbl_error_cause[i],
- hdev->asic_prop.cfg_base_address + block_base +
- FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
+ "%s, addr %#llx\n",
+ hl_glbl_error_cause[i],
+ prop->cfg_base_address + block_base +
+ FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
}
WREG32(glbl_err_cause, cause_val);
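Note the loop bound changed from an exclusive count (glbl_err_cause_num) to an inclusive maximum bit index (glbl_err_max_cause_num, 17 for Gaudi2), which the enlarged string table above accommodates. A minimal sketch of the inclusive decode, with hypothetical values:

	u32 cause_val = BIT(0) | BIT(16);   /* hypothetical cause bitmap */
	u32 max_cause = 17;                 /* GAUDI2_GLBL_ERR_MAX_CAUSE_NUM */
	int i;

	for (i = 0 ; i <= max_cause ; i++)  /* inclusive upper bound */
		if (cause_val & BIT(i))
			pr_err("%s\n", hl_glbl_error_cause[i]);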
diff --git a/drivers/accel/habanalabs/common/security.h b/drivers/accel/habanalabs/common/security.h
index d7a3b3e82ea4..476f70687c09 100644
--- a/drivers/accel/habanalabs/common/security.h
+++ b/drivers/accel/habanalabs/common/security.h
@@ -13,8 +13,7 @@
struct hl_device;
/* special blocks */
-#define HL_MAX_NUM_OF_GLBL_ERR_CAUSE 10
-#define HL_GLBL_ERR_ADDRESS_MASK GENMASK(11, 0)
+#define HL_GLBL_ERR_ADDRESS_MASK GENMASK(11, 0)
/* GLBL_ERR_ADDR register offset from the start of the block */
#define HL_GLBL_ERR_ADDR_OFFSET 0xF44
/* GLBL_ERR_CAUSE register offset from the start of the block */
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index 53292d4c15c8..f2b04ffb0ecb 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -614,8 +614,6 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
- prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = false;
@@ -637,8 +635,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
prop->pmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
- prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->pmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -649,6 +647,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
prop->dmmu.end_addr = VA_HOST_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->dmmu.pgt_size = prop->mmu_pgt_size;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
@@ -3652,7 +3651,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
for (i = 0 ; i < prop->max_asid ; i++) {
hop0_addr = prop->mmu_pgt_addr +
- (i * prop->mmu_hop_table_size);
+ (i * prop->dmmu.hop_table_size);
rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
if (rc) {
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index e0e5615ef9b0..fa1c4feb9f89 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -158,11 +158,13 @@
#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
(RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))
-#define PSOC_RAZWI_ENG_STR_SIZE 128
-#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
+#define PSOC_RAZWI_ENG_STR_SIZE 128
+#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
/* HW scrambles only bits 0-25 */
-#define HW_UNSCRAMBLED_BITS_MASK GENMASK_ULL(63, 26)
+#define HW_UNSCRAMBLED_BITS_MASK GENMASK_ULL(63, 26)
+
+#define GAUDI2_GLBL_ERR_MAX_CAUSE_NUM 17
struct gaudi2_razwi_info {
u32 axuser_xy;
@@ -2308,11 +2310,26 @@ static int set_number_of_functional_hbms(struct hl_device *hdev)
return 0;
}
+static bool gaudi2_is_edma_queue_id(u32 queue_id)
+{
+ switch (queue_id) {
+ case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
+ case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3:
+ case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3:
+ case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int gaudi2_set_dram_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 basic_hbm_page_size;
- int rc;
+ u64 hbm_drv_base_offset = 0, edma_pq_base_addr;
+ u32 basic_hbm_page_size, edma_idx = 0;
+ int rc, i;
rc = set_number_of_functional_hbms(hdev);
if (rc)
@@ -2356,9 +2373,35 @@ static int gaudi2_set_dram_properties(struct hl_device *hdev)
prop->dmmu.start_addr = prop->dram_base_address +
(prop->dram_page_size *
DIV_ROUND_UP_SECTOR_T(prop->dram_size, prop->dram_page_size));
-
prop->dmmu.end_addr = prop->dmmu.start_addr + prop->dram_page_size *
div_u64((VA_HBM_SPACE_END - prop->dmmu.start_addr), prop->dmmu.page_size);
+ /*
+ * The driver can't share a (48MB) HBM page with the F/W, to prevent the F/W from
+ * blocking the driver part via a range register, so it must start at the next (48MB) page
+ */
+ hbm_drv_base_offset = roundup(CPU_FW_IMAGE_SIZE, prop->num_functional_hbms * SZ_8M);
+
+ /*
+ * The NIC driver section and the HMMU page tables section in the HBM need to fit
+ * in the remaining size of the first DRAM page, after taking into account the
+ * F/W image size
+ */
+
+ /* Reserve region in HBM for HMMU page tables */
+ prop->mmu_pgt_addr = DRAM_PHYS_BASE + hbm_drv_base_offset +
+ ((prop->dram_page_size - hbm_drv_base_offset) -
+ (HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE + EDMA_SCRATCHPAD_SIZE));
+
+ /* Set EDMA PQs HBM addresses */
+ edma_pq_base_addr = prop->mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE;
+
+ for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) {
+ if (gaudi2_is_edma_queue_id(i)) {
+ prop->hw_queues_props[i].q_dram_bd_address = edma_pq_base_addr +
+ (edma_idx * HL_QUEUE_SIZE_IN_BYTES);
+ edma_idx++;
+ }
+ }
return 0;
}
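The net effect of the arithmetic above is that the driver-reserved region sits at the very end of the first DRAM page. A sketch of the resulting layout, using the gaudi2P.h size constants (their values are not shown in this diff) and the scratchpad assignment from the gaudi2_sw_init() hunk below:

	/*
	 * hbm_drv_base_offset = roundup(CPU_FW_IMAGE_SIZE,
	 *                               num_functional_hbms * SZ_8M)
	 * mmu_pgt_addr        = end of first DRAM page -
	 *                       (HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE +
	 *                        EDMA_SCRATCHPAD_SIZE)
	 * edma_pq_base_addr   = mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE
	 * scratchpad_bus_addr = edma_pq_base_addr + EDMA_PQS_SIZE
	 */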
@@ -2368,7 +2411,7 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hw_queue_properties *q_props;
u32 num_sync_stream_queues = 0;
- int i;
+ int i, rc;
prop->max_queues = GAUDI2_QUEUE_ID_SIZE;
prop->hw_queues_props = kcalloc(prop->max_queues, sizeof(struct hw_queue_properties),
@@ -2391,6 +2434,9 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
}
q_props[i].cb_alloc_flags = CB_ALLOC_USER;
+
+ if (gaudi2_is_edma_queue_id(i))
+ q_props[i].dram_bd = 1;
}
q_props[GAUDI2_QUEUE_ID_CPU_PQ].type = QUEUE_TYPE_CPU;
@@ -2419,46 +2465,43 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->rotator_enabled_mask = BIT(NUM_OF_ROT) - 1;
- if (hdev->pldm)
- prop->mmu_pgt_size = 0x800000; /* 8MB */
- else
- prop->mmu_pgt_size = MMU_PAGE_TABLES_INITIAL_SIZE;
+ prop->max_asid = 2;
+ prop->dmmu.pgt_size = HMMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
- prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dmmu.hop_shifts[MMU_HOP0] = DHOP0_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP1] = DHOP1_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP2] = DHOP2_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP3] = DHOP3_SHIFT;
- prop->dmmu.hop_shifts[MMU_HOP4] = DHOP4_SHIFT;
prop->dmmu.hop_masks[MMU_HOP0] = DHOP0_MASK;
prop->dmmu.hop_masks[MMU_HOP1] = DHOP1_MASK;
prop->dmmu.hop_masks[MMU_HOP2] = DHOP2_MASK;
prop->dmmu.hop_masks[MMU_HOP3] = DHOP3_MASK;
- prop->dmmu.hop_masks[MMU_HOP4] = DHOP4_MASK;
prop->dmmu.page_size = PAGE_SIZE_1GB;
- prop->dmmu.num_hops = MMU_ARCH_6_HOPS;
+ prop->dmmu.num_hops = MMU_ARCH_4_HOPS;
prop->dmmu.last_mask = LAST_MASK;
- prop->dmmu.host_resident = 1;
- prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->dmmu.host_resident = 0;
+ prop->dmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->dmmu.hop0_tables_total_size = HOP_TABLE_SIZE_512_PTE * prop->max_asid;
- /*
- * this is done in order to be able to validate FW descriptor (i.e. validating that
- * the addresses and allocated space for FW image does not cross memory bounds).
- * for this reason we set the DRAM size to the minimum possible and later it will
- * be modified according to what reported in the cpucp info packet
+ /* We need to set the pgt address in DRAM for HMMU init, so we cannot
+ * wait for the F/W cpucp info to set the DRAM props, as MMU init comes
+ * before H/W init
*/
- prop->dram_size = (GAUDI2_HBM_NUM - 1) * SZ_16G;
+ rc = hdev->asic_funcs->set_dram_properties(hdev);
+ if (rc)
+ goto free_qprops;
+ prop->mmu_pgt_size = PMMU_PAGE_TABLES_SIZE;
+
+ prop->pmmu.pgt_size = prop->mmu_pgt_size;
hdev->pmmu_huge_range = true;
prop->pmmu.host_resident = 1;
prop->pmmu.num_hops = MMU_ARCH_6_HOPS;
prop->pmmu.last_mask = LAST_MASK;
- prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->pmmu.hop0_tables_total_size = HOP_TABLE_SIZE_512_PTE * prop->max_asid;
prop->hints_host_reserved_va_range.start_addr = RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START;
prop->hints_host_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HOST_END;
@@ -2516,7 +2559,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->max_num_of_engines = GAUDI2_ENGINE_ID_SIZE;
prop->num_engine_cores = CPU_ID_MAX;
prop->cfg_size = CFG_SIZE;
- prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI2_EVENT_SIZE;
prop->supports_engine_modes = true;
@@ -2560,6 +2602,10 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
return 0;
+
+free_qprops:
+ kfree(prop->hw_queues_props);
+ return rc;
}
static int gaudi2_pci_bars_map(struct hl_device *hdev)
@@ -3033,6 +3079,25 @@ static int gaudi2_fetch_psoc_frequency(struct hl_device *hdev)
return 0;
}
+static int gaudi2_mmu_clear_pgt_range(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_MMU_MASK))
+ return 0;
+
+ if (prop->dmmu.host_resident)
+ return 0;
+
+ rc = gaudi2_memset_device_memory(hdev, prop->mmu_pgt_addr, prop->dmmu.pgt_size, 0);
+ if (rc)
+ dev_err(hdev->dev, "Failed to clear mmu pgt\n");
+
+ return rc;
+}
+
static int gaudi2_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -3258,6 +3323,12 @@ static int gaudi2_late_init(struct hl_device *hdev)
goto disable_pci_access;
}
+ rc = gaudi2_mmu_clear_pgt_range(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
+ goto disable_pci_access;
+ }
+
gaudi2_init_arcs(hdev);
rc = gaudi2_scrub_arcs_dccm(hdev);
@@ -3518,7 +3589,7 @@ static int gaudi2_special_blocks_config(struct hl_device *hdev)
int i, rc;
/* Configure Special blocks */
- prop->glbl_err_cause_num = GAUDI2_NUM_OF_GLBL_ERR_CAUSE;
+ prop->glbl_err_max_cause_num = GAUDI2_GLBL_ERR_MAX_CAUSE_NUM;
prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks);
prop->special_blocks = kmalloc_array(prop->num_of_special_blocks,
sizeof(*prop->special_blocks), GFP_KERNEL);
@@ -3697,13 +3768,7 @@ static int gaudi2_sw_init(struct hl_device *hdev)
spin_lock_init(&gaudi2->hw_queues_lock);
- gaudi2->scratchpad_kernel_address = hl_asic_dma_alloc_coherent(hdev, PAGE_SIZE,
- &gaudi2->scratchpad_bus_address,
- GFP_KERNEL | __GFP_ZERO);
- if (!gaudi2->scratchpad_kernel_address) {
- rc = -ENOMEM;
- goto free_virt_msix_db_mem;
- }
+ gaudi2->scratchpad_bus_address = prop->mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE;
gaudi2_user_mapped_blocks_init(hdev);
@@ -3727,7 +3792,7 @@ static int gaudi2_sw_init(struct hl_device *hdev)
rc = gaudi2_special_blocks_iterator_config(hdev);
if (rc)
- goto free_scratchpad_mem;
+ goto free_virt_msix_db_mem;
rc = gaudi2_test_queues_msgs_alloc(hdev);
if (rc)
@@ -3737,9 +3802,6 @@ static int gaudi2_sw_init(struct hl_device *hdev)
special_blocks_free:
gaudi2_special_blocks_iterator_free(hdev);
-free_scratchpad_mem:
- hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
- gaudi2->scratchpad_bus_address);
free_virt_msix_db_mem:
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
free_cpu_accessible_dma_pool:
@@ -3770,9 +3832,6 @@ static int gaudi2_sw_fini(struct hl_device *hdev)
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
- hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
- gaudi2->scratchpad_bus_address);
-
dma_pool_destroy(hdev->dma_pool);
kfree(gaudi2);
@@ -4254,6 +4313,8 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
if (gaudi2->hw_cap_initialized & HW_CAP_MSIX)
return 0;
+ hl_init_cpu_for_irq(hdev);
+
rc = pci_alloc_irq_vectors(hdev->pdev, GAUDI2_MSIX_ENTRIES, GAUDI2_MSIX_ENTRIES,
PCI_IRQ_MSIX);
if (rc < 0) {
@@ -4307,6 +4368,7 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
i++, j++, user_irq_init_cnt++) {
irq = pci_irq_vector(hdev->pdev, i);
+ hl_set_irq_affinity(hdev, irq);
rc = request_irq(irq, hl_irq_user_interrupt_handler, 0, gaudi2_irq_name(i),
&hdev->user_interrupt[j]);
if (rc) {
@@ -4333,6 +4395,7 @@ free_user_irq:
i < GAUDI2_IRQ_NUM_USER_FIRST + user_irq_init_cnt ; i++, j++) {
irq = pci_irq_vector(hdev->pdev, i);
+ irq_set_affinity_and_hint(irq, NULL);
free_irq(irq, &hdev->user_interrupt[j]);
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
@@ -4413,6 +4476,7 @@ static void gaudi2_disable_msix(struct hl_device *hdev)
k < hdev->asic_prop.user_interrupt_count ; i++, j++, k++) {
irq = pci_irq_vector(hdev->pdev, i);
+ irq_set_affinity_and_hint(irq, NULL);
free_irq(irq, &hdev->user_interrupt[j]);
}
@@ -4957,10 +5021,17 @@ static void gaudi2_init_qman_pq(struct hl_device *hdev, u32 reg_base,
q = &hdev->kernel_queues[queue_id_base + pq_id];
pq_offset = pq_id * 4;
- WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
- lower_32_bits(q->bus_address));
- WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
- upper_32_bits(q->bus_address));
+ if (q->dram_bd) {
+ WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
+ lower_32_bits(q->pq_dram_address));
+ WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
+ upper_32_bits(q->pq_dram_address));
+ } else {
+ WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
+ lower_32_bits(q->bus_address));
+ WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
+ upper_32_bits(q->bus_address));
+ }
WREG32(reg_base + QM_PQ_SIZE_0_OFFSET + pq_offset, ilog2(HL_QUEUE_LENGTH));
WREG32(reg_base + QM_PQ_PI_0_OFFSET + pq_offset, 0);
WREG32(reg_base + QM_PQ_CI_0_OFFSET + pq_offset, 0);
@@ -5847,7 +5918,8 @@ static int gaudi2_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_har
return rc;
}
-static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
+static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base,
+ bool host_resident_pgt)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 hop0_addr;
@@ -5859,7 +5931,11 @@ static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
max_asid = min((u32) 8, max_asid);
for (asid = 0 ; asid < max_asid ; asid++) {
- hop0_addr = hdev->mmu_priv.hr.mmu_asid_hop0[asid].phys_addr;
+ if (host_resident_pgt)
+ hop0_addr = hdev->mmu_priv.hr.mmu_asid_hop0[asid].phys_addr;
+ else
+ hop0_addr = prop->mmu_pgt_addr + (asid * prop->dmmu.hop_table_size);
+
rc = gaudi2_mmu_update_asid_hop0_addr(hdev, stlb_base, asid, hop0_addr);
if (rc) {
dev_err(hdev->dev, "failed to set hop0 addr for asid %d\n", asid);
@@ -5870,7 +5946,8 @@ static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
return 0;
}
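A worked, standalone example of the device-resident hop0 placement above, assuming a hop table of 512 PTEs of HL_PTE_SIZE (8) bytes each, i.e. 4KB per table, and the max_asid of 2 set in the properties hunk earlier (the pgt base value is illustrative):

#include <assert.h>
#include <stdint.h>

#define EX_HL_PTE_SIZE            8				/* one PTE is a u64 */
#define EX_HOP_TABLE_SIZE_512_PTE (512 * EX_HL_PTE_SIZE)	/* 4KB per hop table */

int main(void)
{
	uint64_t pgt_base = 0x10003fcc00000ull;	/* illustrative mmu_pgt_addr */
	unsigned int max_asid = 2;		/* from the properties hunk above */

	/* hop0 tables are laid out back to back at the start of the PGT
	 * region, exactly as gaudi2_mmu_update_hop0_addr() computes above */
	for (unsigned int asid = 0; asid < max_asid; asid++) {
		uint64_t hop0_addr = pgt_base + (uint64_t)asid * EX_HOP_TABLE_SIZE_512_PTE;

		assert(hop0_addr - pgt_base < (uint64_t)max_asid * EX_HOP_TABLE_SIZE_512_PTE);
	}
	return 0;
}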
-static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base)
+static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base,
+ bool host_resident_pgt)
{
u32 status, timeout_usec;
int rc;
@@ -5893,7 +5970,7 @@ static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb
if (rc)
dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU SRAM init\n");
- rc = gaudi2_mmu_update_hop0_addr(hdev, stlb_base);
+ rc = gaudi2_mmu_update_hop0_addr(hdev, stlb_base, host_resident_pgt);
if (rc)
return rc;
@@ -5917,6 +5994,7 @@ static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb
static int gaudi2_pci_mmu_init(struct hl_device *hdev)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 mmu_base, stlb_base;
int rc;
@@ -5956,7 +6034,7 @@ static int gaudi2_pci_mmu_init(struct hl_device *hdev)
WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_PMMU_SPI_SEI_ENABLE_MASK);
- rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
+ rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base, prop->pmmu.host_resident);
if (rc)
return rc;
@@ -6008,7 +6086,7 @@ static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id,
WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_HMMU_SPI_SEI_ENABLE_MASK);
- rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
+ rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base, prop->dmmu.host_resident);
if (rc)
return rc;
@@ -7046,7 +7124,7 @@ static int gaudi2_test_queues(struct hl_device *hdev)
/* send test message on all enabled Qs */
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
- if (!gaudi2_is_queue_enabled(hdev, i))
+ if (!gaudi2_is_queue_enabled(hdev, i) || gaudi2_is_edma_queue_id(i))
continue;
msg_info = &gaudi2->queues_test_info[i - GAUDI2_QUEUE_ID_PDMA_0_0];
@@ -7063,7 +7141,7 @@ static int gaudi2_test_queues(struct hl_device *hdev)
/* verify that all messages were processed */
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
- if (!gaudi2_is_queue_enabled(hdev, i))
+ if (!gaudi2_is_queue_enabled(hdev, i) || gaudi2_is_edma_queue_id(i))
continue;
rc = gaudi2_test_queue_wait_completion(hdev, i, sob_val);
@@ -8907,9 +8985,6 @@ static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_typ
u32 error_count = 0;
int i;
- gaudi2_print_event(hdev, event_type, true,
- "intr_cause_data: %#llx", intr_cause_data);
-
for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
if (!(intr_cause_data & BIT_ULL(i)))
continue;
@@ -8918,15 +8993,16 @@ static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_typ
"err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]);
error_count++;
- /*
- * Always check for LBW and HBW additional info as the indication itself is
- * sometimes missing
- */
+ switch (intr_cause_data & BIT_ULL(i)) {
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK:
+ hl_check_for_glbl_errors(hdev);
+ break;
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK:
+ gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
+ break;
+ }
}
- hl_check_for_glbl_errors(hdev);
- gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
-
return error_count;
}
@@ -8983,7 +9059,6 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool
if (is_pmmu) {
dev_err_ratelimited(hdev->dev, "PMMU page fault on va 0x%llx\n", addr);
} else {
-
addr = gaudi2_mmu_descramble_addr(hdev, addr);
addr &= HW_UNSCRAMBLED_BITS_MASK;
dev_err_ratelimited(hdev->dev, "HMMU page fault on va range 0x%llx - 0x%llx\n",
@@ -9514,25 +9589,17 @@ static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type)
static int gaudi2_handle_pcie_drain(struct hl_device *hdev,
struct hl_eq_pcie_drain_ind_data *drain_data)
{
- u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause, error_count = 0;
+ u64 cause, error_count = 0;
cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data);
- lbw_rd = le64_to_cpu(drain_data->drain_rd_addr_lbw);
- lbw_wr = le64_to_cpu(drain_data->drain_wr_addr_lbw);
- hbw_rd = le64_to_cpu(drain_data->drain_rd_addr_hbw);
- hbw_wr = le64_to_cpu(drain_data->drain_wr_addr_hbw);
if (cause & BIT_ULL(0)) {
- dev_err_ratelimited(hdev->dev,
- "PCIE AXI drain LBW completed, read_err %u, write_err %u\n",
- !!lbw_rd, !!lbw_wr);
+ dev_err_ratelimited(hdev->dev, "PCIE AXI drain LBW completed\n");
error_count++;
}
if (cause & BIT_ULL(1)) {
- dev_err_ratelimited(hdev->dev,
- "PCIE AXI drain HBW completed, raddr %#llx, waddr %#llx\n",
- hbw_rd, hbw_wr);
+ dev_err_ratelimited(hdev->dev, "PCIE AXI drain HBW completed\n");
error_count++;
}
@@ -10250,11 +10317,11 @@ reset_device:
}
static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
- struct packet_lin_dma *lin_dma_pkt, dma_addr_t pkt_dma_addr,
- u32 hw_queue_id, u32 size, u64 addr, u32 val)
+ struct packet_lin_dma *lin_dma_pkt,
+ u64 phys_addr, u32 hw_queue_id, u32 size, u64 addr, u32 val)
{
u32 ctl, pkt_size;
- int rc = 0;
+ int rc = 0, i;
ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_MEMSET_MASK, 1);
@@ -10268,9 +10335,20 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
pkt_size = sizeof(struct packet_lin_dma);
- rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, pkt_dma_addr);
+ for (i = 0; i < 3; i++) {
+ rc = hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM,
+ phys_addr + (i * sizeof(u64)),
+ ((u64 *)(lin_dma_pkt)) + i, DEBUGFS_WRITE64);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to copy lin_dma packet to HBM (%#llx)\n",
+ phys_addr);
+ return rc;
+ }
+ }
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin dma packet to H/W queue %d\n",
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
hw_queue_id);
return rc;
@@ -10283,12 +10361,11 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0};
u32 chunk_size, dcore, edma_idx, sob_offset, sob_addr, comp_val,
- old_mmubp, mmubp, num_of_pkts, busy, pkt_size;
+ old_mmubp, mmubp, num_of_pkts, busy, pkt_size, cb_len;
u64 comp_addr, cur_addr = addr, end_addr = addr + size;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc = 0, dma_num = 0, i;
void *lin_dma_pkts_arr;
- dma_addr_t pkt_dma_addr;
- int rc = 0, dma_num = 0;
if (prop->edma_enabled_mask == 0) {
dev_info(hdev->dev, "none of the EDMA engines is enabled - skip dram scrubbing\n");
@@ -10306,9 +10383,19 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
/* Calculate how many lin dma pkts we'll need */
num_of_pkts = div64_u64(round_up(size, SZ_2G), SZ_2G);
pkt_size = sizeof(struct packet_lin_dma);
+ cb_len = pkt_size * num_of_pkts;
+
+ /*
+ * If we are not scrubbing the HMMU or NIC reserved sections in the HBM,
+ * then this is a scrub of the user section. As we use the start of the
+ * user section to store the CB of the EDMA QM, shift the start address
+ * of the scrubbing accordingly and scrub the CB section before leaving
+ * this function.
+ */
+ if ((addr >= prop->dram_user_base_address) &&
+ (addr < prop->dram_user_base_address + cb_len))
+ cur_addr += (prop->dram_user_base_address + cb_len) - addr;
- lin_dma_pkts_arr = hl_asic_dma_alloc_coherent(hdev, pkt_size * num_of_pkts,
- &pkt_dma_addr, GFP_KERNEL);
+ lin_dma_pkts_arr = kvcalloc(num_of_pkts, pkt_size, GFP_KERNEL);
if (!lin_dma_pkts_arr)
return -ENOMEM;
@@ -10354,7 +10441,7 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
rc = gaudi2_memset_memory_chunk_using_edma_qm(hdev,
(struct packet_lin_dma *)lin_dma_pkts_arr + dma_num,
- pkt_dma_addr + dma_num * pkt_size,
+ prop->dram_user_base_address + (dma_num * pkt_size),
edma_queues_id[dcore] + edma_idx * 4,
chunk_size, cur_addr, val);
if (rc)
@@ -10363,14 +10450,16 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
dma_num++;
cur_addr += chunk_size;
if (cur_addr == end_addr)
- break;
+ goto edma_wait;
}
}
}
+edma_wait:
rc = hl_poll_timeout(hdev, sob_addr, busy, (busy == dma_num), 1000, 1000000);
if (rc) {
- dev_err(hdev->dev, "DMA Timeout during HBM scrubbing\n");
+ dev_err(hdev->dev, "DMA Timeout during HBM scrubbing (sob: 0x%x, dma_num: 0x%x)\n",
+ busy, dma_num);
goto end;
}
end:
@@ -10391,8 +10480,16 @@ end:
}
}
+ memset(lin_dma_pkts_arr, 0, sizeof(u64));
+
+ /* Zero the HBM area where we copied the CB */
+ for (i = 0; i < cb_len; i += sizeof(u64))
+ rc = hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM,
+ prop->dram_user_base_address + i,
+ (u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
WREG32(sob_addr, 0);
- hl_asic_dma_free_coherent(hdev, pkt_size * num_of_pkts, lin_dma_pkts_arr, pkt_dma_addr);
+
+ kfree(lin_dma_pkts_arr);
return rc;
}
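Since the host-coherent DMA buffer is gone, the LIN_DMA command buffers are staged at the start of the user HBM section and fetched from there by the EDMA QM. A standalone sketch of the chunking arithmetic used above, mirroring num_of_pkts = div64_u64(round_up(size, SZ_2G), SZ_2G):

#include <stdio.h>
#include <stdint.h>

#define EX_SZ_2G (2ull << 30)

/* Round-up division: one LIN_DMA packet per 2GB chunk */
static uint64_t lin_dma_pkt_count(uint64_t size)
{
	return (size + EX_SZ_2G - 1) / EX_SZ_2G;
}

int main(void)
{
	/* e.g. scrubbing 96GB of HBM takes 48 LIN_DMA packets of 2GB each,
	 * all staged at dram_user_base_address for the EDMA QM to fetch */
	printf("%llu packets\n", (unsigned long long)lin_dma_pkt_count(96ull << 30));
	return 0;
}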
@@ -11450,7 +11547,7 @@ static int gaudi2_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_p
return 0;
page_size_err:
- dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
+ dev_err(hdev->dev, "page size of 0x%X is not 0x%X aligned, can't map\n",
page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
@@ -11470,6 +11567,29 @@ int gaudi2_send_device_activity(struct hl_device *hdev, bool open)
return hl_fw_send_device_activity(hdev, open);
}
+static u64 gaudi2_read_pte(struct hl_device *hdev, u64 addr)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 val;
+
+ if (hdev->reset_info.hard_reset_pending)
+ return U64_MAX;
+
+ val = readq(hdev->pcie_bar[DRAM_BAR_ID] + (addr - gaudi2->dram_bar_cur_addr));
+
+ return val;
+}
+
+static void gaudi2_write_pte(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (hdev->reset_info.hard_reset_pending)
+ return;
+
+ writeq(val, hdev->pcie_bar[DRAM_BAR_ID] + (addr - gaudi2->dram_bar_cur_addr));
+}
+
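With the HMMU page tables device-resident, the common MMU code reaches a PTE through these callbacks instead of host memory. A hedged sketch of the caller shape (the example_* wrappers are illustrative; only the asic_funcs indirection, wired up in the hunk below, is taken from the patch):

/* Illustrative caller shape only - the common MMU code dispatches through
 * hdev->asic_funcs, so the same page walk works whether the tables live
 * in host memory or in the HBM behind the DRAM BAR. */
static u64 example_get_pte(struct hl_device *hdev, u64 pte_addr)
{
	return hdev->asic_funcs->read_pte(hdev, pte_addr);
}

static void example_set_pte(struct hl_device *hdev, u64 pte_addr, u64 pte_val)
{
	hdev->asic_funcs->write_pte(hdev, pte_addr, pte_val);
}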
static const struct hl_asic_funcs gaudi2_funcs = {
.early_init = gaudi2_early_init,
.early_fini = gaudi2_early_fini,
@@ -11506,8 +11626,8 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.add_device_attr = gaudi2_add_device_attr,
.handle_eqe = gaudi2_handle_eqe,
.get_events_stat = gaudi2_get_events_stat,
- .read_pte = NULL,
- .write_pte = NULL,
+ .read_pte = gaudi2_read_pte,
+ .write_pte = gaudi2_write_pte,
.mmu_invalidate_cache = gaudi2_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi2_mmu_invalidate_cache_range,
.mmu_prefetch_cache_range = NULL,
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 9b9eef0d97d6..eee41387b269 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -19,8 +19,6 @@
#define GAUDI2_LINUX_FW_FILE "habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE "habanalabs/gaudi2/gaudi2-boot-fit.itb"
-#define MMU_PAGE_TABLES_INITIAL_SIZE 0x10000000 /* 256MB */
-
#define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define NUMBER_OF_PDMA_QUEUES 2
@@ -109,13 +107,11 @@
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
-
-/* This define should be used only when working in a debug mode without dram.
- * When working with dram, the driver size will be calculated dynamically.
- */
-#define NIC_DEFAULT_DRV_SIZE 0x20000000 /* 512MB */
-
#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
+#define PMMU_PAGE_TABLES_SIZE 0x10000000 /* 256MB */
+#define EDMA_PQS_SIZE SZ_2M
+#define EDMA_SCRATCHPAD_SIZE SZ_1M
+#define HMMU_PAGE_TABLES_SIZE SZ_1M
#define NIC_NUMBER_OF_PORTS NIC_NUMBER_OF_ENGINES
@@ -241,9 +237,8 @@
#define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
-#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
-#define GAUDI2_NUM_OF_GLBL_ERR_CAUSE 8
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
diff --git a/drivers/accel/habanalabs/goya/goya.c b/drivers/accel/habanalabs/goya/goya.c
index 1322cb330c57..5a359c3bdc78 100644
--- a/drivers/accel/habanalabs/goya/goya.c
+++ b/drivers/accel/habanalabs/goya/goya.c
@@ -413,8 +413,6 @@ int goya_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
- prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = true;
@@ -435,8 +433,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
prop->dmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
- prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->dmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->dmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
/* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
@@ -446,8 +444,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
prop->pmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
- prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->pmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -2678,7 +2676,7 @@ int goya_mmu_init(struct hl_device *hdev)
for (i = 0 ; i < prop->max_asid ; i++) {
hop0_addr = prop->mmu_pgt_addr +
- (i * prop->mmu_hop_table_size);
+ (i * prop->dmmu.hop_table_size);
rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
if (rc) {
diff --git a/drivers/accel/habanalabs/goya/goya_coresight.c b/drivers/accel/habanalabs/goya/goya_coresight.c
index 41cae5fd843b..3827ea4c02f7 100644
--- a/drivers/accel/habanalabs/goya/goya_coresight.c
+++ b/drivers/accel/habanalabs/goya/goya_coresight.c
@@ -576,7 +576,6 @@ static int goya_config_spmu(struct hl_device *hdev,
struct hl_debug_params *params)
{
u64 base_reg;
- struct hl_debug_params_spmu *input = params->input;
u64 *output;
u32 output_arr_len;
u32 events_num;
@@ -592,7 +591,7 @@ static int goya_config_spmu(struct hl_device *hdev,
base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
if (params->enable) {
- input = params->input;
+ struct hl_debug_params_spmu *input = params->input;
if (!input)
return -EINVAL;
diff --git a/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
index d408feecd483..b4a5e95be354 100644
--- a/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -26,6 +26,8 @@
#define LAST_MASK 0x0000000000800ull
#define FLAGS_MASK 0x0000000000FFFull
+#define MMU_ARCH_3_HOPS 3
+#define MMU_ARCH_4_HOPS 4
#define MMU_ARCH_5_HOPS 5
#define MMU_ARCH_6_HOPS 6
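The new 3- and 4-hop counts select how many levels of a page walk are taken; the per-hop PTE index comes from the hop_shifts/hop_masks properties configured in the gaudi2 hunks above. A standalone sketch of that index computation (the helper name is illustrative):

#include <stdint.h>

/* Index of the PTE within hop table i, derived from the per-MMU
 * hop_shifts/hop_masks properties; a 4-hop walk evaluates this for
 * hops 0..3, a 6-hop walk for hops 0..5. */
static inline uint64_t hop_pte_idx(uint64_t va, uint64_t hop_mask, uint32_t hop_shift)
{
	return (va & hop_mask) >> hop_shift;
}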
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 7cb962e21453..d09d29775b3f 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -287,22 +287,6 @@ static const struct file_operations fw_trace_level_fops = {
};
static ssize_t
-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
-{
- struct ivpu_device *vdev = file->private_data;
-
- if (!size)
- return -EINVAL;
-
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
- return -ENODEV;
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
- return -ENODEV;
-
- return size;
-}
-
-static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
@@ -327,6 +311,22 @@ static const struct file_operations ivpu_force_recovery_fops = {
.write = ivpu_force_recovery_fn,
};
+static ssize_t
+ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+
+ if (!size)
+ return -EINVAL;
+
+ if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+ return -ENODEV;
+ if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
+ return -ENODEV;
+
+ return size;
+}
+
static const struct file_operations ivpu_reset_engine_fops = {
.owner = THIS_MODULE,
.open = simple_open,
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 4b0640226986..39f6d1b98fd6 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -45,11 +45,11 @@ MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
-MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");
+MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");
u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
-MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
+MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
@@ -328,13 +328,13 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
ivpu_ipc_consumer_del(vdev, &cons);
if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
- ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
+ ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
ipc_hdr.data_addr);
return -EIO;
}
if (!ret)
- ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
+ ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");
return ret;
}
@@ -532,6 +532,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
atomic64_set(&vdev->unique_id_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
@@ -605,6 +606,7 @@ err_power_down:
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
+ xa_destroy(&vdev->db_xa);
xa_destroy(&vdev->submitted_jobs_xa);
xa_destroy(&vdev->context_xa);
return ret;
@@ -640,6 +642,8 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
ivpu_mmu_reserved_context_fini(vdev);
ivpu_mmu_global_context_fini(vdev);
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
+ xa_destroy(&vdev->db_xa);
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
xa_destroy(&vdev->submitted_jobs_xa);
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 069ace4adb2d..7be0500d9bb8 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -36,6 +36,9 @@
#define IVPU_USER_CONTEXT_MIN_SSID 2
#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63)
+#define IVPU_MIN_DB 1
+#define IVPU_MAX_DB 255
+
#define IVPU_NUM_ENGINES 2
#define IVPU_PLATFORM_SILICON 0
@@ -119,6 +122,8 @@ struct ivpu_device {
struct xarray context_xa;
struct xa_limit context_xa_limit;
+ struct xarray db_xa;
+
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
@@ -189,7 +194,7 @@ static inline int ivpu_hw_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
default:
- ivpu_err(vdev, "Unknown VPU device\n");
+ ivpu_err(vdev, "Unknown NPU device\n");
return 0;
}
}
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 5fa8bd4603d5..1457300828bf 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -46,15 +46,13 @@
static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
-MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
+MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
-/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
int gen;
const char *name;
} fw_names[] = {
{ IVPU_HW_37XX, "vpu_37xx.bin" },
- { IVPU_HW_37XX, "mtl_vpu.bin" },
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_40XX, "vpu_40xx.bin" },
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
@@ -250,6 +248,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
+ struct ivpu_addr_range fw_range;
int log_verb_size;
int ret;
@@ -257,16 +256,19 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ fw_range.start = fw->runtime_addr;
+ fw_range.end = fw->runtime_addr + fw->runtime_size;
+ fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem) {
- ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
+ ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
return -ENOMEM;
}
- fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
- DRM_IVPU_BO_CACHED);
+ fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem_log_crit) {
- ivpu_err(vdev, "Failed to allocate critical log buffer\n");
+ ivpu_err(vdev, "Failed to create critical log buffer\n");
ret = -ENOMEM;
goto err_free_fw_mem;
}
@@ -276,18 +278,19 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
- fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
+ fw->mem_log_verb = ivpu_bo_create_global(vdev, log_verb_size,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem_log_verb) {
- ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
+ ivpu_err(vdev, "Failed to create verbose log buffer\n");
ret = -ENOMEM;
goto err_free_log_crit;
}
if (fw->shave_nn_size) {
- fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
- fw->shave_nn_size, DRM_IVPU_BO_WC);
+ fw->mem_shave_nn = ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.shave,
+ fw->shave_nn_size, DRM_IVPU_BO_WC);
if (!fw->mem_shave_nn) {
- ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
+ ivpu_err(vdev, "Failed to create shavenn buffer\n");
ret = -ENOMEM;
goto err_free_log_verb;
}
@@ -296,11 +299,11 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
return 0;
err_free_log_verb:
- ivpu_bo_free_internal(fw->mem_log_verb);
+ ivpu_bo_free(fw->mem_log_verb);
err_free_log_crit:
- ivpu_bo_free_internal(fw->mem_log_crit);
+ ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
- ivpu_bo_free_internal(fw->mem);
+ ivpu_bo_free(fw->mem);
return ret;
}
@@ -309,13 +312,13 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
struct ivpu_fw_info *fw = vdev->fw;
if (fw->mem_shave_nn) {
- ivpu_bo_free_internal(fw->mem_shave_nn);
+ ivpu_bo_free(fw->mem_shave_nn);
fw->mem_shave_nn = NULL;
}
- ivpu_bo_free_internal(fw->mem_log_verb);
- ivpu_bo_free_internal(fw->mem_log_crit);
- ivpu_bo_free_internal(fw->mem);
+ ivpu_bo_free(fw->mem_log_verb);
+ ivpu_bo_free(fw->mem_log_crit);
+ ivpu_bo_free(fw->mem);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
@@ -469,6 +472,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_residency_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
boot_params->d0i3_entry_vpu_ts);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
+ boot_params->system_time_us);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -480,11 +485,14 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_residency_time_us =
ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);
boot_params->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts;
+ boot_params->system_time_us = ktime_to_us(ktime_get_real());
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
boot_params->d0i3_residency_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
boot_params->d0i3_entry_vpu_ts);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
+ boot_params->system_time_us);
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
@@ -562,6 +570,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
+ boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
ivpu_fw_boot_params_print(vdev, boot_params);
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
index f6770f5e82a2..ef0adb5e0fbe 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.c
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -20,7 +20,7 @@
unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
module_param(ivpu_log_level, uint, 0444);
MODULE_PARM_DESC(ivpu_log_level,
- "VPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+ "NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
@@ -121,11 +121,11 @@ void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_
u32 next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "VPU critical", only_new_msgs, p);
+ fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);
next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "VPU verbose", only_new_msgs, p);
+ fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
}
void ivpu_fw_log_clear(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index e9ddbe9f50eb..1b409dbd332d 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -172,8 +172,7 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
return &bo->base.base;
}
-static struct ivpu_bo *
-ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -201,7 +200,7 @@ ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
return bo;
}
-static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
+static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -224,7 +223,7 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
}
-static void ivpu_bo_free(struct drm_gem_object *obj)
+static void ivpu_gem_bo_free(struct drm_gem_object *obj)
{
struct ivpu_device *vdev = to_ivpu_device(obj->dev);
struct ivpu_bo *bo = to_ivpu_bo(obj);
@@ -245,8 +244,8 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
}
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
- .free = ivpu_bo_free,
- .open = ivpu_bo_open,
+ .free = ivpu_gem_bo_free,
+ .open = ivpu_gem_bo_open,
.print_info = drm_gem_shmem_object_print_info,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
@@ -272,9 +271,9 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (size == 0)
return -EINVAL;
- bo = ivpu_bo_create(vdev, size, args->flags);
+ bo = ivpu_bo_alloc(vdev, size, args->flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
+ ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
@@ -289,33 +288,28 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
}
struct ivpu_bo *
-ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
+ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ struct ivpu_addr_range *range, u64 size, u32 flags)
{
- const struct ivpu_addr_range *range;
- struct ivpu_addr_range fixed_range;
struct iosys_map map;
struct ivpu_bo *bo;
int ret;
- drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
- drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
+ if (drm_WARN_ON(&vdev->drm, !range))
+ return NULL;
- if (vpu_addr) {
- fixed_range.start = vpu_addr;
- fixed_range.end = vpu_addr + size;
- range = &fixed_range;
- } else {
- range = &vdev->hw->ranges.global;
- }
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->start));
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_create(vdev, size, flags);
+ bo = ivpu_bo_alloc(vdev, size, flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
- bo, vpu_addr, size, flags);
+ ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ bo, range->start, size, flags);
return NULL;
}
- ret = ivpu_bo_alloc_vpu_addr(bo, &vdev->gctx, range);
+ ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
if (ret)
goto err_put;
@@ -323,11 +317,14 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
if (ret)
goto err_put;
- dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
- if (ret)
- goto err_put;
+ if (flags & DRM_IVPU_BO_MAPPABLE) {
+ dma_resv_lock(bo->base.base.resv, NULL);
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
+
+ if (ret)
+ goto err_put;
+ }
return bo;
@@ -336,13 +333,20 @@ err_put:
return NULL;
}
-void ivpu_bo_free_internal(struct ivpu_bo *bo)
+struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
+{
+ return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
+}
+
+void ivpu_bo_free(struct ivpu_bo *bo)
{
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
- dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
+ dma_resv_lock(bo->base.base.resv, NULL);
+ drm_gem_shmem_vunmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
+ }
drm_gem_object_put(&bo->base.base);
}
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index a8559211c70d..fb7117c13eec 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -28,8 +28,10 @@ int ivpu_bo_pin(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
-struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
-void ivpu_bo_free_internal(struct ivpu_bo *bo);
+struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ struct ivpu_addr_range *range, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
+void ivpu_bo_free(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
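Callers that used to pass a vpu_addr of 0 to ivpu_bo_alloc_internal() now go through the global-range helper, and must request DRM_IVPU_BO_MAPPABLE explicitly when they need a CPU mapping. A hedged sketch of the converted call pattern (size and flags mirror the ivpu_ipc.c hunk below; the wrapper name is illustrative):

/* Sketch only - not part of the patch. */
static int example_buf_init(struct ivpu_device *vdev, struct ivpu_bo **bo)
{
	*bo = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!*bo)
		return -ENOMEM;

	/* A CPU mapping exists only because DRM_IVPU_BO_MAPPABLE was passed;
	 * without the flag, ivpu_bo_create() skips the vmap entirely. */
	return 0;
}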
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 89af1006df55..9a0c9498baba 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -13,7 +13,7 @@
#include "ivpu_pm.h"
#define TILE_FUSE_ENABLE_BOTH 0x0
-#define TILE_SKU_BOTH_MTL 0x3630
+#define TILE_SKU_BOTH 0x3630
/* Work point configuration values */
#define CONFIG_1_TILE 0x01
@@ -228,7 +228,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
if (ret) {
- ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
+ ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
return ret;
}
}
@@ -589,7 +589,7 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
struct ivpu_hw_info *hw = vdev->hw;
hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
- hw->sku = TILE_SKU_BOTH_MTL;
+ hw->sku = TILE_SKU_BOTH;
hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
ivpu_pll_init_frequency_ratios(vdev);
@@ -762,10 +762,10 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);
if (!ivpu_hw_37xx_is_idle(vdev))
- ivpu_warn(vdev, "VPU not idle during power down\n");
+ ivpu_warn(vdev, "NPU not idle during power down\n");
if (ivpu_hw_37xx_reset(vdev)) {
- ivpu_err(vdev, "Failed to reset VPU\n");
+ ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index a1523d0b1ef3..e4eddbf5d11c 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -80,11 +80,11 @@ static char *ivpu_platform_to_str(u32 platform)
{
switch (platform) {
case IVPU_PLATFORM_SILICON:
- return "IVPU_PLATFORM_SILICON";
+ return "SILICON";
case IVPU_PLATFORM_SIMICS:
- return "IVPU_PLATFORM_SIMICS";
+ return "SIMICS";
case IVPU_PLATFORM_FPGA:
- return "IVPU_PLATFORM_FPGA";
+ return "FPGA";
default:
return "Invalid platform";
}
@@ -768,7 +768,7 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
int ret = 0;
if (ivpu_hw_40xx_ip_reset(vdev)) {
- ivpu_err(vdev, "Failed to reset VPU IP\n");
+ ivpu_err(vdev, "Failed to reset NPU IP\n");
ret = -EIO;
}
@@ -926,7 +926,7 @@ static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
- ivpu_warn(vdev, "Failed to reset the VPU\n");
+ ivpu_warn(vdev, "Failed to reset the NPU\n");
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index fa66c39b57ec..04ac4b9840fb 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -58,8 +58,8 @@ static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
- ivpu_bo_free_internal(ipc->mem_rx);
- ivpu_bo_free_internal(ipc->mem_tx);
+ ivpu_bo_free(ipc->mem_rx);
+ ivpu_bo_free(ipc->mem_tx);
}
static int
@@ -471,13 +471,13 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
struct ivpu_ipc_info *ipc = vdev->ipc;
int ret;
- ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!ipc->mem_tx) {
ivpu_err(vdev, "Failed to allocate mem_tx\n");
return -ENOMEM;
}
- ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!ipc->mem_rx) {
ivpu_err(vdev, "Failed to allocate mem_rx\n");
ret = -ENOMEM;
@@ -506,9 +506,9 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
return 0;
err_free_rx:
- ivpu_bo_free_internal(ipc->mem_rx);
+ ivpu_bo_free(ipc->mem_rx);
err_free_tx:
- ivpu_bo_free_internal(ipc->mem_tx);
+ ivpu_bo_free(ipc->mem_tx);
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index e70cfb859339..a49bc9105ed0 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -30,19 +30,26 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
{
+ struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
struct ivpu_cmdq *cmdq;
+ int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return NULL;
- cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
+ ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
+ goto err_free_cmdq;
+ }
+
+ cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto cmdq_free;
+ goto err_erase_xa;
- cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
sizeof(struct vpu_job_queue_entry));
@@ -55,7 +62,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 e
return cmdq;
-cmdq_free:
+err_erase_xa:
+ xa_erase(&vdev->db_xa, cmdq->db_id);
+err_free_cmdq:
kfree(cmdq);
return NULL;
}
@@ -65,7 +74,8 @@ static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *c
if (!cmdq)
return;
- ivpu_bo_free_internal(cmdq->mem);
+ ivpu_bo_free(cmdq->mem);
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
kfree(cmdq);
}
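The doorbell id is now allocated from a device-wide xarray instead of being derived from the context id, so ids stay unique across contexts and are returned on free. A minimal sketch of that allocate/erase pairing (limits come from the ivpu_drv.h hunk above; the helper names are illustrative):

static int example_db_get(struct ivpu_device *vdev, u32 *db_id)
{
	struct xa_limit limit = { .min = IVPU_MIN_DB, .max = IVPU_MAX_DB };

	/* NULL entry: the xarray is used purely as an id allocator here */
	return xa_alloc(&vdev->db_xa, db_id, NULL, limit, GFP_KERNEL);
}

static void example_db_put(struct ivpu_device *vdev, u32 db_id)
{
	xa_erase(&vdev->db_xa, db_id);
}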
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 5f73854234ba..9973945dff5d 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -22,7 +22,7 @@
static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
-MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
+MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");
static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
@@ -118,11 +118,11 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret;
- ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
ret = pm_runtime_resume_and_get(vdev->drm.dev);
if (ret)
- ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
ivpu_fw_log_dump(vdev);
@@ -260,10 +260,10 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
ret = ivpu_suspend(vdev);
if (ret)
- ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
+ ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);
if (!hw_is_idle) {
- ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
ivpu_fw_log_dump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 04c954258563..87cac7bc730a 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (c) 2020-2023, Intel Corporation.
*/
#ifndef VPU_BOOT_API_H
@@ -27,12 +27,12 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 20
+#define VPU_BOOT_API_VER_MINOR 22
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 4
+#define VPU_BOOT_API_VER_PATCH 0
/*
* Index in the API version table
@@ -41,7 +41,7 @@
#define VPU_BOOT_API_VER_INDEX 0
/* ------------ FW API version information end ---------------------*/
-#pragma pack(push, 1)
+#pragma pack(push, 4)
/*
* Firmware image header format
@@ -66,9 +66,17 @@ struct vpu_firmware_header {
/* Size of memory require for firmware execution */
u32 runtime_size;
u32 shave_nn_fw_size;
- /* Size of primary preemption buffer. */
+ /*
+ * Size of primary preemption buffer, assuming a 2-job submission queue.
+ * NOTE: the host driver is expected to adapt the size according to the
+ * actual submission queue size and device capabilities.
+ */
u32 preemption_buffer_1_size;
- /* Size of secondary preemption buffer. */
+ /*
+ * Size of secondary preemption buffer, assuming a 2-job submission queue.
+ * NOTE: the host driver is expected to adapt the size according to the
+ * actual submission queue size and device capabilities.
+ */
u32 preemption_buffer_2_size;
/* Space reserved for future preemption-related fields. */
u32 preemption_reserved[6];
@@ -181,10 +189,10 @@ struct vpu_warm_boot_section {
#define VPU_PRESENT_CALL_PERIOD_MS_MAX 10000
/**
- * Macros to enable various operation modes within the VPU.
+ * Macros to enable various power profiles within the NPU.
* To be defined as part of 32 bit mask.
*/
-#define VPU_OP_MODE_SURVIVABILITY 0x1
+#define POWER_PROFILE_SURVIVABILITY 0x1
struct vpu_boot_params {
u32 magic;
@@ -317,7 +325,15 @@ struct vpu_boot_params {
u64 d0i3_residency_time_us;
/* Value of VPU perf counter at the time of entering D0i3 state . */
u64 d0i3_entry_vpu_ts;
- u32 pad4[20];
+ /*
+ * The system time of the host operating system in microseconds.
+ * E.g. the number of microseconds since the 1st of January 1970, or whatever date the
+ * host operating system uses to maintain system time.
+ * This value will be used to track system time on the VPU.
+ * The KMD is required to update this value on every VPU reset.
+ */
+ u64 system_time_us;
+ u32 pad4[18];
/* Warm boot information: 0x400 - 0x43F */
u32 warm_boot_sections_count;
u32 warm_boot_start_address_reference;
@@ -344,10 +360,14 @@ struct vpu_boot_params {
u32 vpu_focus_present_timer_ms;
/* VPU ECC Signaling */
u32 vpu_uses_ecc_mca_signal;
- /* Values defined by VPU_OP_MODE* macros */
- u32 vpu_operation_mode;
- /* Unused/reserved: 0x480 - 0xFFF */
- u32 pad6[736];
+ /* Values defined by POWER_PROFILE* macros */
+ u32 power_profile;
+ /* Microsecond value for DCT active cycle */
+ u32 dct_active_us;
+ /* Microsecond value for DCT inactive cycle */
+ u32 dct_inactive_us;
+ /* Unused/reserved: 0x488 - 0xFFF */
+ u32 pad6[734];
};
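The padding bookkeeping above is easy to misread, so a standalone compile-time check (illustrative, using stdint types): the new u64 consumes two u32 pad4 slots (20 -> 18), and the two new DCT fields push the reserved area from 0x480 to 0x488 (pad6 736 -> 734):

#include <stdint.h>

_Static_assert(20 * sizeof(uint32_t) == sizeof(uint64_t) + 18 * sizeof(uint32_t),
	       "pad4 shrinks by exactly one u64");
_Static_assert(736 * sizeof(uint32_t) == 2 * sizeof(uint32_t) + 734 * sizeof(uint32_t),
	       "pad6 shrinks by exactly two u32");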
/*
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 7da7622742be..e46f3531211a 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (c) 2020-2023, Intel Corporation.
*/
/**
@@ -27,7 +27,7 @@
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_JSM_API_VER_PATCH 0
+#define VPU_JSM_API_VER_PATCH 6
/*
* Index in the API version table
@@ -43,8 +43,11 @@
/* Max number of impacted contexts that can be dealt with the engine reset command */
#define VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS 3
-/** Pack the API structures for now, once alignment issues are fixed this can be removed */
-#pragma pack(push, 1)
+/*
+ * Pack the API structures to enforce binary compatibility.
+ * Align to 8 bytes for optimal performance.
+ */
+#pragma pack(push, 8)
/*
* Engine indexes.
@@ -125,6 +128,19 @@
#define VPU_HWS_MAX_REALTIME_PRIORITY_LEVEL 31U
/*
+ * vpu_jsm_engine_reset_context flag definitions
+ */
+#define VPU_ENGINE_RESET_CONTEXT_FLAG_COLLATERAL_DAMAGE_MASK BIT(0)
+#define VPU_ENGINE_RESET_CONTEXT_HANG_PRIMARY_CAUSE 0
+#define VPU_ENGINE_RESET_CONTEXT_COLLATERAL_DAMAGE 1
+
+/*
+ * Invalid command queue handle identifier. Applies to cmdq_id and cmdq_group
+ * in this API.
+ */
+#define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL
+
+/*
* Job format.
*/
struct vpu_job_queue_entry {
@@ -613,7 +629,7 @@ struct vpu_jsm_engine_reset_context {
u32 reserved_0;
/* Command queue id */
u64 cmdq_id;
- /* Flags: 0: cause of hang; 1: collateral damage of reset */
+ /* See VPU_ENGINE_RESET_CONTEXT_* defines */
u64 flags;
};
@@ -730,11 +746,7 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u32 host_ssid;
/* Engine for which queue is being created */
u32 engine_idx;
- /*
- * Cmdq group may be set to 0 or equal to
- * cmdq_id while each priority band contains
- * only single engine instances.
- */
+ /* Cmdq group: only used for HWS logging of state changes */
u64 cmdq_group;
/* Command queue id */
u64 cmdq_id;
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index cb77d048ed54..51cb85d0387b 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -358,8 +358,8 @@ static struct mhi_channel_config aic100_channels[] = {
.wake_capable = false,
},
{
- .num = 21,
.name = "QAIC_TIMESYNC",
+ .num = 21,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
@@ -390,8 +390,8 @@ static struct mhi_channel_config aic100_channels[] = {
.wake_capable = false,
},
{
- .num = 23,
.name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 23,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 582836f9538f..9256653b3036 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -30,6 +30,7 @@
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
enum __packed dev_states {
/* Device is offline or will be very soon */
@@ -191,8 +192,6 @@ struct qaic_bo {
u32 nr_slice;
/* Number of slice that have been transferred by DMA engine */
u32 nr_slice_xfer_done;
- /* true = BO is queued for execution, true = BO is not queued */
- bool queued;
/*
* If true then user has attached slicing information to this BO by
* calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 03c9a793da35..2459fe4a3f95 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -141,6 +141,11 @@ struct dbc_rsp {
__le16 status;
} __packed;
+static inline bool bo_queued(struct qaic_bo *bo)
+{
+ return !list_empty(&bo->xfer_list);
+}
+
inline int get_dbc_req_elem_size(void)
{
return sizeof(struct dbc_req);
@@ -569,6 +574,9 @@ static void qaic_free_sgt(struct sg_table *sgt)
{
struct scatterlist *sg;
+ if (!sgt)
+ return;
+
for (sg = sgt->sgl; sg; sg = sg_next(sg))
if (sg_page(sg))
__free_pages(sg_page(sg), get_order(sg->length));
@@ -648,6 +656,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
}
complete_all(&bo->xfer_done);
INIT_LIST_HEAD(&bo->slices);
+ INIT_LIST_HEAD(&bo->xfer_list);
}
static struct qaic_bo *qaic_alloc_init_bo(void)
@@ -709,9 +718,13 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto free_bo;
+
ret = drm_gem_handle_create(file_priv, obj, &args->handle);
if (ret)
- goto free_sgt;
+ goto free_bo;
bo->handle = args->handle;
drm_gem_object_put(obj);
@@ -720,10 +733,8 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
return 0;
-free_sgt:
- qaic_free_sgt(bo->sgt);
free_bo:
- kfree(bo);
+ drm_gem_object_put(obj);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
@@ -738,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
struct drm_gem_object *obj;
struct qaic_device *qdev;
struct qaic_user *usr;
- int ret;
+ int ret = 0;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -760,9 +771,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
goto unlock_dev_srcu;
}
- ret = drm_gem_create_mmap_offset(obj);
- if (ret == 0)
- args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put(obj);
@@ -828,9 +837,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
struct sg_table *sgt;
int ret;
- if (obj->import_attach->dmabuf->size < hdr->size)
- return -EINVAL;
-
sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
@@ -847,9 +853,6 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.size < hdr->size)
- return -EINVAL;
-
ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
if (ret)
return -EFAULT;
@@ -950,9 +953,6 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (arg_size / args->hdr.count != sizeof(*slice_ent))
return -EINVAL;
- if (args->hdr.size == 0)
- return -EINVAL;
-
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
return -EINVAL;
@@ -992,16 +992,16 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
goto free_slice_ent;
}
- ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
- if (ret)
- goto free_slice_ent;
-
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
goto free_slice_ent;
}
+ ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, obj->size);
+ if (ret)
+ goto put_bo;
+
bo = to_qaic_bo(obj);
ret = mutex_lock_interruptible(&bo->lock);
if (ret)
@@ -1173,7 +1173,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
struct bo_slice *slice;
unsigned long flags;
struct qaic_bo *bo;
- bool queued;
int i, j;
int ret;
@@ -1205,9 +1204,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
}
spin_lock_irqsave(&dbc->xfer_lock, flags);
- queued = bo->queued;
- bo->queued = true;
- if (queued) {
+ if (bo_queued(bo)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EINVAL;
goto unlock_bo;
@@ -1230,7 +1227,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) {
- bo->queued = false;
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
goto unlock_bo;
}
@@ -1253,8 +1249,7 @@ failed_to_send_bo:
spin_lock_irqsave(&dbc->xfer_lock, flags);
bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
obj = &bo->base;
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
drm_gem_object_put(obj);
@@ -1615,8 +1610,7 @@ read_fifo:
*/
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
bo->nr_slice_xfer_done = 0;
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
bo->perf_stats.req_processed_ts = ktime_get_ns();
complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base);
@@ -1875,7 +1869,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
/* Check if BO is committed to H/W for DMA */
spin_lock_irqsave(&dbc->xfer_lock, flags);
- if (bo->queued) {
+ if (bo_queued(bo)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EBUSY;
goto unlock_ch_srcu;
@@ -1905,8 +1899,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
spin_lock_irqsave(&dbc->xfer_lock, flags);
while (!list_empty(&dbc->xfer_list)) {
bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
bo->nr_slice_xfer_done = 0;
bo->req_id = 0;
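The qaic_data.c changes above replace the bo->queued flag with list membership: a BO counts as queued exactly while it is linked into dbc->xfer_list, and every dequeue path switches from list_del() to list_del_init() so the node points back at itself and a later bo_queued() test remains well defined. A minimal sketch of the idiom, with illustrative names rather than the actual qaic structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct xfer_item {
	struct list_head node;		/* empty <=> not queued */
};

static DEFINE_SPINLOCK(xfer_lock);
static LIST_HEAD(xfer_list);

/* mirrors bo_queued(): valid only because dequeue uses list_del_init() */
static bool item_queued(struct xfer_item *it)
{
	return !list_empty(&it->node);
}

static int item_queue(struct xfer_item *it)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&xfer_lock, flags);
	if (item_queued(it))
		ret = -EINVAL;		/* already committed, as in send_bo_list_to_device() */
	else
		list_add_tail(&it->node, &xfer_list);
	spin_unlock_irqrestore(&xfer_lock, flags);

	return ret;
}

static void item_dequeue(struct xfer_item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer_lock, flags);
	list_del_init(&it->node);	/* re-initializes, so item_queued() reads false */
	spin_unlock_irqrestore(&xfer_lock, flags);
}

Note the node must start out initialized with INIT_LIST_HEAD(), which is exactly what the new line in qaic_init_bo() provides.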
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 2a313eb69b12..d1a632dbaec6 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -44,6 +44,53 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
+static void qaicm_wq_release(struct drm_device *dev, void *res)
+{
+ struct workqueue_struct *wq = res;
+
+ destroy_workqueue(wq);
+}
+
+static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
+{
+ struct workqueue_struct *wq;
+ int ret;
+
+ wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
+ if (!wq)
+ return ERR_PTR(-ENOMEM);
+ ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return wq;
+}
+
+static void qaicm_srcu_release(struct drm_device *dev, void *res)
+{
+ struct srcu_struct *lock = res;
+
+ cleanup_srcu_struct(lock);
+}
+
+static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
+{
+ int ret;
+
+ ret = init_srcu_struct(lock);
+ if (ret)
+ return ret;
+
+ return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
+}
+
+static void qaicm_pci_release(struct drm_device *dev, void *res)
+{
+ struct qaic_device *qdev = to_qaic_device(dev);
+
+ pci_set_drvdata(qdev->pdev, NULL);
+}
+
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -299,74 +346,73 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
release_dbc(qdev, i);
}
-static void cleanup_qdev(struct qaic_device *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->num_dbc; ++i)
- cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
- cleanup_srcu_struct(&qdev->dev_lock);
- pci_set_drvdata(qdev->pdev, NULL);
- destroy_workqueue(qdev->cntl_wq);
- destroy_workqueue(qdev->qts_wq);
-}
-
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct qaic_drm_device *qddev;
struct qaic_device *qdev;
- int i;
+ struct drm_device *drm;
+ int i, ret;
- qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return NULL;
qdev->dev_state = QAIC_OFFLINE;
if (id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
- qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
if (!qdev->dbc)
return NULL;
}
- qdev->cntl_wq = alloc_workqueue("qaic_cntl", WQ_UNBOUND, 0);
- if (!qdev->cntl_wq)
+ qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
+ if (IS_ERR(qddev))
+ return NULL;
+
+ drm = to_drm(qddev);
+ pci_set_drvdata(pdev, qdev);
+
+ ret = drmm_mutex_init(drm, &qddev->users_mutex);
+ if (ret)
+ return NULL;
+ ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
+ if (ret)
+ return NULL;
+ ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
+ if (ret)
return NULL;
- qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0);
- if (!qdev->qts_wq) {
- destroy_workqueue(qdev->cntl_wq);
+ qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
+ if (IS_ERR(qdev->cntl_wq))
+ return NULL;
+ qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
+ if (IS_ERR(qdev->qts_wq))
return NULL;
- }
- pci_set_drvdata(pdev, qdev);
+ ret = qaicm_srcu_init(drm, &qdev->dev_lock);
+ if (ret)
+ return NULL;
+
+ qdev->qddev = qddev;
qdev->pdev = pdev;
+ qddev->qdev = qdev;
- mutex_init(&qdev->cntl_mutex);
INIT_LIST_HEAD(&qdev->cntl_xfer_list);
- init_srcu_struct(&qdev->dev_lock);
+ INIT_LIST_HEAD(&qddev->users);
for (i = 0; i < qdev->num_dbc; ++i) {
spin_lock_init(&qdev->dbc[i].xfer_lock);
qdev->dbc[i].qdev = qdev;
qdev->dbc[i].id = i;
INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
- init_srcu_struct(&qdev->dbc[i].ch_lock);
+ ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
+ if (ret)
+ return NULL;
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
}
- qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
- if (IS_ERR(qddev)) {
- cleanup_qdev(qdev);
- return NULL;
- }
-
- drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
- INIT_LIST_HEAD(&qddev->users);
- qddev->qdev = qdev;
- qdev->qddev = qddev;
-
return qdev;
}
@@ -472,35 +518,28 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = init_pci(qdev, pdev);
if (ret)
- goto cleanup_qdev;
+ return ret;
for (i = 0; i < qdev->num_dbc; ++i)
qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
- if (mhi_irq < 0) {
- ret = mhi_irq;
- goto cleanup_qdev;
- }
+ if (mhi_irq < 0)
+ return mhi_irq;
ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
if (ret)
- goto cleanup_qdev;
+ return ret;
qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
qdev->single_msi);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
- goto cleanup_drm_dev;
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
+ return ret;
}
return 0;
-
-cleanup_drm_dev:
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-cleanup_qdev:
- cleanup_qdev(qdev);
- return ret;
}
static void qaic_pci_remove(struct pci_dev *pdev)
@@ -511,9 +550,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
return;
qaic_dev_reset_clean_local_state(qdev);
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
- cleanup_qdev(qdev);
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}
static void qaic_pci_shutdown(struct pci_dev *pdev)
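The qaic_drv.c rework replaces hand-rolled teardown (cleanup_qdev() and the error-path gotos) with DRM-managed release actions: once drmm_add_action_or_reset() succeeds, the matching release runs automatically when the drm_device is torn down, and on failure the action fires immediately so nothing leaks. A hedged sketch of wrapping one more resource in the same style; the resource type and names here are invented for illustration:

#include <linux/vmalloc.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

/* hypothetical resource, used only for this example */
struct my_fw_buf {
	void *vaddr;
	size_t size;
};

static void my_fw_buf_release(struct drm_device *dev, void *res)
{
	struct my_fw_buf *buf = res;

	vfree(buf->vaddr);	/* buf itself is drmm-allocated and freed by DRM */
}

static struct my_fw_buf *my_fw_buf_create(struct drm_device *dev, size_t size)
{
	struct my_fw_buf *buf;
	int ret;

	buf = drmm_kzalloc(dev, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = vmalloc(size);
	if (!buf->vaddr)
		return ERR_PTR(-ENOMEM);
	buf->size = size;

	/* on failure this runs the release at once, mirroring qaicm_wq_init() */
	ret = drmm_add_action_or_reset(dev, my_fw_buf_release, buf);
	if (ret)
		return ERR_PTR(ret);

	return buf;
}

This is why create_qdev() can simply return NULL at any point after devm_drm_dev_alloc(): every already-registered action unwinds on its own when the probe fails.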
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5c36ab85f80b..67d7be800a7c 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -138,7 +138,6 @@ struct agp_bridge_data {
unsigned long gart_bus_addr;
unsigned long gatt_bus_addr;
u32 mode;
- enum chipset_type type;
unsigned long *key_list;
atomic_t current_memory_agp;
atomic_t agp_in_use;
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index e0fd99e61a2d..0393a9bba3a8 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -102,7 +102,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
*
* * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
* respectively &mmu_interval_notifier callbacks. This means any code required
- * for fence completeion cannot allocate memory with GFP_NOFS or GFP_NOIO.
+ * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
* Only GFP_ATOMIC is permissible, which might fail.
*
* Note that only GPU drivers have a reasonable excuse for both requiring
@@ -522,7 +522,7 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
EXPORT_SYMBOL(dma_fence_wait_timeout);
/**
- * dma_fence_release - default relese function for fences
+ * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
*
 * This is the default release function for &dma_fence. Drivers shouldn't call
@@ -974,8 +974,8 @@ void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
EXPORT_SYMBOL(dma_fence_set_deadline);
/**
- * dma_fence_describe - Dump fence describtion into seq_file
- * @fence: the 6fence to describe
+ * dma_fence_describe - Dump fence description into seq_file
+ * @fence: the fence to describe
* @seq: the seq_file to put the textual description into
*
 * Dump a textual description of the fence and its state into the seq_file.
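The corrected comment above states a real constraint, not just style: dma_fence_wait() may be called from mmu_notifier callbacks, so code on the fence-completion path must not recurse into FS or IO reclaim, leaving GFP_ATOMIC as the only permissible allocation, and it may fail. A small sketch of a completion callback honoring that rule; the struct and function names are illustrative:

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct done_work {
	struct dma_fence_cb cb;
	/* ... payload ... */
};

static void on_fence_done(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* only GFP_ATOMIC is allowed here, and it can fail */
	void *scratch = kmalloc(64, GFP_ATOMIC);

	if (!scratch)
		return;	/* degrade gracefully; never block on reclaim */

	/* ... use scratch ... */
	kfree(scratch);
}

static int watch_fence(struct dma_fence *fence, struct done_work *w)
{
	int ret = dma_fence_add_callback(fence, &w->cb, on_fence_done);

	if (ret == -ENOENT)	/* already signaled: callback was not installed */
		on_fence_done(fence, &w->cb);

	return ret;
}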
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index eb8b733065b2..e2869fb31140 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -405,7 +405,7 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
*
* Beware that the iterator can be restarted. Code which accumulates statistics
* or similar needs to check for this with dma_resv_iter_is_restarted(). For
- * this reason prefer the locked dma_resv_iter_first() whenver possible.
+ * this reason prefer the locked dma_resv_iter_first() whenever possible.
*
* Returns the first fence from an unlocked dma_resv obj.
*/
@@ -428,7 +428,7 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
*
* Beware that the iterator can be restarted. Code which accumulates statistics
* or similar needs to check for this with dma_resv_iter_is_restarted(). For
- * this reason prefer the locked dma_resv_iter_next() whenver possible.
+ * this reason prefer the locked dma_resv_iter_next() whenever possible.
*
* Returns the next fence from an unlocked dma_resv obj.
*/
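Both corrected comments warn about the same pitfall: the unlocked dma_resv iterators can drop RCU protection and restart from the first fence, so any state accumulated across iterations must be reset when that happens. A short sketch of the documented pattern, where counting fences stands in for any accumulated statistic:

#include <linux/dma-resv.h>

static unsigned int count_fences_unlocked(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;	/* discard totals gathered before the restart */
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}

With the dma_resv lock held, the iterator never restarts, which is why the comments recommend the locked dma_resv_iter_first()/dma_resv_iter_next() whenever possible.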
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index afd38539b92e..71d8b26c4103 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -182,6 +182,7 @@ config MTK_ADSP_IPC
config SYSFB
bool
select BOOT_VESA_SUPPORT
+ select SCREEN_INFO
config SYSFB_SIMPLEFB
bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 3c197db42c9d..a6b48703dc9e 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/pci.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
@@ -69,24 +70,70 @@ void sysfb_disable(void)
}
EXPORT_SYMBOL_GPL(sysfb_disable);
+#if defined(CONFIG_PCI)
+static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+{
+ /*
+ * TODO: Try to integrate this code into the PCI subsystem
+ */
+ int ret;
+ u16 command;
+
+ ret = pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return false;
+ if (!(command & PCI_COMMAND_MEMORY))
+ return false;
+ return true;
+}
+#else
+static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+{
+ return false;
+}
+#endif
+
+static __init struct device *sysfb_parent_dev(const struct screen_info *si)
+{
+ struct pci_dev *pdev;
+
+ pdev = screen_info_pci_dev(si);
+ if (IS_ERR(pdev)) {
+ return ERR_CAST(pdev);
+ } else if (pdev) {
+ if (!sysfb_pci_dev_is_enabled(pdev))
+ return ERR_PTR(-ENODEV);
+ return &pdev->dev;
+ }
+
+ return NULL;
+}
+
static __init int sysfb_init(void)
{
struct screen_info *si = &screen_info;
+ struct device *parent;
struct simplefb_platform_data mode;
const char *name;
bool compatible;
int ret = 0;
+ screen_info_apply_fixups();
+
mutex_lock(&disable_lock);
if (disabled)
goto unlock_mutex;
sysfb_apply_efi_quirks();
+ parent = sysfb_parent_dev(si);
+ if (IS_ERR(parent))
+ goto unlock_mutex;
+
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
- pd = sysfb_create_simplefb(si, &mode);
+ pd = sysfb_create_simplefb(si, &mode, parent);
if (!IS_ERR(pd))
goto unlock_mutex;
}
@@ -109,6 +156,8 @@ static __init int sysfb_init(void)
goto unlock_mutex;
}
+ pd->dev.parent = parent;
+
sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_data(pd, si, sizeof(*si));
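sysfb_parent_dev() is deliberately tri-state: it returns an ERR_PTR when a PCI parent exists but cannot be used (or the lookup itself failed), NULL when the framebuffer has no PCI parent at all, and a valid device otherwise. That is why sysfb_init() bails out only on IS_ERR() and passes NULL straight into pd->dev.parent, where it simply means "no parent". A sketch of consuming such a return; the caller is illustrative:

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_attach(const struct screen_info *si, struct platform_device *pd)
{
	struct device *parent = sysfb_parent_dev(si);

	if (IS_ERR(parent))	/* a PCI parent exists but is disabled or broken */
		return PTR_ERR(parent);

	/* NULL is fine here: the platform device just gets no PCI parent */
	pd->dev.parent = parent;

	return 0;
}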
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 74363ed7501f..75a186bf8f8e 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -91,7 +91,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
}
__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
- const struct simplefb_platform_data *mode)
+ const struct simplefb_platform_data *mode,
+ struct device *parent)
{
struct platform_device *pd;
struct resource res;
@@ -143,6 +144,8 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
if (!pd)
return ERR_PTR(-ENOMEM);
+ pd->dev.parent = parent;
+
sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_resources(pd, &res, 1);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2520db0b776e..872edb47bb53 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -19,8 +19,7 @@ menuconfig DRM
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
# device and dmabuf fd. Let's make sure that is available for our userspace.
select KCMP
- select VIDEO_CMDLINE
- select VIDEO_NOMODESET
+ select VIDEO
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -42,7 +41,7 @@ config DRM_MIPI_DSI
config DRM_DEBUG_MM
bool "Insert extra checks and debug info into the DRM range managers"
default n
- depends on DRM=y
+ depends on DRM
depends on STACKTRACE_SUPPORT
select STACKDEPOT
help
@@ -289,19 +288,7 @@ config DRM_VGEM
as used by Mesa's software renderer for enhanced performance.
If M is selected the module will be called vgem.
-config DRM_VKMS
- tristate "Virtual KMS (EXPERIMENTAL)"
- depends on DRM && MMU
- select DRM_KMS_HELPER
- select DRM_GEM_SHMEM_HELPER
- select CRC32
- default n
- help
- Virtual Kernel Mode-Setting (VKMS) is used for testing or for
- running GPU in a headless machines. Choose this option to get
- a VKMS.
-
- If M is selected the module will be called vkms.
+source "drivers/gpu/drm/vkms/Kconfig"
source "drivers/gpu/drm/exynos/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 260e32ef7bae..fa26a4e3a99d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -98,7 +98,7 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o
# add DF block
amdgpu-y += \
@@ -132,7 +132,8 @@ amdgpu-y += \
vega20_ih.o \
navi10_ih.o \
ih_v6_0.o \
- ih_v6_1.o
+ ih_v6_1.o \
+ ih_v7_0.o
# add PSP block
amdgpu-y += \
@@ -143,7 +144,8 @@ amdgpu-y += \
psp_v11_0_8.o \
psp_v12_0.o \
psp_v13_0.o \
- psp_v13_0_4.o
+ psp_v13_0_4.o \
+ psp_v14_0.o
# add DCE block
amdgpu-y += \
@@ -208,6 +210,7 @@ amdgpu-y += \
vcn_v4_0.o \
vcn_v4_0_3.o \
vcn_v4_0_5.o \
+ vcn_v5_0_0.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
@@ -215,7 +218,8 @@ amdgpu-y += \
jpeg_v3_0.o \
jpeg_v4_0.o \
jpeg_v4_0_3.o \
- jpeg_v4_0_5.o
+ jpeg_v4_0_5.o \
+ jpeg_v5_0_0.o
# add VPE block
amdgpu-y += \
@@ -233,7 +237,8 @@ amdgpu-y += \
athub_v1_0.o \
athub_v2_0.o \
athub_v2_1.o \
- athub_v3_0.o
+ athub_v3_0.o \
+ athub_v4_1_0.o
# add SMUIO block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 79827a6dcd7f..9246bca0a008 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -107,6 +107,7 @@
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
@@ -114,14 +115,12 @@
#define MAX_GPU_INSTANCE 64
-struct amdgpu_gpu_instance
-{
+struct amdgpu_gpu_instance {
struct amdgpu_device *adev;
int mgpu_fan_enabled;
};
-struct amdgpu_mgpu_info
-{
+struct amdgpu_mgpu_info {
struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
struct mutex mutex;
uint32_t num_gpu;
@@ -140,8 +139,7 @@ enum amdgpu_ss {
AMDGPU_SS_DRV_UNLOAD
};
-struct amdgpu_watchdog_timer
-{
+struct amdgpu_watchdog_timer {
bool timeout_fatal_disable;
uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
};
@@ -198,7 +196,7 @@ extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
-extern uint amdgpu_dm_abm_level;
+extern int amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
@@ -1046,6 +1044,9 @@ struct amdgpu_device {
/* MCA */
struct amdgpu_mca mca;
+ /* ACA */
+ struct amdgpu_aca aca;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
uint32_t harvest_ip_mask;
int num_ip_blocks;
@@ -1095,6 +1096,7 @@ struct amdgpu_device {
long sdma_timeout;
long video_timeout;
long compute_timeout;
+ long psp_timeout;
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
@@ -1332,6 +1334,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
/*
* BIOS helpers.
*/
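The new AMDGPU_GET_REG_FIELD() macro is the standard GENMASK_ULL() idiom: mask the inclusive bit range [l, h] out of a 64-bit value, then shift the field down to bit 0. A compile-checked illustration of the arithmetic, using a made-up value with 0x96 placed at bits 43:32 (the layout the new ACA code below uses for the IPID hardware ID):

#include <linux/bits.h>
#include <linux/types.h>

static void reg_field_demo(void)
{
	u64 ipid = 0x96ULL << 32;	/* field value 0x96 at bits 43:32 */
	u64 hwid = AMDGPU_GET_REG_FIELD(ipid, 43, 32);

	/* hwid == 0x96: the mask keeps bits 43:32, the shift drops them to bit 0 */
	(void)hwid;
}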
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
new file mode 100644
index 000000000000..493982f94649
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -0,0 +1,879 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_aca.h"
+#include "amdgpu_ras.h"
+
+#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype}
+
+typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data);
+
+struct aca_banks {
+ int nr_banks;
+ struct list_head list;
+};
+
+struct aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
+static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
+ ACA_BANK_HWID(SMU, 0x01, 0x01),
+ ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00),
+ ACA_BANK_HWID(UMC, 0x96, 0x00),
+};
+
+static void aca_banks_init(struct aca_banks *banks)
+{
+ if (!banks)
+ return;
+
+ memset(banks, 0, sizeof(*banks));
+ INIT_LIST_HEAD(&banks->list);
+}
+
+static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank)
+{
+ struct aca_bank_node *node;
+
+ if (!bank)
+ return -EINVAL;
+
+ node = kvzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(&node->bank, bank, sizeof(*bank));
+
+ INIT_LIST_HEAD(&node->node);
+ list_add_tail(&node->node, &banks->list);
+
+ banks->nr_banks++;
+
+ return 0;
+}
+
+static void aca_banks_release(struct aca_banks *banks)
+{
+ struct aca_bank_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &banks->list, node) {
+ list_del(&node->node);
+ kvfree(node);
+ }
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+ if (!count)
+ return -EINVAL;
+
+ if (!smu_funcs || !smu_funcs->get_valid_aca_count)
+ return -EOPNOTSUPP;
+
+ return smu_funcs->get_valid_aca_count(adev, type, count);
+}
+
+static struct aca_regs_dump {
+ const char *name;
+ int reg_idx;
+} aca_regs[] = {
+ {"CONTROL", ACA_REG_IDX_CTL},
+ {"STATUS", ACA_REG_IDX_STATUS},
+ {"ADDR", ACA_REG_IDX_ADDR},
+ {"MISC", ACA_REG_IDX_MISC0},
+ {"CONFIG", ACA_REG_IDX_CONFG},
+ {"IPID", ACA_REG_IDX_IPID},
+ {"SYND", ACA_REG_IDX_SYND},
+ {"DESTAT", ACA_REG_IDX_DESTAT},
+ {"DEADDR", ACA_REG_IDX_DEADDR},
+ {"CONTROL_MASK", ACA_REG_IDX_CTL_MASK},
+};
+
+static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank)
+{
+ int i;
+
+ dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+ /* plus 1 for output format, e.g.: ACA[08/08]: xxxx */
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+}
+
+static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type,
+ int start, int count,
+ struct aca_banks *banks)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+ struct aca_bank bank;
+ int i, max_count, ret;
+
+ if (!count)
+ return 0;
+
+ if (!smu_funcs || !smu_funcs->get_valid_aca_bank)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ max_count = smu_funcs->max_ue_bank_count;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ max_count = smu_funcs->max_ce_bank_count;
+ break;
+ case ACA_ERROR_TYPE_DEFERRED:
+ default:
+ return -EINVAL;
+ }
+
+ if (start + count >= max_count)
+ return -EINVAL;
+
+ count = min_t(int, count, max_count);
+ for (i = 0; i < count; i++) {
+ memset(&bank, 0, sizeof(bank));
+ ret = smu_funcs->get_valid_aca_bank(adev, type, start + i, &bank);
+ if (ret)
+ return ret;
+
+ aca_smu_bank_dump(adev, i, count, &bank);
+
+ ret = aca_banks_add_bank(banks, &bank);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type)
+{
+ const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+ if (!aca_bank_hwip_is_matched(bank, handle->hwip))
+ return false;
+
+ if (!bank_ops->aca_bank_is_valid)
+ return true;
+
+ return bank_ops->aca_bank_is_valid(handle, bank, type, handle->data);
+}
+
+static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error;
+
+ bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL);
+ if (!bank_error)
+ return NULL;
+
+ INIT_LIST_HEAD(&bank_error->node);
+ memcpy(&bank_error->info, info, sizeof(*info));
+
+ mutex_lock(&aerr->lock);
+ list_add_tail(&bank_error->node, &aerr->list);
+ mutex_unlock(&aerr->lock);
+
+ return bank_error;
+}
+
+static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error = NULL;
+ struct aca_bank_info *tmp_info;
+ bool found = false;
+
+ mutex_lock(&aerr->lock);
+ list_for_each_entry(bank_error, &aerr->list, node) {
+ tmp_info = &bank_error->info;
+ if (tmp_info->socket_id == info->socket_id &&
+ tmp_info->die_id == info->die_id) {
+ found = true;
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&aerr->lock);
+
+ return found ? bank_error : NULL;
+}
+
+static void aca_bank_error_remove(struct aca_error *aerr, struct aca_bank_error *bank_error)
+{
+ if (!aerr || !bank_error)
+ return;
+
+ list_del(&bank_error->node);
+ aerr->nr_errors--;
+
+ kvfree(bank_error);
+}
+
+static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error;
+
+ if (!aerr || !info)
+ return NULL;
+
+ bank_error = find_bank_error(aerr, info);
+ if (bank_error)
+ return bank_error;
+
+ return new_bank_error(aerr, info);
+}
+
+static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type,
+ struct aca_bank_report *report)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ struct aca_bank_error *bank_error;
+ struct aca_error *aerr;
+
+ if (!handle || !report)
+ return -EINVAL;
+
+ if (!report->count[type])
+ return 0;
+
+ aerr = &error_cache->errors[type];
+ bank_error = get_bank_error(aerr, &report->info);
+ if (!bank_error)
+ return -ENOMEM;
+
+ bank_error->count[type] += report->count[type];
+
+ return 0;
+}
+
+static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, struct aca_bank_report *report)
+{
+ const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+ if (!bank || !report)
+ return -EINVAL;
+
+ if (!bank_ops->aca_bank_generate_report)
+ return -EOPNOTSUPP;
+
+ memset(report, 0, sizeof(*report));
+ return bank_ops->aca_bank_generate_report(handle, bank, type,
+ report, handle->data);
+}
+
+static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ struct aca_bank_report report;
+ int ret;
+
+ ret = aca_generate_bank_report(handle, bank, type, &report);
+ if (ret)
+ return ret;
+
+ if (!report.count[type])
+ return 0;
+
+ ret = aca_log_errors(handle, type, &report);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank,
+ enum aca_error_type type, bank_handler_t handler, void *data)
+{
+ struct aca_handle *handle;
+ int ret;
+
+ if (list_empty(&mgr->list))
+ return 0;
+
+ list_for_each_entry(handle, &mgr->list, node) {
+ if (!aca_bank_is_valid(handle, bank, type))
+ continue;
+
+ ret = handler(handle, bank, type, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks,
+ enum aca_error_type type, bank_handler_t handler, void *data)
+{
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ int ret;
+
+ if (!mgr || !banks)
+ return -EINVAL;
+
+ /* pre-check to avoid unnecessary operations */
+ if (list_empty(&mgr->list) || list_empty(&banks->list))
+ return 0;
+
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+
+ ret = aca_dispatch_bank(mgr, bank, type, handler, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type,
+ bank_handler_t handler, void *data)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ struct aca_banks banks;
+ u32 count = 0;
+ int ret;
+
+ if (list_empty(&aca->mgr.list))
+ return 0;
+
+ /* NOTE: pmfw only supports UE and CE */
+ if (type == ACA_ERROR_TYPE_DEFERRED)
+ type = ACA_ERROR_TYPE_CE;
+
+ ret = aca_smu_get_valid_aca_count(adev, type, &count);
+ if (ret)
+ return ret;
+
+ if (!count)
+ return 0;
+
+ aca_banks_init(&banks);
+
+ ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks);
+ if (ret)
+ goto err_release_banks;
+
+ if (list_empty(&banks.list)) {
+ ret = 0;
+ goto err_release_banks;
+ }
+
+ ret = aca_dispatch_banks(&aca->mgr, &banks, type,
+ handler, data);
+ if (ret)
+ goto err_release_banks;
+
+err_release_banks:
+ aca_banks_release(&banks);
+
+ return ret;
+}
+
+static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct aca_bank_info *info;
+ struct amdgpu_smuio_mcm_config_info mcm_info;
+ u64 count;
+
+ if (type >= ACA_ERROR_TYPE_COUNT)
+ return -EINVAL;
+
+ count = bank_error->count[type];
+ if (!count)
+ return 0;
+
+ info = &bank_error->info;
+ mcm_info.die_id = info->die_id;
+ mcm_info.socket_id = info->socket_id;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count);
+ break;
+ case ACA_ERROR_TYPE_CE:
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
+ break;
+ case ACA_ERROR_TYPE_DEFERRED:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ struct aca_error *aerr = &error_cache->errors[type];
+ struct aca_bank_error *bank_error, *tmp;
+
+ mutex_lock(&aerr->lock);
+
+ if (list_empty(&aerr->list))
+ goto out_unlock;
+
+ list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) {
+ aca_log_aca_error_data(bank_error, type, err_data);
+ aca_bank_error_remove(aerr, bank_error);
+ }
+
+out_unlock:
+ mutex_unlock(&aerr->lock);
+
+ return 0;
+}
+
+static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type,
+ struct ras_err_data *err_data)
+{
+ int ret;
+
+ /* update aca bank to aca source error_cache first */
+ ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL);
+ if (ret)
+ return ret;
+
+ return aca_log_aca_error(handle, type, err_data);
+}
+
+static bool aca_handle_is_valid(struct aca_handle *handle)
+{
+ if (!handle->mask || !list_empty(&handle->node))
+ return false;
+
+ return true;
+}
+
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+ enum aca_error_type type, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+
+ if (!handle || !err_data)
+ return -EINVAL;
+
+ if (aca_handle_is_valid(handle))
+ return -EOPNOTSUPP;
+
+ if (!(BIT(type) & handle->mask))
+ return 0;
+
+ return __aca_get_error_data(adev, handle, type, err_data);
+}
+
+static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
+{
+ mutex_init(&aerr->lock);
+ INIT_LIST_HEAD(&aerr->list);
+ aerr->type = type;
+ aerr->nr_errors = 0;
+}
+
+static void aca_init_error_cache(struct aca_handle *handle)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ int type;
+
+ for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+ aca_error_init(&error_cache->errors[type], type);
+}
+
+static void aca_error_fini(struct aca_error *aerr)
+{
+ struct aca_bank_error *bank_error, *tmp;
+
+ mutex_lock(&aerr->lock);
+ list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
+ aca_bank_error_remove(aerr, bank_error);
+
+ mutex_destroy(&aerr->lock);
+}
+
+static void aca_fini_error_cache(struct aca_handle *handle)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ int type;
+
+ for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+ aca_error_fini(&error_cache->errors[type]);
+}
+
+static int add_aca_handle(struct amdgpu_device *adev, struct aca_handle_manager *mgr, struct aca_handle *handle,
+ const char *name, const struct aca_info *ras_info, void *data)
+{
+ memset(handle, 0, sizeof(*handle));
+
+ handle->adev = adev;
+ handle->mgr = mgr;
+ handle->name = name;
+ handle->hwip = ras_info->hwip;
+ handle->mask = ras_info->mask;
+ handle->bank_ops = ras_info->bank_ops;
+ handle->data = data;
+ aca_init_error_cache(handle);
+
+ INIT_LIST_HEAD(&handle->node);
+ list_add_tail(&handle->node, &mgr->list);
+ mgr->nr_handles++;
+
+ return 0;
+}
+
+static ssize_t aca_sysfs_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct aca_handle *handle = container_of(attr, struct aca_handle, aca_attr);
+
+ /* NOTE: the aca cache will be auto cleared once read, so the driver
+ * should unify the query entry point and forward requests to the ras query interface directly */
+ return amdgpu_ras_aca_sysfs_read(dev, attr, handle, buf, handle->data);
+}
+
+static int add_aca_sysfs(struct amdgpu_device *adev, struct aca_handle *handle)
+{
+ struct device_attribute *aca_attr = &handle->aca_attr;
+
+ snprintf(handle->attr_name, sizeof(handle->attr_name) - 1, "aca_%s", handle->name);
+ aca_attr->show = aca_sysfs_read;
+ aca_attr->attr.name = handle->attr_name;
+ aca_attr->attr.mode = S_IRUGO;
+ sysfs_attr_init(&aca_attr->attr);
+
+ return sysfs_add_file_to_group(&adev->dev->kobj,
+ &aca_attr->attr,
+ "ras");
+}
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+ const char *name, const struct aca_info *ras_info, void *data)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ int ret;
+
+ if (!amdgpu_aca_is_enabled(adev))
+ return 0;
+
+ ret = add_aca_handle(adev, &aca->mgr, handle, name, ras_info, data);
+ if (ret)
+ return ret;
+
+ return add_aca_sysfs(adev, handle);
+}
+
+static void remove_aca_handle(struct aca_handle *handle)
+{
+ struct aca_handle_manager *mgr = handle->mgr;
+
+ aca_fini_error_cache(handle);
+ list_del(&handle->node);
+ mgr->nr_handles--;
+}
+
+static void remove_aca_sysfs(struct aca_handle *handle)
+{
+ struct amdgpu_device *adev = handle->adev;
+ struct device_attribute *aca_attr = &handle->aca_attr;
+
+ if (adev->dev->kobj.sd)
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &aca_attr->attr,
+ "ras");
+}
+
+void amdgpu_aca_remove_handle(struct aca_handle *handle)
+{
+ if (!handle || list_empty(&handle->node))
+ return;
+
+ remove_aca_sysfs(handle);
+ remove_aca_handle(handle);
+}
+
+static int aca_manager_init(struct aca_handle_manager *mgr)
+{
+ INIT_LIST_HEAD(&mgr->list);
+ mgr->nr_handles = 0;
+
+ return 0;
+}
+
+static void aca_manager_fini(struct aca_handle_manager *mgr)
+{
+ struct aca_handle *handle, *tmp;
+
+ list_for_each_entry_safe(handle, tmp, &mgr->list, node)
+ amdgpu_aca_remove_handle(handle);
+}
+
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev)
+{
+ return adev->aca.is_enabled;
+}
+
+int amdgpu_aca_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ int ret;
+
+ ret = aca_manager_init(&aca->mgr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void amdgpu_aca_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ aca_manager_fini(&aca->mgr);
+}
+
+int amdgpu_aca_reset(struct amdgpu_device *adev)
+{
+ amdgpu_aca_fini(adev);
+
+ return amdgpu_aca_init(adev);
+}
+
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ WARN_ON(aca->smu_funcs);
+ aca->smu_funcs = smu_funcs;
+}
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
+{
+ u64 ipid;
+ u32 instidhi, instidlo;
+
+ if (!bank || !info)
+ return -EINVAL;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ info->hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ info->mcatype = ACA_REG__IPID__MCATYPE(ipid);
+ /*
+ * Unified DieID Format: SAASS. A:AID, S:Socket.
+ * Unified DieID[4:4] = InstanceId[0:0]
+ * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid);
+ instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid);
+ info->die_id = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
+
+ return 0;
+}
+
+static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
+{
+ int error_code;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) {
+ error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
+ return error_code & 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* NOTE: the true error code is encoded in status.errorcode[0:7] */
+ error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
+
+ return error_code & 0xff;
+}
+
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
+{
+ int i, error_code;
+
+ if (!bank || !err_codes)
+ return -EINVAL;
+
+ error_code = aca_bank_get_error_code(adev, bank);
+ for (i = 0; i < size; i++) {
+ if (err_codes[i] == error_code)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+ if (!smu_funcs || !smu_funcs->set_debug_mode)
+ return -EOPNOTSUPP;
+
+ return smu_funcs->set_debug_mode(adev, en);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ int ret;
+
+ ret = amdgpu_ras_set_aca_debug_mode(adev, val ? true : false);
+ if (ret)
+ return ret;
+
+ dev_info(adev->dev, "amdgpu set smu aca debug mode %s success\n", val ? "on" : "off");
+
+ return 0;
+}
+
+static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx)
+{
+ struct aca_bank_info info;
+ int i, ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return;
+
+ seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? "UE" : "CE");
+ seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
+ idx, info.socket_id, info.die_id, info.hwid, info.mcatype);
+
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ seq_printf(m, "aca entry[%d].regs[%d]: 0x%016llx\n", idx, aca_regs[i].reg_idx, bank->regs[aca_regs[i].reg_idx]);
+}
+
+struct aca_dump_context {
+ struct seq_file *m;
+ int idx;
+};
+
+static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ struct aca_dump_context *ctx = (struct aca_dump_context *)data;
+
+ aca_dump_entry(ctx->m, bank, type, ctx->idx++);
+
+ return handler_aca_log_bank_error(handle, bank, type, NULL);
+}
+
+static int aca_dump_show(struct seq_file *m, enum aca_error_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct aca_dump_context context = {
+ .m = m,
+ .idx = 0,
+ };
+
+ return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context);
+}
+
+static int aca_dump_ce_show(struct seq_file *m, void *unused)
+{
+ return aca_dump_show(m, ACA_ERROR_TYPE_CE);
+}
+
+static int aca_dump_ce_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aca_dump_ce_show, inode->i_private);
+}
+
+static const struct file_operations aca_ce_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = aca_dump_ce_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int aca_dump_ue_show(struct seq_file *m, void *unused)
+{
+ return aca_dump_show(m, ACA_ERROR_TYPE_UE);
+}
+
+static int aca_dump_ue_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aca_dump_ue_show, inode->i_private);
+}
+
+static const struct file_operations aca_ue_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = aca_dump_ue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_set, "%llu\n");
+#endif
+
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
+{
+#if defined(CONFIG_DEBUG_FS)
+ if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6))
+ return;
+
+ debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops);
+ debugfs_create_file("aca_ue_dump", 0400, root, adev, &aca_ue_dump_debug_fops);
+ debugfs_create_file("aca_ce_dump", 0400, root, adev, &aca_ce_dump_debug_fops);
+#endif
+}
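To use this framework, an IP block fills in aca_bank_ops and an aca_info and registers a handle; incoming banks are matched against the handle's hwid/mcatype pair and folded into its per-handle error cache when queried. A hedged sketch of a client, with placeholder ops bodies that are not taken from any real IP block:

static bool my_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
			     enum aca_error_type type, void *data)
{
	int err_codes[] = { ACA_EXTERROR_CODE_CE, ACA_EXTERROR_CODE_FAULT };

	/* accept only banks whose error code is one we understand */
	return !aca_bank_check_error_codes(handle->adev, bank, err_codes,
					   ARRAY_SIZE(err_codes));
}

static int my_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank,
				   enum aca_error_type type,
				   struct aca_bank_report *report, void *data)
{
	int ret;

	ret = aca_bank_info_decode(bank, &report->info);
	if (ret)
		return ret;

	/* MISC0.ERRCNT carries the per-bank error count */
	report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);

	return 0;
}

static const struct aca_bank_ops my_bank_ops = {
	.aca_bank_is_valid = my_bank_is_valid,
	.aca_bank_generate_report = my_bank_generate_report,
};

static int my_block_register_aca(struct amdgpu_device *adev, struct aca_handle *handle)
{
	const struct aca_info info = {
		.hwip = ACA_HWIP_TYPE_UMC,
		.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
		.bank_ops = &my_bank_ops,
	};

	return amdgpu_aca_add_handle(adev, handle, "my_block", &info, NULL);
}

amdgpu_aca_get_error_data() later walks fresh banks through these ops and drains the cache into the caller's ras_err_data.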
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
new file mode 100644
index 000000000000..2da50e095883
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_ACA_H__
+#define __AMDGPU_ACA_H__
+
+#include <linux/list.h>
+
+#define ACA_MAX_REGS_COUNT (16)
+
+#define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG__STATUS__VAL(x) ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__STATUS__OVERFLOW(x) ACA_REG_FIELD(x, 62, 62)
+#define ACA_REG__STATUS__UC(x) ACA_REG_FIELD(x, 61, 61)
+#define ACA_REG__STATUS__EN(x) ACA_REG_FIELD(x, 60, 60)
+#define ACA_REG__STATUS__MISCV(x) ACA_REG_FIELD(x, 59, 59)
+#define ACA_REG__STATUS__ADDRV(x) ACA_REG_FIELD(x, 58, 58)
+#define ACA_REG__STATUS__PCC(x) ACA_REG_FIELD(x, 57, 57)
+#define ACA_REG__STATUS__ERRCOREIDVAL(x) ACA_REG_FIELD(x, 56, 56)
+#define ACA_REG__STATUS__TCC(x) ACA_REG_FIELD(x, 55, 55)
+#define ACA_REG__STATUS__SYNDV(x) ACA_REG_FIELD(x, 53, 53)
+#define ACA_REG__STATUS__CECC(x) ACA_REG_FIELD(x, 46, 46)
+#define ACA_REG__STATUS__UECC(x) ACA_REG_FIELD(x, 45, 45)
+#define ACA_REG__STATUS__DEFERRED(x) ACA_REG_FIELD(x, 44, 44)
+#define ACA_REG__STATUS__POISON(x) ACA_REG_FIELD(x, 43, 43)
+#define ACA_REG__STATUS__SCRUB(x) ACA_REG_FIELD(x, 40, 40)
+#define ACA_REG__STATUS__ERRCOREID(x) ACA_REG_FIELD(x, 37, 32)
+#define ACA_REG__STATUS__ADDRLSB(x) ACA_REG_FIELD(x, 29, 24)
+#define ACA_REG__STATUS__ERRORCODEEXT(x) ACA_REG_FIELD(x, 21, 16)
+#define ACA_REG__STATUS__ERRORCODE(x) ACA_REG_FIELD(x, 15, 0)
+
+#define ACA_REG__IPID__MCATYPE(x) ACA_REG_FIELD(x, 63, 48)
+#define ACA_REG__IPID__INSTANCEIDHI(x) ACA_REG_FIELD(x, 47, 44)
+#define ACA_REG__IPID__HARDWAREID(x) ACA_REG_FIELD(x, 43, 32)
+#define ACA_REG__IPID__INSTANCEIDLO(x) ACA_REG_FIELD(x, 31, 0)
+
+#define ACA_REG__MISC0__VALID(x) ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__MISC0__OVRFLW(x) ACA_REG_FIELD(x, 48, 48)
+#define ACA_REG__MISC0__ERRCNT(x) ACA_REG_FIELD(x, 43, 32)
+
+#define ACA_REG__SYND__ERRORINFORMATION(x) ACA_REG_FIELD(x, 17, 0)
+
+/* NOTE: The following codes refer to the smu header file */
+#define ACA_EXTERROR_CODE_CE 0x3a
+#define ACA_EXTERROR_CODE_FAULT 0x3b
+
+#define ACA_ERROR_UE_MASK BIT_MASK(ACA_ERROR_TYPE_UE)
+#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE)
+#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+
+enum aca_reg_idx {
+ ACA_REG_IDX_CTL = 0,
+ ACA_REG_IDX_STATUS = 1,
+ ACA_REG_IDX_ADDR = 2,
+ ACA_REG_IDX_MISC0 = 3,
+ ACA_REG_IDX_CONFG = 4,
+ ACA_REG_IDX_IPID = 5,
+ ACA_REG_IDX_SYND = 6,
+ ACA_REG_IDX_DESTAT = 8,
+ ACA_REG_IDX_DEADDR = 9,
+ ACA_REG_IDX_CTL_MASK = 10,
+ ACA_REG_IDX_COUNT = 16,
+};
+
+enum aca_hwip_type {
+ ACA_HWIP_TYPE_UNKNOW = -1,
+ ACA_HWIP_TYPE_PSP = 0,
+ ACA_HWIP_TYPE_UMC,
+ ACA_HWIP_TYPE_SMU,
+ ACA_HWIP_TYPE_PCS_XGMI,
+ ACA_HWIP_TYPE_COUNT,
+};
+
+enum aca_error_type {
+ ACA_ERROR_TYPE_INVALID = -1,
+ ACA_ERROR_TYPE_UE = 0,
+ ACA_ERROR_TYPE_CE,
+ ACA_ERROR_TYPE_DEFERRED,
+ ACA_ERROR_TYPE_COUNT
+};
+
+struct aca_bank {
+ u64 regs[ACA_MAX_REGS_COUNT];
+};
+
+struct aca_bank_node {
+ struct aca_bank bank;
+ struct list_head node;
+};
+
+struct aca_bank_info {
+ int die_id;
+ int socket_id;
+ int hwid;
+ int mcatype;
+};
+
+struct aca_bank_report {
+ struct aca_bank_info info;
+ u64 count[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_bank_error {
+ struct list_head node;
+ struct aca_bank_info info;
+ u64 count[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_error {
+ struct list_head list;
+ struct mutex lock;
+ enum aca_error_type type;
+ int nr_errors;
+};
+
+struct aca_handle_manager {
+ struct list_head list;
+ int nr_handles;
+};
+
+struct aca_error_cache {
+ struct aca_error errors[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_handle {
+ struct list_head node;
+ enum aca_hwip_type hwip;
+ struct amdgpu_device *adev;
+ struct aca_handle_manager *mgr;
+ struct aca_error_cache error_cache;
+ const struct aca_bank_ops *bank_ops;
+ struct device_attribute aca_attr;
+ char attr_name[64];
+ const char *name;
+ u32 mask;
+ void *data;
+};
+
+struct aca_bank_ops {
+ int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data);
+ bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ void *data);
+};
+
+struct aca_smu_funcs {
+ int max_ue_bank_count;
+ int max_ce_bank_count;
+ int (*set_debug_mode)(struct amdgpu_device *adev, bool enable);
+ int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count);
+ int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank);
+};
+
+struct amdgpu_aca {
+ struct aca_handle_manager mgr;
+ const struct aca_smu_funcs *smu_funcs;
+ bool is_enabled;
+};
+
+struct aca_info {
+ enum aca_hwip_type hwip;
+ const struct aca_bank_ops *bank_ops;
+ u32 mask;
+};
+
+int amdgpu_aca_init(struct amdgpu_device *adev);
+void amdgpu_aca_fini(struct amdgpu_device *adev);
+int amdgpu_aca_reset(struct amdgpu_device *adev);
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs);
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev);
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info);
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size);
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+ const char *name, const struct aca_info *aca_info, void *data);
+void amdgpu_aca_remove_handle(struct aca_handle *handle);
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+ enum aca_error_type type, void *data);
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en);
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
+#endif
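The INSTANCEID bits of IPID are split across the register, and aca_bank_info_decode() stitches the unified die and socket IDs back together from the high and low halves. A worked example of that bit arithmetic with a made-up IPID value:

static void ipid_decode_demo(void)
{
	u64 ipid = (0x6ULL << 44) | 0x1ULL;	/* INSTANCEIDHI = 0x6, INSTANCEIDLO = 0x1 */
	u32 instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid);	/* 0x6 = 0b0110 */
	u32 instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid);	/* 0x1 */
	int die_id = (instidhi >> 2) & 0x03;			/* 0b01 = 1 */
	int socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);	/* 0b110 = 6 */

	(void)die_id;
	(void)socket_id;
}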
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 41db030ddc4e..190039f14c30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -742,9 +742,10 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
}
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, bool reset)
{
- amdgpu_umc_poison_handler(adev, reset);
+ amdgpu_umc_poison_handler(adev, block, reset);
}
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 27c61c535e29..e60f63ccf79a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -193,6 +193,9 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem);
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -218,6 +221,13 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
{
return 0;
}
+static inline
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence)
+{
+ return 0;
+}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
@@ -326,7 +336,7 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- bool reset);
+ enum amdgpu_ras_block block, bool reset);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 231fd927dcfb..5cd84f72bf26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -426,9 +426,9 @@ validate_fail:
return ret;
}
-static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
- uint32_t domain,
- struct dma_fence *fence)
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence)
{
int ret = amdgpu_bo_reserve(bo, false);
@@ -464,13 +464,15 @@ static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
* again. Page directories are only updated after updating page
* tables.
*/
-static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket)
{
struct amdgpu_bo *pd = vm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
- ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
+ ret = amdgpu_vm_validate(adev, vm, ticket,
+ amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("failed to validate PT BOs\n");
return ret;
@@ -1310,14 +1312,15 @@ update_gpuvm_pte_failed:
return ret;
}
-static int process_validate_vms(struct amdkfd_process_info *process_info)
+static int process_validate_vms(struct amdkfd_process_info *process_info,
+ struct ww_acquire_ctx *ticket)
{
struct amdgpu_vm *peer_vm;
int ret;
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
- ret = vm_validate_pt_pd_bos(peer_vm);
+ ret = vm_validate_pt_pd_bos(peer_vm, ticket);
if (ret)
return ret;
}
@@ -1402,7 +1405,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
ret = amdgpu_bo_reserve(vm->root.bo, true);
if (ret)
goto reserve_pd_fail;
- ret = vm_validate_pt_pd_bos(vm);
+ ret = vm_validate_pt_pd_bos(vm, NULL);
if (ret) {
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
@@ -2043,7 +2046,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
- ret = vm_validate_pt_pd_bos(avm);
+ ret = vm_validate_pt_pd_bos(avm, NULL);
if (unlikely(ret))
goto out_unreserve;
@@ -2136,7 +2139,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
goto unreserve_out;
}
- ret = vm_validate_pt_pd_bos(avm);
+ ret = vm_validate_pt_pd_bos(avm, NULL);
if (unlikely(ret))
goto unreserve_out;
@@ -2634,7 +2637,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
}
- ret = process_validate_vms(process_info);
+ ret = process_validate_vms(process_info, NULL);
if (ret)
goto unreserve_out;
@@ -2894,11 +2897,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
- /* Validate PDs and PTs */
- ret = process_validate_vms(process_info);
- if (ret)
- goto validate_map_fail;
-
/* Validate BOs and map them to GPUVM (update VM page tables). */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
@@ -2949,6 +2947,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret)
+ goto validate_map_fail;
+
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
@@ -3020,7 +3025,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
&process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
}
- /* Attach eviction fence to PD / PT BOs */
+ /* Attach eviction fence to PD / PT BOs and DMABuf imports */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
struct amdgpu_bo *bo = peer_vm->root.bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index dce9e7d5e4ec..52b12c1718eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1018,7 +1018,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1038,7 +1039,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1056,7 +1058,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1067,7 +1070,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1109,7 +1113,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1151,7 +1156,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
}
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
@@ -1205,7 +1211,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -1214,7 +1221,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
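
All of these call sites now pass sizeof(args) alongside the parameter-space pointer, giving the ATOM bytecode interpreter the information it needs to bound accesses to the on-stack args struct. The interpreter side is not part of this diff, so the following is only a userspace sketch of that kind of bounds check:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Sketch of the check the extra sizeof(args) argument enables; the
 * real interpreter-side details are assumed, not shown in this diff. */
static int ps_write(uint32_t *ps, size_t ps_size, size_t idx, uint32_t val)
{
	/* reject writes past the caller-provided parameter space */
	if ((idx + 1) * sizeof(uint32_t) > ps_size)
		return -1;
	ps[idx] = val;
	return 0;
}

int main(void)
{
	struct { uint32_t clock; uint8_t post_div; } args;
	uint32_t *ps = (uint32_t *)&args;

	memset(&args, 0, sizeof(args));
	printf("in range: %d\n", ps_write(ps, sizeof(args), 0, 100)); /*  0 */
	printf("overflow: %d\n", ps_write(ps, sizeof(args), 8, 1));   /* -1 */
	return 0;
}
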
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fb2681dd6b33..6857c586ded7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -941,5 +941,6 @@ int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
return -EINVAL;
}
- return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
+ return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
+ sizeof(asic_init_ps_v2_1));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index c7eb2caec65a..649b5530d8ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -36,7 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t *i2c_address);
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6adeddfb3d56..0a4b09709cfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -952,10 +952,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bytes_moved = 0;
p->bytes_moved_vis = 0;
- r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
- amdgpu_cs_bo_validate, p);
+ r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
+ amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+ DRM_ERROR("amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 796fa6f1420b..cfdf558b48b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -28,9 +28,8 @@
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
- uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+ uint64_t addr = AMDGPU_VA_RESERVED_CSA_START(adev);
- addr -= AMDGPU_VA_RESERVED_SIZE;
addr = amdgpu_gmc_sign_extend(addr);
return addr;
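
A rough model of the helper this hunk switches to; the real AMDGPU_VA_RESERVED_CSA_START() definition lives in a header outside this diff, and the 2 MiB CSA size below is an assumption, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the real macros live in amdgpu_vm.h, outside
 * this diff, and the CSA size here is assumed. */
#define GPU_PAGE_SHIFT		12
#define VA_RESERVED_CSA_SIZE	(2ULL << 20)

struct vm_manager { uint64_t max_pfn; };
struct adev { struct vm_manager vm_manager; };

#define VA_RESERVED_CSA_START(a) \
	(((a)->vm_manager.max_pfn << GPU_PAGE_SHIFT) - VA_RESERVED_CSA_SIZE)

int main(void)
{
	struct adev a = { .vm_manager = { .max_pfn = 1ULL << 36 } };

	/* the CSA sits in a named window at the top of the VA space,
	 * replacing the open-coded "top minus AMDGPU_VA_RESERVED_SIZE" */
	printf("csa start: 0x%llx\n",
	       (unsigned long long)VA_RESERVED_CSA_START(&a));
	return 0;
}
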
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 94bdb5fa6ebc..b0ea4ddc8e72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -96,6 +96,9 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
+#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
+#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
+#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
static const struct drm_driver amdgpu_kms_driver;
@@ -781,12 +784,22 @@ u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
void __iomem *pcie_index_hi_offset;
void __iomem *pcie_data_offset;
- pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
- pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
- if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
- pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
- else
+ if (unlikely(!adev->nbio.funcs)) {
+ pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
+ pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
+ } else {
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ }
+
+ if (reg_addr >> 32) {
+ if (unlikely(!adev->nbio.funcs))
+ pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
+ else
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+ } else {
pcie_index_hi = 0;
+ }
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -1218,8 +1231,6 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
- /* TODO: check the return val and stop device initialization if boot fails */
- amdgpu_psp_query_boot_status(adev);
return ret;
} else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1442,6 +1453,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
+ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
+ DRM_WARN("System can't access extended configuration space,please check!!\n");
+
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
(pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
@@ -5686,6 +5701,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
IP_VERSION(9, 4, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
amdgpu_ras_resume(adev);
} else {
@@ -6107,6 +6123,20 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_reset_context reset_context;
u32 memsize;
struct list_head device_list;
+ struct amdgpu_hive_info *hive;
+ int hive_ras_recovery = 0;
+ struct amdgpu_ras *ras;
+
+ /* PCI error slot reset should be skipped during RAS recovery */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+ ras = amdgpu_ras_get_context(adev);
+ if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
+ ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ return PCI_ERS_RESULT_RECOVERED;
DRM_INFO("PCI error: slot reset callback!!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index c7d60dd0fb97..78588334577a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -27,6 +27,7 @@
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
+#include "amdgpu_ras.h"
#include "soc15.h"
#include "gfx_v9_0.h"
@@ -63,17 +64,20 @@
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
+#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
+#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
+#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
@@ -92,12 +96,15 @@
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
+#include "vcn_v5_0_0.h"
+#include "jpeg_v5_0_0.h"
#include "amdgpu_vpe.h"
#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMP0_SMN_C2PMSG_33 0x16061
#define mmMM_INDEX 0x0
@@ -518,7 +525,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
out:
kfree(adev->mman.discovery_bin);
adev->mman.discovery_bin = NULL;
-
+ if ((amdgpu_discovery != 2) &&
+ (RREG32(mmIP_DISCOVERY_VERSION) == 4))
+ amdgpu_ras_query_boot_status(adev, 4);
return r;
}
@@ -1278,11 +1287,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
* 0b10 : encode is disabled
* 0b01 : decode is disabled
*/
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
- ip->revision & 0xc0;
- ip->revision &= ~0xc0;
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
(1U << ip->instance_number);
@@ -1293,6 +1301,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->vcn.num_vcn_inst + 1,
AMDGPU_MAX_VCN_INSTANCES);
}
+ ip->revision &= ~0xc0;
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
@@ -1763,6 +1772,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
break;
+ case IP_VERSION(7, 0, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
@@ -1812,11 +1824,16 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
@@ -2033,6 +2050,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
@@ -2122,6 +2140,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
break;
+ case IP_VERSION(5, 0, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2493,6 +2515,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
break;
case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
adev->nbio.funcs = &nbio_v7_11_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
break;
@@ -2560,6 +2583,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
adev->hdp.funcs = &hdp_v6_0_funcs;
break;
+ case IP_VERSION(7, 0, 0):
+ adev->hdp.funcs = &hdp_v7_0_funcs;
+ break;
default:
break;
}
@@ -2624,6 +2650,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 8):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
adev->smuio.funcs = &smuio_v13_0_6_funcs;
break;
default:
@@ -2637,6 +2664,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 3):
adev->lsdma.funcs = &lsdma_v6_0_funcs;
break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ adev->lsdma.funcs = &lsdma_v7_0_funcs;
+ break;
default:
break;
}
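
The VCN hunk above is a bounds fix: the old code stored into vcn_config[num_vcn_inst] before checking num_vcn_inst against AMDGPU_MAX_VCN_INSTANCES, so a discovery table advertising too many instances wrote past the array. A userspace model of the corrected check-before-store pattern:

#include <stdio.h>
#include <stdint.h>

#define MAX_VCN_INSTANCES 4

/* The array store must be guarded by the same bounds check that
 * gates the instance-counter bump. */
static int add_vcn_inst(uint8_t *cfg, int *num, uint8_t revision)
{
	if (*num >= MAX_VCN_INSTANCES)
		return -1;		/* table advertises too many */
	cfg[*num] = revision & 0xc0;	/* store only when in bounds */
	(*num)++;
	return 0;
}

int main(void)
{
	uint8_t cfg[MAX_VCN_INSTANCES] = { 0 };
	int num = 0;

	for (int i = 0; i < 6; i++)	/* 6 entries, room for 4 */
		if (add_vcn_inst(cfg, &num, 0xc0 | i))
			printf("instance %d rejected\n", i);
	printf("kept %d instances\n", num);
	return 0;
}
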
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index decbbe3d4f06..055ba2ea4c12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -377,6 +377,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
+ /* FIXME: This should be after the "if", but needs a fix to make sure
+ * DMABuf imports are initialized in the right VM list.
+ */
+ amdgpu_vm_bo_invalidate(adev, bo, false);
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 586f4d03039d..af7fae7907d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -367,7 +367,7 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
* Setting the value to 0 disables this functionality.
* Setting the value to -2 is auto enabled with power down when displays are attached.
*/
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = autowith displays)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
/**
@@ -594,7 +594,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
#ifdef CONFIG_DRM_AMDGPU_SI
#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support = 0;
+int amdgpu_si_support;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_si_support = 1;
@@ -613,7 +613,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444);
#ifdef CONFIG_DRM_AMDGPU_CIK
#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support = 0;
+int amdgpu_cik_support;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_cik_support = 1;
@@ -849,12 +849,13 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
* the ABM algorithm, with 1 being the least reduction and 4 being the most
* reduction.
*
- * Defaults to 0, or disabled. Userspace can still override this level later
- * after boot.
+ * Defaults to -1, or auto. Userspace can only override this level after
+ * boot if it's set to auto.
*/
-uint amdgpu_dm_abm_level;
-MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
-module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+int amdgpu_dm_abm_level = -1;
+MODULE_PARM_DESC(abmlevel,
+ "ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))");
+module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444);
int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 5706b282a0c7..c7df7fa3459f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -97,6 +97,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
stats.requested_visible_vram/1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
stats.requested_gtt/1024UL);
+ drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
+ drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
+ drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 49a5f1c73b3e..22aeee8adb71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -187,7 +187,34 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
else
++bo_va->ref_count;
amdgpu_bo_unreserve(abo);
- return 0;
+
+ /* Validate and add eviction fence to DMABuf imports with dynamic
+ * attachment in compute VMs. Re-validation will be done by
+ * amdgpu_vm_validate. Fences are on the reservation shared with the
+ * export, which is currently required to be validated and fenced
+ * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
+ *
+ * Nested locking below for the case that a GEM object is opened in
+ * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
+ * but not for export, this is a different lock class that cannot lead to
+ * circular lock dependencies.
+ */
+ if (!vm->is_compute_context || !vm->process_info)
+ return 0;
+ if (!obj->import_attach ||
+ !dma_buf_is_dynamic(obj->import_attach->dmabuf))
+ return 0;
+ mutex_lock_nested(&vm->process_info->lock, 1);
+ if (!WARN_ON(!vm->process_info->eviction_fence)) {
+ r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
+ &vm->process_info->eviction_fence->base);
+ if (r)
+ dev_warn(adev->dev, "%d: validate_and_fence failed: %d\n",
+ vm->task_info.pid, r);
+ }
+ mutex_unlock(&vm->process_info->lock);
+
+ return r;
}
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
@@ -682,10 +709,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
uint64_t vm_size;
int r = 0;
- if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
+ if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
dev_dbg(dev->dev,
"va_address 0x%llx is in reserved area 0x%llx\n",
- args->va_address, AMDGPU_VA_RESERVED_SIZE);
+ args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
return -EINVAL;
}
@@ -701,7 +728,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_GMC_HOLE_MASK;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
- vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_TOP;
if (args->va_address + args->map_size > vm_size) {
dev_dbg(dev->dev,
"va_address 0x%llx is in top reserved area 0x%llx\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6ddc8e3360e2..f04803a44b44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -329,7 +329,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->eop_gpu_addr = kiq->eop_gpu_addr;
ring->no_scheduler = true;
- sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
+ snprintf(ring->name, sizeof(ring->name), "kiq_%d.%d.%d.%d",
+ xcc_id, ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
@@ -642,8 +643,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
- kiq->pmf->kiq_map_queues(kiq_ring,
- &adev->gfx.compute_ring[j]);
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.compute_ring[j]);
}
r = amdgpu_ring_test_helper(kiq_ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 55784a9f26c4..d4a848c51a83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -52,7 +52,7 @@ int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
struct amdgpu_bo_param bp;
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
- uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) -1) >> pde0_page_shift;
+ uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
memset(&bp, 0, sizeof(bp));
bp.size = PAGE_ALIGN((npdes + 1) * 8);
@@ -746,6 +746,59 @@ error_unlock_reset:
return r;
}
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask,
+ uint32_t xcc_inst)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
+ struct amdgpu_ring *ring = &kiq->ring;
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+
+ if (adev->mes.ring.sched.ready) {
+ amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
+ ref, mask);
+ return;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+ ref, mask);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for IRQ context */
+ if (r < 1 && in_interrupt())
+ goto failed_kiq;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq;
+
+ return;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq:
+ dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
/**
* amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
* @adev: amdgpu_device pointer
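
The new amdgpu_gmc_fw_reg_write_reg_wait() polls the KIQ fence a bounded number of times before giving up. The retry loop, reduced to a runnable userspace model (the fence stub below is a stand-in, not driver code):

#include <stdio.h>

#define MAX_TRY 10

static int fence_signaled(int attempt) { return attempt >= 3; } /* stub */

int main(void)
{
	int cnt = 0, r = 0;

	/* same shape as the driver loop: bounded polling with a sleep
	 * between attempts (msleep() elided here) */
	while (r < 1 && cnt++ < MAX_TRY)
		r = fence_signaled(cnt);

	if (cnt > MAX_TRY)
		printf("failed to write/wait reg pair\n");
	else
		printf("signaled after %d polls\n", cnt);
	return 0;
}
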
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index e699d1ca8deb..17f40ea1104b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -417,6 +417,10 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst);
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask,
+ uint32_t xcc_inst);
extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index ddd0891da116..3d7fcdeaf8cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -62,9 +62,8 @@ int amdgpu_pasid_alloc(unsigned int bits)
int pasid = -EINVAL;
for (bits = min(bits, 31U); bits > 0; bits--) {
- pasid = ida_simple_get(&amdgpu_pasid_ida,
- 1U << (bits - 1), 1U << bits,
- GFP_KERNEL);
+ pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
+ (1U << bits) - 1, GFP_KERNEL);
if (pasid != -ENOSPC)
break;
}
@@ -82,7 +81,7 @@ int amdgpu_pasid_alloc(unsigned int bits)
void amdgpu_pasid_free(u32 pasid)
{
trace_amdgpu_pasid_freed(pasid);
- ida_simple_remove(&amdgpu_pasid_ida, pasid);
+ ida_free(&amdgpu_pasid_ida, pasid);
}
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
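
The ida conversion has to adjust the upper bound because the two APIs disagree on inclusivity: ida_simple_get() took an exclusive end, while ida_alloc_range() takes an inclusive max. A trivial demonstration of the equivalence:

#include <stdio.h>

/* ida_simple_get(ida, start, end, gfp) allocates in [start, end);
 * ida_alloc_range(ida, min, max, gfp) allocates in [min, max].
 * Hence 1U << bits becomes (1U << bits) - 1 in the hunk above. */
int main(void)
{
	unsigned int bits = 4;
	unsigned int excl_end = 1U << bits;	  /* old: end, exclusive */
	unsigned int incl_max = (1U << bits) - 1; /* new: max, inclusive */

	printf("exclusive end 0x%x == inclusive max 0x%x + 1\n",
	       excl_end, incl_max);
	return 0;
}
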
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 2ff2897fd1db..6df99cb00d9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -36,10 +36,35 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
{
+ int i, r;
+
INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
mutex_init(&adev->jpeg.jpeg_pg_lock);
atomic_set(&adev->jpeg.total_submission_cnt, 0);
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG))
+ adev->jpeg.indirect_sram = true;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (adev->jpeg.indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->jpeg.inst[i].dpg_sram_bo,
+ &adev->jpeg.inst[i].dpg_sram_gpu_addr,
+ &adev->jpeg.inst[i].dpg_sram_cpu_addr);
+ if (r) {
+ dev_err(adev->dev,
+ "JPEG %d (%d) failed to allocate DPG bo\n", i, r);
+ return r;
+ }
+ }
+ }
+
return 0;
}
@@ -51,6 +76,11 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
if (adev->jpeg.harvest_config & (1 << i))
continue;
+ amdgpu_bo_free_kernel(
+ &adev->jpeg.inst[i].dpg_sram_bo,
+ &adev->jpeg.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->jpeg.inst[i].dpg_sram_cpu_addr);
+
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
}
@@ -210,12 +240,15 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else {
r = 0;
}
+
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
+ if (amdgpu_emu_mode == 1)
+ udelay(10);
}
if (i >= adev->usec_timeout)
@@ -296,3 +329,16 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
+
+int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id)
+{
+ struct amdgpu_firmware_info ucode = {
+ .ucode_id = AMDGPU_UCODE_ID_JPEG_RAM,
+ .mc_addr = adev->jpeg.inst[inst_idx].dpg_sram_gpu_addr,
+ .ucode_size = ((uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr),
+ };
+
+ return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
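
The DPG buffer allocated above (64 * 2 * 4 bytes, which reads as 64 offset/value pairs of u32) is filled by WREG32_SOC15_JPEG_DPG_MODE through dpg_sram_curr_addr, and amdgpu_jpeg_psp_update_sram() derives the upload size from how far that cursor advanced. A userspace model of the accounting:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define DPG_SRAM_ENTRIES 64	/* matches the 64 * 2 * 4 byte bo above */

int main(void)
{
	uint32_t sram[DPG_SRAM_ENTRIES * 2];
	uint32_t *curr = sram;

	/* each indirect "register write" appends an (offset, value) pair */
	for (uint32_t reg = 0; reg < 3; reg++) {
		*curr++ = 0x100 + reg;	/* offset */
		*curr++ = reg * reg;	/* value  */
	}

	/* ucode_size, exactly as amdgpu_jpeg_psp_update_sram() computes it */
	printf("ucode_size = %zu bytes\n",
	       (size_t)((uintptr_t)curr - (uintptr_t)sram)); /* 24 */
	return 0;
}
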
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index ffe47e9f5bf2..aea31d61d991 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -32,6 +32,34 @@
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
+#define WREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ JPEG, GET_INST(JPEG, inst_idx), \
+ mmUVD_DPG_LMA_CTL, \
+ (UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \
+ indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } else { \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
+ } while (0)
+
+#define RREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, mask_en) \
+ ({ \
+ WREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA); \
+ })
+
struct amdgpu_jpeg_reg{
unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
@@ -41,6 +69,11 @@ struct amdgpu_jpeg_inst {
struct amdgpu_irq_src irq;
struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_jpeg_reg external;
+ struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
+ void *dpg_sram_cpu_addr;
+ uint64_t dpg_sram_gpu_addr;
+ uint32_t *dpg_sram_curr_addr;
uint8_t aid_id;
};
@@ -63,6 +96,7 @@ struct amdgpu_jpeg {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
+ bool indirect_sram;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -82,5 +116,7 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block);
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bf4f48fe438d..a2df3025a754 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -894,14 +894,14 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
- vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_TOP;
/* Older VCE FW versions are buggy and can handle only 40bits */
if (adev->vce.fw_version &&
adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
vm_size = min(vm_size, 1ULL << 40);
- dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
+ dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM;
dev_info->virtual_address_max =
min(vm_size, AMDGPU_GMC_HOLE_START);
@@ -1114,6 +1114,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
ui32 >>= 8;
break;
+ case AMDGPU_INFO_SENSOR_GPU_INPUT_POWER:
+ /* get input GPU power */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 >>= 8;
+ break;
case AMDGPU_INFO_SENSOR_VDDNB:
/* get VDDNB in millivolts */
if (amdgpu_dpm_read_sensor(adev,
@@ -1370,6 +1379,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto error_vm;
}
+ r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
+ if (r)
+ goto error_vm;
+
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
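
The ui32 >>= 8 after the new GPU_INPUT_POWER query matches the neighbouring power sensors and assumes the SMU reports watts in 8.8 fixed point; that format is inferred from the driver's other consumers of these sensors, not stated in this diff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw = (37 << 8) | 0x80;	/* 37.5 W in 8.8 format */

	/* the ioctl hands userspace the integer part */
	printf("integer watts: %u\n", raw >> 8);		   /* 37 */
	printf("milliwatts:    %u\n",
	       (raw >> 8) * 1000 + (raw & 0xff) * 1000 / 256);	   /* 37500 */
	return 0;
}
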
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 59fafb8392e0..24ad4b97177b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -27,6 +27,16 @@
#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"
+static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
+ uint64_t mc_status)
+{
+ if (adev->umc.ras->check_ecc_err_status)
+ return adev->umc.ras->check_ecc_err_status(adev,
+ AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);
+
+ return false;
+}
+
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
@@ -202,16 +212,16 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
{
- dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n");
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_STATUS]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_ADDR]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_MISC0]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_IPID]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_SYND]);
}
@@ -256,9 +266,14 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
if (type == AMDGPU_MCA_ERROR_TYPE_UE)
amdgpu_ras_error_statistic_ue_count(err_data,
&mcm_info, &err_addr, (uint64_t)count);
- else
- amdgpu_ras_error_statistic_ce_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ else {
+ if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
+ amdgpu_ras_error_statistic_de_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
+ else
+ amdgpu_ras_error_statistic_ce_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
+ }
}
out_mca_release:
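
With the new AMDGPU_MCA_ERROR_TYPE_DE, non-UE banks are split into deferred errors (poison to be consumed later) and plain correctable ones before they are counted. The routing, as a runnable userspace model:

#include <stdbool.h>
#include <stdio.h>

enum err_type { ERR_UE, ERR_CE, ERR_DE };

/* anything that is not an uncorrected error is further split into
 * deferred and correctable, mirroring the hunk above */
static enum err_type classify(bool uncorrected, bool deferred)
{
	if (uncorrected)
		return ERR_UE;
	return deferred ? ERR_DE : ERR_CE;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(true, false),	/* UE */
	       classify(false, true),	/* DE */
	       classify(false, false));	/* CE */
	return 0;
}
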
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index b399f1b62887..b964110ed1e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -65,6 +65,7 @@ enum amdgpu_mca_ip {
enum amdgpu_mca_error_type {
AMDGPU_MCA_ERROR_TYPE_UE = 0,
AMDGPU_MCA_ERROR_TYPE_CE,
+ AMDGPU_MCA_ERROR_TYPE_DE,
};
struct amdgpu_mca_ras_block {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index da48b6da0107..a98e03e0a51f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1398,7 +1398,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
goto error_fini;
}
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
+ ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
if (r) {
DRM_ERROR("failed to map ctx meta data\n");
@@ -1565,9 +1565,9 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
-
- debugfs_create_file("amdgpu_mes_event_log", 0444, root,
- adev, &amdgpu_debugfs_mes_event_log_fops);
+ if (adev->enable_mes)
+ debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+ adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 425cebcc5cbf..010b0cb7693c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -220,9 +220,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
placement->num_placement = c;
placement->placement = places;
-
- placement->num_busy_placement = c;
- placement->busy_placement = places;
}
/**
@@ -1276,25 +1273,36 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
uint64_t size = amdgpu_bo_size(bo);
+ struct drm_gem_object *obj;
unsigned int domain;
+ bool shared;
/* Abort if the BO doesn't currently have a backing store */
if (!bo->tbo.resource)
return;
+ obj = &bo->tbo.base;
+ shared = drm_gem_object_is_shared_for_memory_stats(obj);
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
if (amdgpu_bo_in_cpu_visible_vram(bo))
stats->visible_vram += size;
+ if (shared)
+ stats->vram_shared += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
stats->gtt += size;
+ if (shared)
+ stats->gtt_shared += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
stats->cpu += size;
+ if (shared)
+ stats->cpu_shared += size;
break;
}
@@ -1397,8 +1405,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
- abo->placement.num_busy_placement = 1;
- abo->placement.busy_placement = &abo->placements[1];
+ abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a3ea8a82db23..be679c42b0b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -138,12 +138,18 @@ struct amdgpu_bo_vm {
struct amdgpu_mem_stats {
/* current VRAM usage, includes visible VRAM */
uint64_t vram;
+ /* current shared VRAM usage, includes visible VRAM */
+ uint64_t vram_shared;
/* current visible VRAM usage */
uint64_t visible_vram;
/* current GTT usage */
uint64_t gtt;
+ /* current shared GTT usage */
+ uint64_t gtt_shared;
/* current system memory usage */
uint64_t cpu;
+ /* current shared system memory usage */
+ uint64_t cpu_shared;
/* sum of evicted buffers, includes visible VRAM */
uint64_t evicted_vram;
/* sum of evicted buffers due to CPU access */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 0328616473f8..3c2b1413058b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,6 +38,7 @@
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
+#include "psp_v14_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
@@ -162,20 +163,26 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
+ psp->autoload_supported = true;
+ psp->boot_time_tmr = true;
+
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
psp_v3_1_set_psp_funcs(psp);
psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
psp_v10_0_set_psp_funcs(psp);
psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 7):
@@ -188,15 +195,20 @@ static int psp_early_init(void *handle)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
- psp->autoload_supported = true;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 3):
case IP_VERSION(12, 0, 1):
psp_v12_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 2):
+ psp->boot_time_tmr = false;
+ fallthrough;
case IP_VERSION(13, 0, 6):
psp_v13_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
@@ -204,25 +216,31 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
psp_v13_0_set_psp_funcs(psp);
- psp->autoload_supported = true;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 8):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
psp_v11_0_8_set_psp_funcs(psp);
- psp->autoload_supported = false;
}
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
- psp->autoload_supported = true;
adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 4):
psp_v13_0_4_set_psp_funcs(psp);
- psp->autoload_supported = true;
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ psp_v14_0_set_psp_funcs(psp);
break;
default:
return -EINVAL;
@@ -230,6 +248,8 @@ static int psp_early_init(void *handle)
psp->adev = adev;
+ adev->psp_timeout = 20000;
+
psp_check_pmfw_centralized_cstate_management(psp);
if (amdgpu_sriov_vf(adev))
@@ -291,21 +311,22 @@ static int psp_memory_training_init(struct psp_context *psp)
struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
- DRM_DEBUG("memory training is not supported!\n");
+ dev_dbg(psp->adev->dev, "memory training is not supported!\n");
return 0;
}
ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
if (ctx->sys_cache == NULL) {
- DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+ dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
ret = -ENOMEM;
goto Err_out;
}
- DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
- ctx->train_data_size,
- ctx->p2c_train_data_offset,
- ctx->c2p_train_data_offset);
+ dev_dbg(psp->adev->dev,
+ "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
return 0;
@@ -407,7 +428,7 @@ static int psp_sw_init(void *handle)
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
- DRM_ERROR("Failed to allocate memory to command buffer!\n");
+ dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
ret = -ENOMEM;
}
@@ -454,13 +475,13 @@ static int psp_sw_init(void *handle)
if (mem_training_ctx->enable_mem_training) {
ret = psp_memory_training_init(psp);
if (ret) {
- DRM_ERROR("Failed to initialize memory training!\n");
+ dev_err(adev->dev, "Failed to initialize memory training!\n");
return ret;
}
ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
if (ret) {
- DRM_ERROR("Failed to process memory training!\n");
+ dev_err(adev->dev, "Failed to process memory training!\n");
return ret;
}
}
@@ -626,7 +647,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
- int timeout = 20000;
+ int timeout = psp->adev->psp_timeout;
bool ras_intr = false;
bool skip_unsupport = false;
@@ -675,9 +696,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
- DRM_WARN("failed to load ucode %s(0x%X) ",
- amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
- DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ dev_warn(psp->adev->dev,
+ "failed to load ucode %s(0x%X) ",
+ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+ dev_warn(psp->adev->dev,
+ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
/* If any firmware (including CAP) load fails under SRIOV, it should
@@ -771,16 +794,6 @@ static int psp_load_toc(struct psp_context *psp,
return ret;
}
-static bool psp_boottime_tmr(struct psp_context *psp)
-{
- switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return true;
- default:
- return false;
- }
-}
-
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
@@ -807,12 +820,12 @@ static int psp_tmr_init(struct psp_context *psp)
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
if (ret) {
- DRM_ERROR("Failed to load toc\n");
+ dev_err(psp->adev->dev, "Failed to load toc\n");
return ret;
}
}
- if (!psp->tmr_bo) {
+ if (!psp->tmr_bo && !psp->boot_time_tmr) {
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
@@ -855,7 +868,7 @@ static int psp_tmr_load(struct psp_context *psp)
psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
if (psp->tmr_bo)
- DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
+ dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -1113,7 +1126,7 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
psp_prep_reg_prog_cmd_buf(cmd, reg, value);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (ret)
- DRM_ERROR("PSP failed to program reg id %d", reg);
+ dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
release_psp_cmd_buf(psp);
@@ -1526,22 +1539,22 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
switch (ras_cmd->ras_status) {
case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
dev_warn(psp->adev->dev,
- "RAS WARNING: cmd failed due to unsupported ip\n");
+ "RAS WARNING: cmd failed due to unsupported ip\n");
break;
case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
dev_warn(psp->adev->dev,
- "RAS WARNING: cmd failed due to unsupported error injection\n");
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
break;
case TA_RAS_STATUS__SUCCESS:
break;
case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
dev_warn(psp->adev->dev,
- "RAS WARNING: Inject error to critical region is not allowed\n");
+ "RAS WARNING: Inject error to critical region is not allowed\n");
break;
default:
dev_warn(psp->adev->dev,
- "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
break;
}
}
@@ -1565,7 +1578,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return ret;
if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
- DRM_WARN("RAS: Unsupported Interface");
+ dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
return -EINVAL;
}
@@ -1715,7 +1728,7 @@ int psp_ras_initialize(struct psp_context *psp)
psp->ras_context.context.initialized = true;
else {
if (ras_cmd->ras_status)
- dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+ dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
/* fail to load RAS TA */
psp->ras_context.context.initialized = false;
@@ -1779,6 +1792,31 @@ int psp_ras_trigger_error(struct psp_context *psp,
return 0;
}
+
+int psp_ras_query_address(struct psp_context *psp,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras_context.context.initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
+ ras_cmd->ras_in_message.address = *addr_in;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
+ return -EINVAL;
+
+ *addr_out = ras_cmd->ras_out_message.address;
+
+ return 0;
+}
// ras end
// HDCP start
@@ -2125,19 +2163,14 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
return ret;
}
-int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
{
- struct psp_context *psp = &adev->psp;
- int ret = 0;
-
- if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
- return 0;
-
if (psp->funcs &&
- psp->funcs->query_boot_status)
- ret = psp->funcs->query_boot_status(psp);
-
- return ret;
+ psp->funcs->get_ras_capability) {
+ return psp->funcs->get_ras_capability(psp);
+ } else {
+ return false;
+ }
}
static int psp_hw_start(struct psp_context *psp)
@@ -2150,7 +2183,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp);
if (ret) {
- DRM_ERROR("PSP load kdb failed!\n");
+ dev_err(adev->dev, "PSP load kdb failed!\n");
return ret;
}
}
@@ -2159,7 +2192,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_spl != NULL)) {
ret = psp_bootloader_load_spl(psp);
if (ret) {
- DRM_ERROR("PSP load spl failed!\n");
+ dev_err(adev->dev, "PSP load spl failed!\n");
return ret;
}
}
@@ -2168,7 +2201,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_sysdrv != NULL)) {
ret = psp_bootloader_load_sysdrv(psp);
if (ret) {
- DRM_ERROR("PSP load sys drv failed!\n");
+ dev_err(adev->dev, "PSP load sys drv failed!\n");
return ret;
}
}
@@ -2177,7 +2210,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_soc_drv != NULL)) {
ret = psp_bootloader_load_soc_drv(psp);
if (ret) {
- DRM_ERROR("PSP load soc drv failed!\n");
+ dev_err(adev->dev, "PSP load soc drv failed!\n");
return ret;
}
}
@@ -2186,7 +2219,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_intf_drv != NULL)) {
ret = psp_bootloader_load_intf_drv(psp);
if (ret) {
- DRM_ERROR("PSP load intf drv failed!\n");
+ dev_err(adev->dev, "PSP load intf drv failed!\n");
return ret;
}
}
@@ -2195,7 +2228,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_dbg_drv != NULL)) {
ret = psp_bootloader_load_dbg_drv(psp);
if (ret) {
- DRM_ERROR("PSP load dbg drv failed!\n");
+ dev_err(adev->dev, "PSP load dbg drv failed!\n");
return ret;
}
}
@@ -2204,7 +2237,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_ras_drv != NULL)) {
ret = psp_bootloader_load_ras_drv(psp);
if (ret) {
- DRM_ERROR("PSP load ras_drv failed!\n");
+ dev_err(adev->dev, "PSP load ras_drv failed!\n");
return ret;
}
}
@@ -2213,7 +2246,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
if (ret) {
- DRM_ERROR("PSP load sos failed!\n");
+ dev_err(adev->dev, "PSP load sos failed!\n");
return ret;
}
}
@@ -2221,17 +2254,17 @@ static int psp_hw_start(struct psp_context *psp)
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret) {
- DRM_ERROR("PSP create ring failed!\n");
+ dev_err(adev->dev, "PSP create ring failed!\n");
return ret;
}
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
- if (!psp_boottime_tmr(psp)) {
+ if (!psp->boot_time_tmr || psp->autoload_supported) {
ret = psp_tmr_init(psp);
if (ret) {
- DRM_ERROR("PSP tmr init failed!\n");
+ dev_err(adev->dev, "PSP tmr init failed!\n");
return ret;
}
}
@@ -2248,10 +2281,12 @@ skip_pin_bo:
return ret;
}
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
+ if (!psp->boot_time_tmr || !psp->autoload_supported) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load tmr failed!\n");
+ return ret;
+ }
}
return 0;
@@ -2462,6 +2497,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_P2S_TABLE:
*type = GFX_FW_TYPE_P2S_TABLE;
break;
+ case AMDGPU_UCODE_ID_JPEG_RAM:
+ *type = GFX_FW_TYPE_JPEG_RAM;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
@@ -2518,7 +2556,8 @@ static void psp_print_fw_hdr(struct psp_context *psp,
}
}
-static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
+static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd)
{
int ret;
@@ -2531,7 +2570,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
if (ret)
- DRM_ERROR("Unknown firmware type\n");
+ dev_err(psp->adev->dev, "Unknown firmware type\n");
return ret;
}
@@ -2542,7 +2581,7 @@ int psp_execute_ip_fw_load(struct psp_context *psp,
int ret = 0;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
- ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
+ ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
if (!ret) {
ret = psp_cmd_submit_buf(psp, ucode, cmd,
psp->fence_buf_mc_addr);
@@ -2601,13 +2640,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret)
- DRM_WARN("Failed to set MP1 state prepare for reload\n");
+ dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
}
ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
- DRM_ERROR("PSP load smu failed!\n");
+ dev_err(adev->dev, "PSP load smu failed!\n");
return ret;
}
@@ -2712,7 +2751,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
if (ret) {
- DRM_ERROR("Failed to start rlc autoload\n");
+ dev_err(adev->dev, "Failed to start rlc autoload\n");
return ret;
}
}
@@ -2734,7 +2773,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
if (ret) {
- DRM_ERROR("PSP ring init failed!\n");
+ dev_err(adev->dev, "PSP ring init failed!\n");
goto failed;
}
}
@@ -2749,13 +2788,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
ret = psp_asd_initialize(psp);
if (ret) {
- DRM_ERROR("PSP load asd failed!\n");
+ dev_err(adev->dev, "PSP load asd failed!\n");
goto failed1;
}
ret = psp_rl_load(adev);
if (ret) {
- DRM_ERROR("PSP load RL failed!\n");
+ dev_err(adev->dev, "PSP load RL failed!\n");
goto failed1;
}
@@ -2775,7 +2814,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
- "RAS: Failed to initialize RAS\n");
+ "RAS: Failed to initialize RAS\n");
ret = psp_hdcp_initialize(psp);
if (ret)
@@ -2828,7 +2867,7 @@ static int psp_hw_init(void *handle)
ret = psp_load_fw(adev);
if (ret) {
- DRM_ERROR("PSP firmware loading failed\n");
+ dev_err(adev->dev, "PSP firmware loading failed\n");
goto failed;
}
@@ -2875,7 +2914,7 @@ static int psp_suspend(void *handle)
psp->xgmi_context.context.initialized) {
ret = psp_xgmi_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate xgmi ta\n");
+ dev_err(adev->dev, "Failed to terminate xgmi ta\n");
goto out;
}
}
@@ -2883,46 +2922,46 @@ static int psp_suspend(void *handle)
if (psp->ta_fw) {
ret = psp_ras_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate ras ta\n");
+ dev_err(adev->dev, "Failed to terminate ras ta\n");
goto out;
}
ret = psp_hdcp_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate hdcp ta\n");
+ dev_err(adev->dev, "Failed to terminate hdcp ta\n");
goto out;
}
ret = psp_dtm_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate dtm ta\n");
+ dev_err(adev->dev, "Failed to terminate dtm ta\n");
goto out;
}
ret = psp_rap_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate rap ta\n");
+ dev_err(adev->dev, "Failed to terminate rap ta\n");
goto out;
}
ret = psp_securedisplay_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate securedisplay ta\n");
+ dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
goto out;
}
}
ret = psp_asd_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate asd\n");
+ dev_err(adev->dev, "Failed to terminate asd\n");
goto out;
}
ret = psp_tmr_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate tmr\n");
+ dev_err(adev->dev, "Failed to terminate tmr\n");
goto out;
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret)
- DRM_ERROR("PSP ring stop failed\n");
+ dev_err(adev->dev, "PSP ring stop failed\n");
out:
return ret;
@@ -2934,12 +2973,12 @@ static int psp_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- DRM_INFO("PSP is resuming...\n");
+ dev_info(adev->dev, "PSP is resuming...\n");
if (psp->mem_train_ctx.enable_mem_training) {
ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
if (ret) {
- DRM_ERROR("Failed to process memory training!\n");
+ dev_err(adev->dev, "Failed to process memory training!\n");
return ret;
}
}
@@ -2956,7 +2995,7 @@ static int psp_resume(void *handle)
ret = psp_asd_initialize(psp);
if (ret) {
- DRM_ERROR("PSP load asd failed!\n");
+ dev_err(adev->dev, "PSP load asd failed!\n");
goto failed;
}
@@ -2980,7 +3019,7 @@ static int psp_resume(void *handle)
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
- "RAS: Failed to initialize RAS\n");
+ "RAS: Failed to initialize RAS\n");
ret = psp_hdcp_initialize(psp);
if (ret)
@@ -3008,7 +3047,7 @@ static int psp_resume(void *handle)
return 0;
failed:
- DRM_ERROR("PSP resume failed\n");
+ dev_err(adev->dev, "PSP resume failed\n");
mutex_unlock(&adev->firmware.mutex);
return ret;
}
@@ -3069,9 +3108,11 @@ int psp_ring_cmd_submit(struct psp_context *psp,
write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
/* Check invalid write_frame ptr address */
if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ dev_err(adev->dev,
+ "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ dev_err(adev->dev,
+ "write_frame is pointing to address out of bounds\n");
return -EINVAL;
}
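
The bounds check above guards against a corrupted PSP write pointer before the driver touches the frame. A minimal user-space sketch of the same validation, with hypothetical ring and frame types standing in for the PSP structures:

#include <stdio.h>
#include <stdint.h>

#define RB_FRAMES 64                      /* hypothetical ring capacity, in frames */

struct km_frame { uint32_t dw[16]; };     /* stand-in for the real KM ring frame */

/* Derive a frame pointer from a hardware write index and refuse anything
 * outside [start, end], mirroring the check in psp_ring_cmd_submit(). */
static struct km_frame *frame_from_index(struct km_frame *start,
                                         struct km_frame *end,
                                         uint32_t write_index)
{
        struct km_frame *frame = start + write_index;

        if (frame < start || end < frame)
                return NULL;              /* the driver logs and returns -EINVAL */
        return frame;
}

int main(void)
{
        struct km_frame ring[RB_FRAMES];
        struct km_frame *end = &ring[RB_FRAMES - 1];

        printf("index 3:    %s\n", frame_from_index(ring, end, 3) ? "ok" : "out of bounds");
        printf("index 1000: %s\n", frame_from_index(ring, end, 1000) ? "ok" : "out of bounds");
        return 0;
}
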
@@ -3597,7 +3638,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
int ret;
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
- DRM_INFO("PSP block is not ready yet.");
+ dev_info(adev->dev, "PSP block is not ready yet\n.");
return -EBUSY;
}
@@ -3606,7 +3647,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
mutex_unlock(&adev->psp.mutex);
if (ret) {
- DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+ dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
return ret;
}
@@ -3628,7 +3669,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
void *fw_pri_cpu_addr;
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
- DRM_INFO("PSP block is not ready yet.");
+ dev_err(adev->dev, "PSP block is not ready yet.");
return -EBUSY;
}
@@ -3661,7 +3702,7 @@ rel_buf:
release_firmware(usbc_pd_fw);
fail:
if (ret) {
- DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+ dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
count = ret;
}
@@ -3708,7 +3749,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
/* Safeguard against memory drain */
if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
- dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
+ dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
kvfree(adev->psp.vbflash_tmp_buf);
adev->psp.vbflash_tmp_buf = NULL;
adev->psp.vbflash_image_size = 0;
@@ -3727,7 +3768,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
adev->psp.vbflash_image_size += count;
mutex_unlock(&adev->psp.mutex);
- dev_dbg(adev->dev, "IFWI staged for update");
+ dev_dbg(adev->dev, "IFWI staged for update\n");
return count;
}
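
The write handler above streams the image into a temporary buffer one sysfs chunk at a time and drops the whole staging buffer once the image would pass the cap. A hedged sketch of that staging pattern; the cap value and helper names here are hypothetical, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IMAGE_MAX (16u * 1024 * 1024)  /* stand-in for AMD_VBIOS_FILE_MAX_SIZE_B */

struct stage {
        char   *buf;   /* allocated on first write, like vbflash_tmp_buf */
        size_t  size;  /* bytes staged so far, like vbflash_image_size */
};

/* Stage one chunk at the given offset; drop everything if the image
 * would exceed the cap (the "safeguard against memory drain"). */
static long stage_write(struct stage *s, const char *chunk,
                        size_t off, size_t count)
{
        if (off + count > IMAGE_MAX) {
                free(s->buf);
                s->buf = NULL;
                s->size = 0;
                return -1;
        }
        if (!s->buf) {
                s->buf = malloc(IMAGE_MAX);
                if (!s->buf)
                        return -1;
        }
        memcpy(s->buf + off, chunk, count);
        s->size = off + count;
        return (long)count;
}

int main(void)
{
        struct stage s = { 0 };

        printf("staged %ld bytes, total %zu\n", stage_write(&s, "abcd", 0, 4), s.size);
        free(s.buf);
        return 0;
}
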
@@ -3747,7 +3788,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
if (adev->psp.vbflash_image_size == 0)
return -EINVAL;
- dev_dbg(adev->dev, "PSP IFWI flash process initiated");
+ dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
AMDGPU_GPU_PAGE_SIZE,
@@ -3772,11 +3813,11 @@ rel_buf:
adev->psp.vbflash_image_size = 0;
if (ret) {
- dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
+ dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
return ret;
}
- dev_dbg(adev->dev, "PSP IFWI flash process done");
+ dev_dbg(adev->dev, "PSP IFWI flash process done\n");
return 0;
}
@@ -3930,3 +3971,11 @@ const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
.rev = 4,
.funcs = &psp_ip_funcs,
};
+
+const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 14,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c4d9cbde55b9..ee16f134ae92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -134,7 +134,7 @@ struct psp_funcs {
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
- int (*query_boot_status)(struct psp_context *psp);
+ bool (*get_ras_capability)(struct psp_context *psp);
};
struct ta_funcs {
@@ -203,7 +203,7 @@ struct psp_ras_context {
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
/* Define the VRAM size that will be encroached by BIST training. */
-#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
+#define BIST_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
enum psp_memory_training_init_flag {
PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
@@ -364,6 +364,8 @@ struct psp_context {
atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported;
+ /* flag to mark whether psp use runtime TMR or boottime TMR */
+ bool boot_time_tmr;
/* flag to mark whether df cstate management centralized to PMFW */
bool pmfw_centralized_cstate_management;
@@ -463,6 +465,7 @@ extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
+extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, bool check_changed);
@@ -502,6 +505,9 @@ int psp_ras_enable_features(struct psp_context *psp,
int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info, uint32_t instance_mask);
int psp_ras_terminate(struct psp_context *psp);
+int psp_ras_query_address(struct psp_context *psp,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out);
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -538,7 +544,5 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
-
-int amdgpu_psp_query_boot_status(struct amdgpu_device *adev);
-
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 31823a30dea2..46f3d1013e8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -39,6 +39,7 @@
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
+#include "amdgpu_psp.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>
@@ -73,6 +74,8 @@ const char *ras_block_string[] = {
"mca",
"vcn",
"jpeg",
+ "ih",
+ "mpio",
};
const char *ras_mca_block_string[] = {
@@ -94,7 +97,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
if (!ras_block)
return "NULL";
- if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
+ if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
+ ras_block->block >= ARRAY_SIZE(ras_block_string))
return "OUT OF RANGE";
if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
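
The extra comparison added above makes the lookup safe even when the enum grows faster than the string table. A self-contained sketch of the same double guard, with a table deliberately shorter than its enum:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum block { BLK_UMC, BLK_SDMA, BLK_GFX, BLK_COUNT };

static const char *block_string[] = { "umc", "sdma" };  /* one entry short */

/* Same idea as get_ras_block_str(): check both the enum bound and the
 * actual table length, so a block added to the enum but not the table
 * yields "OUT OF RANGE" instead of an out-of-bounds read. */
static const char *block_str(int blk)
{
        if (blk < 0 || blk >= BLK_COUNT || blk >= (int)ARRAY_SIZE(block_string))
                return "OUT OF RANGE";
        return block_string[blk];
}

int main(void)
{
        printf("%s %s\n", block_str(BLK_SDMA), block_str(BLK_GFX)); /* sdma OUT OF RANGE */
        return 0;
}
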
@@ -116,6 +120,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms
+
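
RAS_BAD_PAGE_COVER above encodes the quoted rate of one bad page per 100 MB of VRAM. A worked sketch of how such a cover value turns into a retirement budget; the division and the helper name are my illustration, not the driver's exact threshold logic:

#include <stdio.h>
#include <stdint.h>

#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)  /* 1 bad page per 100 MB */

static uint64_t bad_page_threshold(uint64_t vram_bytes)
{
        return vram_bytes / RAS_BAD_PAGE_COVER;
}

int main(void)
{
        /* A 16 GB board tolerates about 163 retired pages at this rate. */
        printf("%llu\n", (unsigned long long)bad_page_threshold(16ULL << 30));
        return 0;
}
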
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -628,8 +634,12 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
}
- return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
- "ce", info.ce_count);
+ if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count, "de", info.de_count);
+ else
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count);
}
/* obj begin */
@@ -1036,7 +1046,8 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
struct ras_manager *ras_mgr,
struct ras_err_data *err_data,
const char *blk_name,
- bool is_ue)
+ bool is_ue,
+ bool is_de)
{
struct amdgpu_smuio_mcm_config_info *mcm_info;
struct ras_err_node *err_node;
@@ -1065,25 +1076,50 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
}
} else {
- for_each_ras_error(err_node, err_data) {
- err_info = &err_node->err_info;
- mcm_info = &err_info->mcm_info;
- if (err_info->ce_count) {
+ if (is_de) {
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ if (err_info->de_count) {
+ dev_info(adev->dev, "socket: %d, die: %d, "
+ "%lld new deferred hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->de_count,
+ blk_name);
+ }
+ }
+
+ for_each_ras_error(err_node, &ras_mgr->err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new correctable hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->ce_count,
- blk_name);
+ "%lld deferred hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->de_count, blk_name);
+ }
+ } else {
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ if (err_info->ce_count) {
+ dev_info(adev->dev, "socket: %d, die: %d, "
+ "%lld new correctable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ce_count,
+ blk_name);
+ }
}
- }
- for_each_ras_error(err_node, &ras_mgr->err_data) {
- err_info = &err_node->err_info;
- mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld correctable hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
+ for_each_ras_error(err_node, &ras_mgr->err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ dev_info(adev->dev, "socket: %d, die: %d, "
+ "%lld correctable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->ce_count, blk_name);
+ }
}
}
}
@@ -1102,7 +1138,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
if (err_data->ce_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false);
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ blk_name, false, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
@@ -1124,7 +1161,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
if (err_data->ue_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true);
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ blk_name, true, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
@@ -1144,6 +1182,28 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
}
}
+ if (err_data->de_count) {
+ if (err_data_has_source_info(err_data)) {
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ blk_name, false, true);
+ } else if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id &&
+ adev->smuio.funcs->get_die_id) {
+ dev_info(adev->dev, "socket: %d, die: %d "
+ "%ld deferred hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.de_count,
+ blk_name);
+ } else {
+ dev_info(adev->dev, "%ld deferred hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.de_count,
+ blk_name);
+ }
+ }
}
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
@@ -1154,7 +1214,8 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
if (err_data_has_source_info(err_data)) {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
-
+ amdgpu_ras_error_statistic_de_count(&obj->err_data,
+ &err_info->mcm_info, NULL, err_info->de_count);
amdgpu_ras_error_statistic_ce_count(&obj->err_data,
&err_info->mcm_info, NULL, err_info->ce_count);
amdgpu_ras_error_statistic_ue_count(&obj->err_data,
@@ -1164,9 +1225,72 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
/* for legacy asic path which doesn't have error source info */
obj->err_data.ue_count += err_data->ue_count;
obj->err_data.ce_count += err_data->ce_count;
+ obj->err_data.de_count += err_data->de_count;
}
}
+static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+ struct ras_common_if head;
+
+ memset(&head, 0, sizeof(head));
+ head.block = blk;
+
+ return amdgpu_ras_find_obj(adev, &head);
+}
+
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ const struct aca_info *aca_info, void *data)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
+}
+
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ amdgpu_aca_remove_handle(&obj->aca_handle);
+
+ return 0;
+}
+
+static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
+}
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+ struct aca_handle *handle, char *buf, void *data)
+{
+ struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count);
+}
+
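
amdgpu_ras_aca_sysfs_read() above recovers its ras_manager from the embedded aca_handle with container_of(). A standalone demonstration of that idiom, with toy types in place of the driver's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct aca_handle { int dummy; };

struct ras_manager {
        int id;
        struct aca_handle aca_handle;  /* embedded, as in the driver */
};

int main(void)
{
        struct ras_manager mgr = { .id = 7 };
        struct aca_handle *h = &mgr.aca_handle;  /* what a callback receives */
        struct ras_manager *back = container_of(h, struct ras_manager, aca_handle);

        printf("%d\n", back->id);  /* 7: recovered the outer object */
        return 0;
}
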
static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
struct ras_query_if *info,
struct ras_err_data *err_data,
@@ -1174,6 +1298,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
{
enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
struct amdgpu_ras_block_object *block_obj = NULL;
+ int ret;
if (blk == AMDGPU_RAS_BLOCK_COUNT)
return -EINVAL;
@@ -1203,9 +1328,19 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
}
}
} else {
- /* FIXME: add code to check return value later */
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+ if (amdgpu_aca_is_enabled(adev)) {
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
+ if (ret)
+ return ret;
+ } else {
+ /* FIXME: add code to check return value later */
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+ }
}
return 0;
@@ -1239,6 +1374,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
+ info->de_count = obj->err_data.de_count;
amdgpu_ras_error_generate_report(adev, info, &err_data);
@@ -1254,6 +1390,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
struct amdgpu_hive_info *hive;
int hive_ras_recovery = 0;
@@ -1264,7 +1401,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
}
if (!amdgpu_ras_is_supported(adev, block) ||
- !amdgpu_ras_get_mca_debug_mode(adev))
+ !amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
hive = amdgpu_get_xgmi_hive(adev);
@@ -1276,7 +1413,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
hive_ras_recovery) &&
- mca_funcs && mca_funcs->mca_set_debug_mode)
+ ((smu_funcs && smu_funcs->set_debug_mode) ||
+ (mca_funcs && mca_funcs->mca_set_debug_mode)))
return -EOPNOTSUPP;
if (block_obj->hw_ops->reset_ras_error_count)
@@ -1772,7 +1910,10 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
}
}
- amdgpu_mca_smu_debugfs_init(adev, dir);
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_smu_debugfs_init(adev, dir);
+ else
+ amdgpu_mca_smu_debugfs_init(adev, dir);
}
/* debugfs end */
@@ -1900,7 +2041,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
}
}
- amdgpu_umc_poison_handler(adev, false);
+ amdgpu_umc_poison_handler(adev, obj->head.block, false);
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
@@ -1951,6 +2092,7 @@ static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
*/
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
}
amdgpu_ras_error_data_fini(&err_data);
@@ -2520,6 +2662,32 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
}
}
+static int amdgpu_ras_page_retirement_thread(void *param)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)param;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ while (!kthread_should_stop()) {
+
+ wait_event_interruptible(con->page_retirement_wq,
+ kthread_should_stop() ||
+ atomic_read(&con->page_retirement_req_cnt));
+
+ if (kthread_should_stop())
+ break;
+
+ dev_info(adev->dev, "Start processing page retirement. request:%d\n",
+ atomic_read(&con->page_retirement_req_cnt));
+
+ atomic_dec(&con->page_retirement_req_cnt);
+
+ amdgpu_umc_bad_page_polling_timeout(adev,
+ false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ }
+
+ return 0;
+}
+
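
The retirement thread above parks on a waitqueue until either a stop request or a pending request count wakes it. A user-space analogue of that wait/wake handshake using pthreads; this sketches the pattern, not the kernel primitives themselves:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static int  req_cnt;      /* plays page_retirement_req_cnt */
static bool should_stop;  /* plays kthread_should_stop() */

static void *retirement_worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
                while (!req_cnt && !should_stop)     /* wait_event_interruptible() */
                        pthread_cond_wait(&wq, &lock);
                if (should_stop)
                        break;
                req_cnt--;                           /* atomic_dec(...) */
                printf("processing one retirement request\n");
                pthread_cond_broadcast(&wq);         /* let main see the drain */
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, retirement_worker, NULL);

        pthread_mutex_lock(&lock);
        req_cnt++;                    /* atomic_inc(&con->page_retirement_req_cnt) */
        pthread_cond_broadcast(&wq);  /* wake_up(&con->page_retirement_wq) */
        while (req_cnt)               /* wait until the worker drained the request */
                pthread_cond_wait(&wq, &lock);
        should_stop = true;           /* kthread_stop() */
        pthread_cond_broadcast(&wq);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}
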
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -2583,6 +2751,16 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
}
}
+ mutex_init(&con->page_retirement_lock);
+ init_waitqueue_head(&con->page_retirement_wq);
+ atomic_set(&con->page_retirement_req_cnt, 0);
+ con->page_retirement_thread =
+ kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
+ if (IS_ERR(con->page_retirement_thread)) {
+ con->page_retirement_thread = NULL;
+ dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
+ }
+
#ifdef CONFIG_X86_MCE_AMD
if ((adev->asic_type == CHIP_ALDEBARAN) &&
(adev->gmc.xgmi.connected_to_cpu))
@@ -2618,6 +2796,11 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
if (!data)
return 0;
+ if (con->page_retirement_thread)
+ kthread_stop(con->page_retirement_thread);
+
+ atomic_set(&con->page_retirement_req_cnt, 0);
+
cancel_work_sync(&con->recovery_work);
mutex_lock(&con->recovery_lock);
@@ -2679,6 +2862,87 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}
+/* Query ras capability via atomfirmware interface */
+static void amdgpu_ras_query_ras_capability_from_vbios(struct amdgpu_device *adev)
+{
+ /* mem_ecc cap */
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ dev_info(adev->dev, "MEM ECC is active.\n");
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else {
+ dev_info(adev->dev, "MEM ECC is not presented.\n");
+ }
+
+ /* sram_ecc cap */
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ dev_info(adev->dev, "SRAM ECC is active.\n");
+ if (!amdgpu_sriov_vf(adev))
+ adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ else
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__GFX);
+
+ /*
+ * VCN/JPEG RAS can be supported on both bare metal and
+ * SRIOV environment
+ */
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+
+ /*
+ * XGMI RAS is not supported if xgmi num physical nodes
+ * is zero
+ */
+ if (!adev->gmc.xgmi.num_physical_nodes)
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
+ } else {
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
+ }
+}
+
+/* Query poison mode from umc/df IP callbacks */
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool df_poison, umc_poison;
+
+ /* poison setting is useless on SRIOV guest */
+ if (amdgpu_sriov_vf(adev) || !con)
+ return;
+
+ /* Init poison supported flag, the default value is false */
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ } else if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras &&
+ adev->umc.ras->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras->query_ras_poison_mode(adev);
+
+ /* Only if poison is set in both DF and UMC can we support it */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev,
+ "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+}
+
/*
* check hardware's ras ability which will be saved in hw_supported.
* if hardware does not support ras, we can skip some ras initialization and
@@ -2695,49 +2959,13 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (!amdgpu_ras_asic_supported(adev))
return;
- if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
- if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- dev_info(adev->dev, "MEM ECC is active.\n");
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- } else {
- dev_info(adev->dev, "MEM ECC is not presented.\n");
- }
-
- if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
- dev_info(adev->dev, "SRAM ECC is active.\n");
- if (!amdgpu_sriov_vf(adev))
- adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- else
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
- 1 << AMDGPU_RAS_BLOCK__SDMA |
- 1 << AMDGPU_RAS_BLOCK__GFX);
-
- /* VCN/JPEG RAS can be supported on both bare metal and
- * SRIOV environment
- */
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(2, 6, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(4, 0, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(4, 0, 3))
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- else
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
+ /* query ras capability from psp */
+ if (amdgpu_psp_get_ras_capability(&adev->psp))
+ goto init_ras_enabled_flag;
- /*
- * XGMI RAS is not supported if xgmi num physical nodes
- * is zero
- */
- if (!adev->gmc.xgmi.num_physical_nodes)
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
- } else {
- dev_info(adev->dev, "SRAM ECC is not presented.\n");
- }
+ /* query ras capability from vbios */
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
+ amdgpu_ras_query_ras_capability_from_vbios(adev);
} else {
/* driver only manages a few IP blocks' RAS features
* when the GPU is connected to the CPU through XGMI */
@@ -2746,13 +2974,21 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
1 << AMDGPU_RAS_BLOCK__MMHUB);
}
+ /* apply asic specific settings (vega20 only for now) */
amdgpu_ras_get_quirks(adev);
+ /* query poison mode from umc/df ip callback */
+ amdgpu_ras_query_poison_mode(adev);
+
+init_ras_enabled_flag:
/* hw_supported needs to be aligned with RAS block mask. */
adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
adev->ras_hw_enabled & amdgpu_ras_mask;
+
+ /* aca is disabled by default */
+ adev->aca.is_enabled = false;
}
static void amdgpu_ras_counte_dw(struct work_struct *work)
@@ -2780,39 +3016,6 @@ Out:
pm_runtime_put_autosuspend(dev->dev);
}
-static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool df_poison, umc_poison;
-
- /* poison setting is useless on SRIOV guest */
- if (amdgpu_sriov_vf(adev) || !con)
- return;
-
- /* Init poison supported flag, the default value is false */
- if (adev->gmc.xgmi.connected_to_cpu ||
- adev->gmc.is_app_apu) {
- /* enabled by default when GPU is connected to CPU */
- con->poison_supported = true;
- } else if (adev->df.funcs &&
- adev->df.funcs->query_ras_poison_mode &&
- adev->umc.ras &&
- adev->umc.ras->query_ras_poison_mode) {
- df_poison =
- adev->df.funcs->query_ras_poison_mode(adev);
- umc_poison =
- adev->umc.ras->query_ras_poison_mode(adev);
-
- /* Only poison is set in both DF and UMC, we can support it */
- if (df_poison && umc_poison)
- con->poison_supported = true;
- else if (df_poison != umc_poison)
- dev_warn(adev->dev,
- "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
- df_poison, umc_poison);
- }
-}
-
static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
{
return amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0 |
@@ -2917,12 +3120,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
- amdgpu_ras_query_poison_mode(adev);
-
/* Packed socket_id to ras feature mask bits[31:29] */
if (adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id)
- con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+ con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
+ AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
/* Get RAS schema for particular SOC */
con->schema = amdgpu_get_ras_schema(adev);
@@ -3128,7 +3330,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
amdgpu_ras_disable_all_features(adev, 0);
/* Make sure all ras objects are disabled. */
- if (con->features)
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
amdgpu_ras_disable_all_features(adev, 1);
}
@@ -3142,15 +3344,29 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
- amdgpu_ras_set_mca_debug_mode(adev, false);
+ if (amdgpu_aca_is_enabled(adev)) {
+ if (amdgpu_in_reset(adev))
+ r = amdgpu_aca_reset(adev);
+ else
+ r = amdgpu_aca_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_ras_set_aca_debug_mode(adev, false);
+ } else {
+ amdgpu_ras_set_mca_debug_mode(adev, false);
+ }
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
- if (!node->ras_obj) {
+ obj = node->ras_obj;
+ if (!obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
continue;
}
- obj = node->ras_obj;
+ if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
+ continue;
+
if (obj->ras_late_init) {
r = obj->ras_late_init(adev, &obj->ras_comm);
if (r) {
@@ -3175,7 +3391,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
/* Need disable ras on all IPs here before ip [hw/sw]fini */
- if (con->features)
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
amdgpu_ras_disable_all_features(adev, 0);
amdgpu_ras_recovery_fini(adev);
return 0;
@@ -3208,10 +3424,13 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
amdgpu_ras_fs_fini(adev);
amdgpu_ras_interrupt_remove_all(adev);
- WARN(con->features, "Feature mask is not cleared");
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_fini(adev);
- if (con->features)
- amdgpu_ras_disable_all_features(adev, 1);
+ WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
+
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
+ amdgpu_ras_disable_all_features(adev, 0);
cancel_delayed_work_sync(&con->ras_counte_delay_work);
@@ -3425,22 +3644,41 @@ int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
if (con) {
ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
if (!ret)
- con->is_mca_debug_mode = enable;
+ con->is_aca_debug_mode = enable;
+ }
+
+ return ret;
+}
+
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ if (con) {
+ if (amdgpu_aca_is_enabled(adev))
+ ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
+ else
+ ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
+ if (!ret)
+ con->is_aca_debug_mode = enable;
}
return ret;
}
-bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!con)
return false;
- if (mca_funcs && mca_funcs->mca_set_debug_mode)
- return con->is_mca_debug_mode;
+ if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
+ (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
+ return con->is_aca_debug_mode;
else
return true;
}
@@ -3450,15 +3688,16 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
if (!con) {
*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
return false;
}
- if (mca_funcs && mca_funcs->mca_set_debug_mode)
+ if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
*error_query_mode =
- (con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
+ (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
else
*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
@@ -3699,8 +3938,7 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct
}
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr)
+ struct amdgpu_smuio_mcm_config_info *mcm_info)
{
struct ras_err_node *err_node;
@@ -3712,10 +3950,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
if (!err_node)
return NULL;
- memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
+ INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
- if (err_addr)
- memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));
+ memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
err_data->err_list_count++;
list_add_tail(&err_node->node, &err_data->err_node_list);
@@ -3724,6 +3961,29 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
return &err_node->err_info;
}
+void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
+{
+ struct ras_err_addr *mca_err_addr;
+
+ mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
+ if (!mca_err_addr)
+ return;
+
+ INIT_LIST_HEAD(&mca_err_addr->node);
+
+ mca_err_addr->err_status = err_addr->err_status;
+ mca_err_addr->err_ipid = err_addr->err_ipid;
+ mca_err_addr->err_addr = err_addr->err_addr;
+
+ list_add_tail(&mca_err_addr->node, &err_info->err_addr_list);
+}
+
+void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
+{
+ list_del(&mca_err_addr->node);
+ kfree(mca_err_addr);
+}
+
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
struct amdgpu_smuio_mcm_config_info *mcm_info,
struct ras_err_addr *err_addr, u64 count)
@@ -3736,10 +3996,13 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
if (!err_info)
return -EINVAL;
+ if (err_addr && err_addr->err_status)
+ amdgpu_ras_add_mca_err_addr(err_info, err_addr);
+
err_info->ue_count += count;
err_data->ue_count += count;
@@ -3758,7 +4021,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
if (!err_info)
return -EINVAL;
@@ -3767,3 +4030,135 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
return 0;
}
+
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count)
+{
+ struct ras_err_info *err_info;
+
+ if (!err_data || !mcm_info)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ if (!err_info)
+ return -EINVAL;
+
+ if (err_addr && err_addr->err_status)
+ amdgpu_ras_add_mca_err_addr(err_info, err_addr);
+
+ err_info->de_count += count;
+ err_data->de_count += count;
+
+ return 0;
+}
+
+#define mmMP0_SMN_C2PMSG_92 0x1609C
+#define mmMP0_SMN_C2PMSG_126 0x160BE
+static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
+ u32 instance, u32 boot_error)
+{
+ u32 socket_id, aid_id, hbm_id;
+ u32 reg_data;
+ u64 reg_addr;
+
+ socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
+ aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
+ hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error);
+
+ /* The pattern for smn addressing in other SOCs could be different from
+ * the one for aqua_vanjaram. We should revisit the code if the pattern
+ * is changed. In such a case, replace the aqua_vanjaram implementation
+ * with a more common helper */
+ reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+ reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+ dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
+ socket_id, aid_id, reg_data);
+
+ if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
+ socket_id, aid_id, hbm_id);
+
+ if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
+ socket_id, aid_id, hbm_id);
+
+ if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
+ socket_id, aid_id, hbm_id);
+}
+
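
The reporting function above decodes every failure class out of one 32-bit boot status word through the AMDGPU_RAS_GPU_ERR_* accessors. A standalone sketch of that field extraction; GET_REG_FIELD below reimplements what AMDGPU_GET_REG_FIELD's (value, high, low) usage implies, so treat it as my reading rather than the exact kernel macro:

#include <stdio.h>
#include <stdint.h>

/* Extract bits [h:l] of x, assuming h >= l and the field is under 32 bits. */
#define GET_REG_FIELD(x, h, l) (((x) >> (l)) & ((1U << ((h) - (l) + 1)) - 1))

#define ERR_FW_LOAD(x)    GET_REG_FIELD(x, 1, 1)
#define ERR_SOCKET_ID(x)  GET_REG_FIELD(x, 10, 8)
#define ERR_AID_ID(x)     GET_REG_FIELD(x, 12, 11)
#define ERR_HBM_ID(x)     GET_REG_FIELD(x, 13, 13)

int main(void)
{
        /* Synthetic status: socket 5 in bits 10:8, aid 2 in 12:11, fw-load bit set. */
        uint32_t boot_error = (5u << 8) | (2u << 11) | (1u << 1);

        printf("socket %u aid %u hbm %u fw_load %u\n",
               ERR_SOCKET_ID(boot_error), ERR_AID_ID(boot_error),
               ERR_HBM_ID(boot_error), ERR_FW_LOAD(boot_error));
        return 0;
}
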
+static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev,
+ u32 instance, u32 *boot_error)
+{
+ u32 reg_addr;
+ u32 reg_data;
+ int retry_loop;
+
+ reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+ for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
+ reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+ if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) {
+ *boot_error = AMDGPU_RAS_BOOT_SUCCESS;
+ return 0;
+ }
+ msleep(1);
+ }
+
+ /* The pattern for smn addressing in other SOCs could be different from
+ * the one for aqua_vanjaram. We should revisit the code if the pattern
+ * is changed. In such a case, replace the aqua_vanjaram implementation
+ * with a more common helper */
+ reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+ for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
+ reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+ if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) {
+ *boot_error = reg_data;
+ return 0;
+ }
+ msleep(1);
+ }
+
+ *boot_error = reg_data;
+ return -ETIME;
+}
+
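
amdgpu_ras_wait_for_boot_complete() above is a bounded poll: read the status register up to a retry limit, sleeping 1 ms between reads, then fall back to the error register before giving up with -ETIME. A compact user-space sketch of the primary loop, with a fake register read so it terminates quickly:

#include <stdio.h>

#define POLL_LIMIT     1000
#define STEADY_STATUS  0xBA
#define STATUS_MASK    0xFF

/* Stand-in for amdgpu_device_indirect_rreg_ext(): reports steady state
 * after a few fake polls so the loop can be demonstrated. */
static unsigned int read_status(void)
{
        static int calls;
        return ++calls < 4 ? 0x00 : STEADY_STATUS;
}

/* Poll until the status field reads steady, or give up after POLL_LIMIT tries. */
static int wait_for_boot(unsigned int *status)
{
        for (int i = 0; i < POLL_LIMIT; i++) {
                *status = read_status();
                if ((*status & STATUS_MASK) == STEADY_STATUS)
                        return 0;
                /* the driver sleeps 1 ms here (msleep(1)) */
        }
        return -1;  /* the driver returns -ETIME and reports via the error register */
}

int main(void)
{
        unsigned int status;

        printf("wait_for_boot: %d (status 0x%X)\n", wait_for_boot(&status), status);
        return 0;
}
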
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
+{
+ u32 boot_error = 0;
+ u32 i;
+
+ for (i = 0; i < num_instances; i++) {
+ if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error))
+ amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 76fb85628716..d10e5bb0e52f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -29,9 +29,28 @@
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
#include "amdgpu_smuio.h"
+#include "amdgpu_aca.h"
struct amdgpu_iv_entry;
+#define AMDGPU_RAS_GPU_ERR_MEM_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 0, 0)
+#define AMDGPU_RAS_GPU_ERR_FW_LOAD(x) AMDGPU_GET_REG_FIELD(x, 1, 1)
+#define AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 2, 2)
+#define AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 3, 3)
+#define AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 4, 4)
+#define AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 5, 5)
+#define AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(x) AMDGPU_GET_REG_FIELD(x, 6, 6)
+#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
+#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
+#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31)
+
+#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000
+#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
+#define AMDGPU_RAS_BOOT_STATUS_MASK 0xFF
+#define AMDGPU_RAS_BOOT_SUCCESS 0x80000000
+
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
/* position of instance value in sub_block_index of
* ta_ras_trigger_error_input, the sub block uses lower 12 bits
@@ -39,6 +58,12 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_INST_MASK 0xfffff000
#define AMDGPU_RAS_INST_SHIFT 0xc
+#define AMDGPU_RAS_FEATURES_SOCKETID_SHIFT 29
+#define AMDGPU_RAS_FEATURES_SOCKETID_MASK 0xe0000000
+
+/* The high three bits indicate the socket id */
+#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
+
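
Because bits 31:29 of the feature word carry the socket id, every feature test first strips them with AMDGPU_RAS_GET_FEATURES. A worked example of the packing and unpacking; GET_SOCKETID is my illustrative inverse, not a macro from the header:

#include <stdio.h>
#include <stdint.h>

#define SOCKETID_SHIFT 29
#define SOCKETID_MASK  0xe0000000u
#define GET_FEATURES(val)  ((val) & ~SOCKETID_MASK)
#define GET_SOCKETID(val)  (((val) & SOCKETID_MASK) >> SOCKETID_SHIFT)

int main(void)
{
        uint32_t features = 0x0000003Fu;   /* six feature bits enabled */

        features |= 5u << SOCKETID_SHIFT;  /* pack socket 5 into bits 31:29 */

        printf("raw 0x%08X features 0x%08X socket %u\n",
               features, GET_FEATURES(features), GET_SOCKETID(features));
        return 0;
}
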
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
AMDGPU_RAS_BLOCK__SDMA,
@@ -57,6 +82,8 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__MCA,
AMDGPU_RAS_BLOCK__VCN,
AMDGPU_RAS_BLOCK__JPEG,
+ AMDGPU_RAS_BLOCK__IH,
+ AMDGPU_RAS_BLOCK__MPIO,
AMDGPU_RAS_BLOCK__LAST
};
@@ -441,10 +468,15 @@ struct amdgpu_ras {
/* Indicates smu whether need update bad channel info */
bool update_channel_flag;
/* Record status of smu mca debug mode */
- bool is_mca_debug_mode;
+ bool is_aca_debug_mode;
/* Record special requirements of gpu reset caller */
uint32_t gpu_reset_flags;
+
+ struct task_struct *page_retirement_thread;
+ wait_queue_head_t page_retirement_wq;
+ struct mutex page_retirement_lock;
+ atomic_t page_retirement_req_cnt;
};
struct ras_fs_data {
@@ -453,6 +485,7 @@ struct ras_fs_data {
};
struct ras_err_addr {
+ struct list_head node;
uint64_t err_status;
uint64_t err_ipid;
uint64_t err_addr;
@@ -462,7 +495,8 @@ struct ras_err_info {
struct amdgpu_smuio_mcm_config_info mcm_info;
u64 ce_count;
u64 ue_count;
- struct ras_err_addr err_addr;
+ u64 de_count;
+ struct list_head err_addr_list;
};
struct ras_err_node {
@@ -473,6 +507,7 @@ struct ras_err_node {
struct ras_err_data {
unsigned long ue_count;
unsigned long ce_count;
+ unsigned long de_count;
unsigned long err_addr_cnt;
struct eeprom_table_record *err_addr;
u32 err_list_count;
@@ -529,6 +564,8 @@ struct ras_manager {
struct ras_ih_data ih_data;
struct ras_err_data err_data;
+
+ struct aca_handle aca_handle;
};
struct ras_badpage {
@@ -548,6 +585,7 @@ struct ras_query_if {
struct ras_common_if head;
unsigned long ue_count;
unsigned long ce_count;
+ unsigned long de_count;
};
struct ras_inject_if {
@@ -781,7 +819,8 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev);
int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);
int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
-bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev);
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable);
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev);
bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
unsigned int *mode);
@@ -818,5 +857,20 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
struct amdgpu_smuio_mcm_config_info *mcm_info,
struct ras_err_addr *err_addr, u64 count);
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count);
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ const struct aca_info *aca_info, void *data);
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk);
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+ struct aca_handle *handle, char *buf, void *data);
+
+void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info,
+ struct ras_err_addr *err_addr);
+void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
+ struct ras_err_addr *mca_err_addr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2fde93b00cab..b12808c0c331 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -735,6 +735,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
control->tbl_rai.health_percent = 0;
}
+
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
}
if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 2c3675d91614..db5791e1a7ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -241,7 +241,7 @@ void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
table_size = le32_to_cpu(hdr->jt_size);
}
- for (i = 0; i < table_size; i ++) {
+ for (i = 0; i < table_size; i++) {
dst_ptr[bo_offset + i] =
cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index b591d33af264..5a17e0ff2ab8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -169,7 +169,7 @@ struct amdgpu_rlc_funcs {
void (*stop)(struct amdgpu_device *adev);
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
- void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+ void (*update_spm_vmid)(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid);
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index 7a6a67275404..e22cb2b5cd92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -36,13 +36,24 @@
*/
/**
+ * amdgpu_seq64_get_va_base - Get the seq64 va base address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * va base address on success
+ */
+static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
+{
+ return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+}
+
+/**
* amdgpu_seq64_map - Map the seq64 memory to VM
*
* @adev: amdgpu_device pointer
* @vm: vm pointer
* @bo_va: bo_va pointer
- * @seq64_addr: seq64 vaddr start address
- * @size: seq64 pool size
*
* Map the seq64 memory to the given VM.
*
@@ -50,11 +61,11 @@
* 0 on success or a negative error code on failure
*/
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va, u64 seq64_addr,
- uint32_t size)
+ struct amdgpu_bo_va **bo_va)
{
struct amdgpu_bo *bo;
struct drm_exec exec;
+ u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -77,9 +88,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
+ seq64_addr = amdgpu_seq64_get_va_base(adev);
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_PTE_READABLE);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -144,31 +155,25 @@ error:
* amdgpu_seq64_alloc - Allocate a 64 bit memory
*
* @adev: amdgpu_device pointer
- * @gpu_addr: allocated gpu VA start address
- * @cpu_addr: allocated cpu VA start address
+ * @va: VA to access the seq in process address space
+ * @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
*
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
- u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
{
unsigned long bit_pos;
- u32 offset;
bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
+ if (bit_pos >= adev->seq64.num_sem)
+ return -ENOSPC;
- if (bit_pos < adev->seq64.num_sem) {
- __set_bit(bit_pos, adev->seq64.used);
- offset = bit_pos << 6; /* convert to qw offset */
- } else {
- return -EINVAL;
- }
-
- *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START;
- *cpu_addr = offset + adev->seq64.cpu_base_addr;
+ __set_bit(bit_pos, adev->seq64.used);
+ *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+ *cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
}
@@ -177,20 +182,17 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
* amdgpu_seq64_free - Free the given 64 bit memory
*
* @adev: amdgpu_device pointer
- * @gpu_addr: gpu start address to be freed
+ * @va: gpu start address to be freed
*
* Free the given 64 bit memory from seq64 pool.
- *
*/
-void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr)
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
{
- u32 offset;
-
- offset = gpu_addr - AMDGPU_SEQ64_VADDR_START;
+ unsigned long bit_pos;
- offset >>= 6;
- if (offset < adev->seq64.num_sem)
- __clear_bit(offset, adev->seq64.used);
+ bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64);
+ if (bit_pos < adev->seq64.num_sem)
+ __clear_bit(bit_pos, adev->seq64.used);
}
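
Taken together, the alloc and free hunks above form a bitmap slot allocator: find_first_zero_bit() picks a free 64-bit slot, the bit index times sizeof(u64) becomes an offset from the reserved VA base, and freeing inverts that arithmetic to recover the bit. A simplified single-word sketch of the scheme, with a stand-in VA base and capacity:

#include <stdio.h>
#include <stdint.h>

#define NUM_SLOTS 64
#define VA_BASE   0x100000ull          /* stand-in for the reserved seq64 VA base */

static uint64_t used;                  /* one bit per 64-bit slot */

/* Allocate: first clear bit -> VA = base + bit * sizeof(u64), or 0 if full. */
static uint64_t seq_alloc(void)
{
        for (int bit = 0; bit < NUM_SLOTS; bit++) {     /* find_first_zero_bit() */
                if (!(used & (1ull << bit))) {
                        used |= 1ull << bit;            /* __set_bit() */
                        return VA_BASE + (uint64_t)bit * sizeof(uint64_t);
                }
        }
        return 0;                                       /* -ENOSPC in the driver */
}

/* Free: recover the bit index from the VA and clear it. */
static void seq_free(uint64_t va)
{
        uint64_t bit = (va - VA_BASE) / sizeof(uint64_t);

        if (bit < NUM_SLOTS)
                used &= ~(1ull << bit);                 /* __clear_bit() */
}

int main(void)
{
        uint64_t a = seq_alloc(), b = seq_alloc();

        printf("a=0x%llx b=0x%llx\n", (unsigned long long)a, (unsigned long long)b);
        seq_free(a);
        printf("after free, next=0x%llx\n", (unsigned long long)seq_alloc()); /* reuses a */
        return 0;
}
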
/**
@@ -229,7 +231,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
* AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS
* 64bit slots
*/
- r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE,
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->seq64.sbo, NULL,
(void **)&adev->seq64.cpu_base_addr);
@@ -238,7 +240,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
return r;
}
- memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE);
+ memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE);
adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 2196e72be508..4203b2ab318d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -25,10 +25,9 @@
#ifndef __AMDGPU_SEQ64_H__
#define __AMDGPU_SEQ64_H__
-#define AMDGPU_SEQ64_SIZE (2ULL << 20)
-#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8))
-#define AMDGPU_SEQ64_VADDR_OFFSET 0x50000
-#define AMDGPU_SEQ64_VADDR_START (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET)
+#include "amdgpu_vm.h"
+
+#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_VA_RESERVED_SEQ64_SIZE / sizeof(u64))
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
@@ -42,7 +41,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev);
int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size);
+ struct amdgpu_bo_va **bo_va);
void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 75c9fd2c6c2a..8722beba494e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -102,23 +102,19 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
/* Don't handle scatter gather BOs */
if (bo->type == ttm_bo_type_sg) {
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
}
/* Object isn't an AMDGPU object so ignore */
if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
- placement->busy_placement = &placements;
placement->num_placement = 1;
- placement->num_busy_placement = 1;
return;
}
abo = ttm_to_amdgpu_bo(bo);
if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
}
@@ -128,13 +124,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -149,8 +145,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
AMDGPU_GEM_DOMAIN_CPU);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
- abo->placement.busy_placement = &abo->placements[1];
- abo->placement.num_busy_placement = 1;
+ abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
} else {
/* Move to GTT memory */
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
@@ -966,8 +961,6 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
/* allocate GART space */
placement.num_placement = 1;
placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
placements.mem_type = TTM_PL_TT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3e12763e477a..0867fd9e15ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -556,6 +556,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
default:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
+ else if (load_type == 3)
+ return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO;
else
return AMDGPU_FW_LOAD_PSP;
}
@@ -678,6 +680,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
return "UMSCH_MM_DATA";
case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
return "UMSCH_MM_CMD_BUFFER";
+ case AMDGPU_UCODE_ID_JPEG_RAM:
+ return "JPEG";
default:
return "UNKNOWN UCODE";
}
@@ -1060,7 +1064,8 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
+ if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) &&
+ (adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) {
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
(amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4244a13f9f22..619445760037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -511,6 +511,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_UMSCH_MM_DATA,
AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER,
AMDGPU_UCODE_ID_P2S_TABLE,
+ AMDGPU_UCODE_ID_JPEG_RAM,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index d65e21914d8c..20436f81856a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "umc_v6_7.h"
+#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data, uint64_t err_addr,
@@ -85,18 +86,21 @@ out_fini_err_data:
return ret;
}
-static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
- void *ras_error_status,
- struct amdgpu_iv_entry *entry,
- bool reset)
+static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ unsigned int error_query_mode;
int ret = 0;
+ unsigned long err_count;
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
+
+ mutex_lock(&con->page_retirement_lock);
ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
- if (ret == -EOPNOTSUPP) {
+ if (ret == -EOPNOTSUPP &&
+ error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
@@ -120,7 +124,8 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
*/
adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
}
- } else if (!ret) {
+ } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+ (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
if (adev->umc.ras &&
adev->umc.ras->ecc_info_query_ras_error_count)
adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
@@ -147,16 +152,13 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
}
/* only uncorrectable error needs gpu reset */
- if (err_data->ue_count) {
- dev_info(adev->dev, "%ld uncorrectable hardware errors "
- "detected in UMC block\n",
- err_data->ue_count);
-
+ if (err_data->ue_count || err_data->de_count) {
+ err_count = err_data->ue_count + err_data->de_count;
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count));
+ amdgpu_ras_save_bad_pages(adev, &err_count);
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
@@ -165,20 +167,87 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
con->update_channel_flag = false;
}
}
-
- if (reset) {
- /* use mode-2 reset for poison consumption */
- if (!entry)
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
- amdgpu_ras_reset_gpu(adev);
- }
}
kfree(err_data->err_addr);
+
+ mutex_unlock(&con->page_retirement_lock);
+}
+
+static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry,
+ bool reset)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_umc_handle_bad_pages(adev, ras_error_status);
+
+ if (err_data->ue_count && reset) {
+ /* use mode-2 reset for poison consumption */
+ if (!entry)
+ con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ amdgpu_ras_reset_gpu(adev);
+ }
+
return AMDGPU_RAS_SUCCESS;
}
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
+int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
+ bool reset, uint32_t timeout_ms)
+{
+ struct ras_err_data err_data;
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+ uint32_t timeout = timeout_ms;
+
+ memset(&err_data, 0, sizeof(err_data));
+ amdgpu_ras_error_data_init(&err_data);
+
+ do {
+
+ amdgpu_umc_handle_bad_pages(adev, &err_data);
+
+ if (timeout && !err_data.de_count) {
+ msleep(1);
+ timeout--;
+ }
+
+ } while (timeout && !err_data.de_count);
+
+ if (!timeout)
+ dev_warn(adev->dev, "Can't find bad pages\n");
+
+ if (err_data.de_count)
+ dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);
+
+ if (obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
+ }
+
+ amdgpu_ras_error_data_fini(&err_data);
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ if (reset) {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ /* use mode-2 reset for poison consumption */
+ con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ return 0;
+}
+
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, bool reset)
{
int ret = AMDGPU_RAS_SUCCESS;
@@ -195,27 +264,41 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
}
if (!amdgpu_sriov_vf(adev)) {
- struct ras_err_data err_data;
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+ struct ras_err_data err_data;
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret = amdgpu_ras_error_data_init(&err_data);
+ if (ret)
+ return ret;
- ret = amdgpu_ras_error_data_init(&err_data);
- if (ret)
- return ret;
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
- ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
+ }
- if (ret == AMDGPU_RAS_SUCCESS && obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
- }
+ amdgpu_ras_error_data_fini(&err_data);
+ } else {
+ if (reset) {
+ amdgpu_umc_bad_page_polling_timeout(adev,
+ reset, MAX_UMC_POISON_POLLING_TIME_SYNC);
+ } else {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- amdgpu_ras_error_data_fini(&err_data);
+ atomic_inc(&con->page_retirement_req_cnt);
+
+ wake_up(&con->page_retirement_wq);
+ }
+ }
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
- adev->virt.ops->ras_poison_handler(adev);
+ adev->virt.ops->ras_poison_handler(adev, block);
else
dev_warn(adev->dev,
"No ras_poison_handler interface in SRIOV!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 417a6726c71b..26d2ae498daf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,7 +21,7 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
#include "amdgpu_ras.h"
-
+#include "amdgpu_mca.h"
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
@@ -64,6 +64,8 @@ struct amdgpu_umc_ras {
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
+ bool (*check_ecc_err_status)(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, void *ras_error_status);
/* support different eeprom table version for different asic */
void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr);
};
@@ -100,7 +102,8 @@ struct amdgpu_umc {
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
@@ -118,4 +121,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data);
+
+int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
+ bool reset, uint32_t timeout_ms);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
index 107f9bb0e24f..5b27fc41ffbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
@@ -69,12 +69,12 @@ struct amdgpu_debugfs_gprwave_data {
};
enum AMDGPU_DEBUGFS_REGS2_CMDS {
- AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0,
+ AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0,
AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2,
};
enum AMDGPU_DEBUGFS_GPRWAVE_CMDS {
- AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE=0,
+ AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE = 0,
};
//reg2 interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index bfbf59326ee1..ab820cf52668 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -358,7 +358,7 @@ static int setup_umsch_mm_test(struct amdgpu_device *adev,
memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data));
- test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
+ test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va,
test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data));
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f4963330c772..eb2a88991206 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -59,6 +59,7 @@
#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
+#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -82,6 +83,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -1189,7 +1191,7 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
- adev->virt.ops->ras_poison_handler(adev);
+ adev->virt.ops->ras_poison_handler(adev, ras_if->block);
else
dev_warn(adev->dev,
"No ras_poison_handler interface in SRIOV for VCN!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 514c98ea144f..1985f71b4373 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -160,6 +160,48 @@
} \
} while (0)
+#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
+ ({ \
+ uint32_t internal_reg_offset, addr; \
+ bool video_range, aon_range; \
+ \
+ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
+ addr <<= 2; \
+ video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+ aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+ if (video_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
+ else \
+ internal_reg_offset = (0xFFFFF & addr); \
+ \
+ internal_reg_offset >>= 2; \
+ })
+
+#define WREG32_SOC24_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \
+ regUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ VCN, GET_INST(VCN, inst_idx), \
+ regUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
+ } while (0)
+
#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
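A note on the SOC24_DPG_MODE_OFFSET macro above: it turns a dword register offset into the VCN-internal offset used for indirect DPG programming by converting to a byte address, keeping the low 20 bits, rebasing them from the VID or AON SOC aperture onto the matching internal IP aperture, and shifting back to a dword index. The same arithmetic in isolation, with made-up aperture bases standing in for VCN_VID_SOC_ADDRESS and friends from the register headers:

#include <stdint.h>
#include <stdio.h>

/* Placeholder aperture bases, for illustration only; the real values
 * are VCN_VID_SOC_ADDRESS / VCN_AON_SOC_ADDRESS / VCN_VID_IP_ADDRESS /
 * VCN_AON_IP_ADDRESS from the VCN headers. */
#define VID_SOC_BASE	0x1d000u
#define AON_SOC_BASE	0x1f800u
#define VID_IP_BASE	0x20000u
#define AON_IP_BASE	0x30000u

static uint32_t dpg_mode_offset(uint32_t dword_reg)
{
	uint32_t addr = dword_reg << 2;		/* dword offset -> byte address */
	uint32_t low = addr & 0xfffff;		/* only the low 20 bits matter */
	uint32_t internal;

	if (low >= VID_SOC_BASE && low < VID_SOC_BASE + 0x2600)
		internal = low - VID_SOC_BASE + VID_IP_BASE;
	else if (low >= AON_SOC_BASE && low < AON_SOC_BASE + 0x600)
		internal = low - AON_SOC_BASE + AON_IP_BASE;
	else
		internal = low;			/* outside both apertures */

	return internal >> 2;			/* back to a dword offset */
}

int main(void)
{
	/* A register at the start of the VID aperture lands at the
	 * start of the internal VID range. */
	printf("0x%x\n", dpg_mode_offset(VID_SOC_BASE >> 2));
	return 0;
}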
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0dcff2889e25..6ff7d3fb2008 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -71,59 +71,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
amdgpu_num_kcq = 2;
}
-void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
- uint32_t reg0, uint32_t reg1,
- uint32_t ref, uint32_t mask,
- uint32_t xcc_inst)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
- struct amdgpu_ring *ring = &kiq->ring;
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
-
- if (adev->mes.ring.sched.ready) {
- amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
- ref, mask);
- return;
- }
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
- ref, mask);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r)
- goto failed_undo;
-
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for IRQ context */
- if (r < 1 && in_interrupt())
- goto failed_kiq;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq;
-
- return;
-
-failed_undo:
- amdgpu_ring_undo(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-failed_kiq:
- dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
-}
-
/**
* amdgpu_virt_request_full_gpu() - request full gpu access
* @adev: amdgpu device.
@@ -303,11 +250,11 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
if (!*data)
goto data_failure;
- bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
+ bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
if (!bps)
goto bps_failure;
- bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
+ bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
if (!bps_bo)
goto bps_bo_failure;
@@ -340,8 +287,10 @@ static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
for (i = data->last_reserved - 1; i >= 0; i--) {
bo = data->bps_bo[i];
- amdgpu_bo_free_kernel(&bo, NULL, NULL);
- data->bps_bo[i] = bo;
+ if (bo) {
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
+ data->bps_bo[i] = bo;
+ }
data->last_reserved = i;
}
}
@@ -381,6 +330,8 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
struct amdgpu_virt *virt = &adev->virt;
struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
struct amdgpu_bo *bo = NULL;
uint64_t bp;
int i;
@@ -396,12 +347,18 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
* 2) a ras bad page has been reserved (duplicate error injection
* for one page);
*/
- if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
- AMDGPU_GPU_PAGE_SIZE,
- &bo, NULL))
- DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
-
- data->bps_bo[i] = bo;
+ if (ttm_resource_manager_used(man)) {
+ amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
+ bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE);
+ data->bps_bo[i] = NULL;
+ } else {
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ &bo, NULL))
+ DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ data->bps_bo[i] = bo;
+ }
data->last_reserved = i + 1;
bo = NULL;
}
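One detail in the amdgpu_virt.c hunks worth flagging: sizeof((*data)->bps) measures the pointer, not the element, so the old kmalloc_array() calls sized the arrays by pointer width; sizeof(*(*data)->bps) sizes them by the element. The bug class in miniature, with a made-up element type:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical element type, standing in for the driver's
 * eeprom_table_record entries. */
struct record {
	unsigned long retired_page;
	unsigned char channel, mcumc_id, mem_type, err_type;
};

int main(void)
{
	struct record *bps;
	size_t n = 16;

	/* Buggy: size of the pointer itself (8 bytes on LP64). */
	printf("sizeof(bps)  = %zu\n", sizeof(bps));
	/* Fixed: size of one pointed-to element. */
	printf("sizeof(*bps) = %zu\n", sizeof(*bps));

	bps = calloc(n, sizeof(*bps));	/* kmalloc_array() equivalent */
	if (!bps)
		return 1;
	free(bps);
	return 0;
}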
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index d4207e44141f..fa7be5f277b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -88,7 +88,8 @@ struct amdgpu_virt_ops {
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
u32 data1, u32 data2, u32 data3);
- void (*ras_poison_handler)(struct amdgpu_device *adev);
+ void (*ras_poison_handler)(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
};
/*
@@ -332,10 +333,6 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
- uint32_t reg0, uint32_t rreg1,
- uint32_t ref, uint32_t mask,
- uint32_t xcc_inst);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
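With the amdgpu_virt_ops change above, the SR-IOV poison handler now learns which RAS block raised the poison, so a backend can treat memory poison (UMC) differently from poison consumed by an engine (GFX, VCN, JPEG). A toy dispatch along those lines, with a local enum standing in for amdgpu_ras_block and placeholder actions:

#include <stdio.h>

/* Stand-in for enum amdgpu_ras_block; real values live in amdgpu_ras.h. */
enum ras_block { RAS_UMC, RAS_GFX, RAS_VCN, RAS_JPEG };

static void ras_poison_handler(enum ras_block block)
{
	switch (block) {
	case RAS_UMC:
		puts("memory poison: retire bad pages");	/* placeholder */
		break;
	default:
		puts("engine poison: notify host / reset");	/* placeholder */
		break;
	}
}

int main(void)
{
	ras_poison_handler(RAS_UMC);	/* creation side */
	ras_poison_handler(RAS_VCN);	/* consumption side */
	return 0;
}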
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 453a4b786cfc..8baa2e0935cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -660,8 +660,7 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.set_powergating_state = amdgpu_vkms_set_powergating_state,
};
-const struct amdgpu_ip_block_version amdgpu_vkms_ip_block =
-{
+const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b8fcb6c55698..ed4a8c5d26d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -234,6 +234,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_evicted_user - vm_bo is evicted
+ *
+ * @vm_bo: vm_bo which is evicted
+ *
+ * State for BOs used by user mode queues which are not at the location they
+ * should be.
+ */
+static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
+{
+ vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
+ spin_unlock(&vm_bo->vm->status_lock);
+}
+
+/**
* amdgpu_vm_bo_relocated - vm_bo is relocated
*
* @vm_bo: vm_bo which is relocated
@@ -427,21 +443,25 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_validate_pt_bos - validate the page table BOs
+ * amdgpu_vm_validate - validate evicted BOs tracked in the VM
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
+ * @ticket: optional reservation ticket used to reserve the VM
* @validate: callback to do the validation
* @param: parameter for the validation callback
*
- * Validate the page table BOs on command submission if neccessary.
+ * Validate the page table BOs and per-VM BOs on command submission if
+ * necessary. If a ticket is given, also try to validate evicted user queue
+ * BOs. They must already be reserved with the given ticket.
*
* Returns:
* Validation result.
*/
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*validate)(void *p, struct amdgpu_bo *bo),
- void *param)
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *shadow;
@@ -484,6 +504,28 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
spin_lock(&vm->status_lock);
}
+ while (ticket && !list_empty(&vm->evicted_user)) {
+ bo_base = list_first_entry(&vm->evicted_user,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo;
+
+ if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
+ pr_warn_ratelimited("Evicted user BO is not reserved in pid %d\n",
+ vm->task_info.pid);
+ return -EINVAL;
+ }
+
+ r = validate(param, bo);
+ if (r)
+ return r;
+
+ amdgpu_vm_bo_invalidated(bo_base);
+
+ spin_lock(&vm->status_lock);
+ }
spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
@@ -651,7 +693,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
gds_switch_needed) {
@@ -1426,11 +1468,21 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
}
r = amdgpu_vm_bo_update(adev, bo_va, clear);
- if (r)
- return r;
if (unlock)
dma_resv_unlock(resv);
+ if (r)
+ return r;
+
+ /* Remember evicted DMABuf imports in compute VMs for later
+ * validation
+ */
+ if (vm->is_compute_context &&
+ bo_va->base.bo->tbo.base.import_attach &&
+ (!bo_va->base.bo->tbo.resource ||
+ bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
+ amdgpu_vm_bo_evicted_user(&bo_va->base);
+
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
@@ -2196,6 +2248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->evicted_user);
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
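The evicted_user draining added above follows the VM code's list-walk idiom: take the list head under status_lock, drop the lock across the potentially sleeping validate callback, and retake it afterwards, with amdgpu_vm_bo_invalidated() moving the entry to another list so the loop makes progress. The idiom in a compilable sketch, with a pthread mutex where the kernel uses a spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *evicted_user;	/* stand-in for vm->evicted_user */

static int validate(struct node *n)	/* may sleep in the kernel */
{
	printf("validating BO %d\n", n->id);
	return 0;
}

static int drain_evicted_user(void)
{
	pthread_mutex_lock(&status_lock);
	while (evicted_user) {
		struct node *n = evicted_user;

		pthread_mutex_unlock(&status_lock);	/* drop across callback */
		if (validate(n))
			return -1;
		pthread_mutex_lock(&status_lock);
		evicted_user = n->next;	/* bo_invalidated(): leave this list */
		free(n);
	}
	pthread_mutex_unlock(&status_lock);
	return 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = evicted_user;
		evicted_user = n;
	}
	return drain_evicted_user() ? 1 : 0;
}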
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4740dd65b99d..42f6ddec50c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -135,8 +135,21 @@ struct amdgpu_mem_stats;
#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
-/* Reserve 2MB at top/bottom of address space for kernel use */
-#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
+/* Reserve space at top/bottom of address space for kernel use */
+#define AMDGPU_VA_RESERVED_CSA_SIZE (2ULL << 20)
+#define AMDGPU_VA_RESERVED_CSA_START(adev) (((adev)->vm_manager.max_pfn \
+ << AMDGPU_GPU_PAGE_SHIFT) \
+ - AMDGPU_VA_RESERVED_CSA_SIZE)
+#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20)
+#define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \
+ - AMDGPU_VA_RESERVED_SEQ64_SIZE)
+#define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12)
+#define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \
+ - AMDGPU_VA_RESERVED_TRAP_SIZE)
+#define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
+#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_TRAP_SIZE + \
+ AMDGPU_VA_RESERVED_SEQ64_SIZE + \
+ AMDGPU_VA_RESERVED_CSA_SIZE)
/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
@@ -288,9 +301,12 @@ struct amdgpu_vm {
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
- /* BOs who needs a validation */
+ /* Per-VM and PT BOs that need validation */
struct list_head evicted;
+ /* BOs for user mode queues that need validation */
+ struct list_head evicted_user;
+
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -434,9 +450,10 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*callback)(void *p, struct amdgpu_bo *bo),
- void *param);
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate);
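The reworked AMDGPU_VA_RESERVED_* macros stack three kernel-owned regions at the top of the VA space (CSA, then seq64, then an 8 KiB trap region below them) and keep a fixed 64 KiB hole at the bottom, which is what the umsch test switches to via AMDGPU_VA_RESERVED_BOTTOM. The arithmetic checked standalone, assuming a hypothetical 48-bit VA span in place of max_pfn << AMDGPU_GPU_PAGE_SHIFT:

#include <inttypes.h>
#include <stdio.h>

#define CSA_SIZE	(2ULL << 20)	/* 2 MiB */
#define SEQ64_SIZE	(2ULL << 20)	/* 2 MiB */
#define TRAP_SIZE	(2ULL << 12)	/* 8 KiB */
#define BOTTOM		(1ULL << 16)	/* 64 KiB */

int main(void)
{
	uint64_t top = 1ULL << 48;	/* assumed address-space size */
	uint64_t csa = top - CSA_SIZE;
	uint64_t seq64 = csa - SEQ64_SIZE;
	uint64_t trap = seq64 - TRAP_SIZE;

	printf("CSA   start: 0x%" PRIx64 "\n", csa);
	printf("seq64 start: 0x%" PRIx64 "\n", seq64);
	printf("trap  start: 0x%" PRIx64 "\n", trap);
	printf("usable VA:   [0x%" PRIx64 ", 0x%" PRIx64 ")\n", BOTTOM, trap);
	printf("reserved at top: 0x%" PRIx64 " bytes\n",
	       (uint64_t)(CSA_SIZE + SEQ64_SIZE + TRAP_SIZE));
	return 0;
}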
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a6c88f2fe6e5..20d51f6c9bb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1035,15 +1035,74 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return 0;
}
+static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ struct amdgpu_device *adev = handle->adev;
+ const char *error_str;
+ u64 status;
+ int ret, ext_error_code;
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+
+ error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
+ xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
+ if (error_str)
+ dev_info(adev->dev, "%s detected\n", error_str);
+
+ if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) ||
+ (type == ACA_ERROR_TYPE_CE && ext_error_code == 6))
+ report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
+
+ return 0;
+}
+
+static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
+ .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report,
+};
+
+static const struct aca_info xgmi_v6_4_0_aca_info = {
+ .hwip = ACA_HWIP_TYPE_PCS_XGMI,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .bank_ops = &xgmi_v6_4_0_aca_bank_ops,
+};
+
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
+ int r;
+
if (!adev->gmc.xgmi.supported ||
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
- return amdgpu_ras_block_late_init(adev, ras_block);
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ &xgmi_v6_4_0_aca_info, NULL);
+ if (r)
+ goto late_fini;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
}
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
@@ -1099,7 +1158,7 @@ static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
{
- WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+ WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}
static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
@@ -1277,12 +1336,12 @@ static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count += ce_cnt;
}
-static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
+static enum aca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
{
const char *error_str;
int ext_error_code;
- ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status);
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
@@ -1291,9 +1350,9 @@ static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdg
switch (ext_error_code) {
case 0:
- return AMDGPU_MCA_ERROR_TYPE_UE;
+ return ACA_ERROR_TYPE_UE;
case 6:
- return AMDGPU_MCA_ERROR_TYPE_CE;
+ return ACA_ERROR_TYPE_CE;
default:
return -EINVAL;
}
@@ -1307,22 +1366,22 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
int xgmi_inst = mcm_info->die_id;
u64 status = 0;
- status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS);
- if (!MCA_REG__STATUS__VAL(status))
+ status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS);
+ if (!ACA_REG__STATUS__VAL(status))
return;
switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
- case AMDGPU_MCA_ERROR_TYPE_UE:
+ case ACA_ERROR_TYPE_UE:
amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
break;
- case AMDGPU_MCA_ERROR_TYPE_CE:
+ case ACA_ERROR_TYPE_CE:
amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
break;
default:
break;
}
- WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+ WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}
static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
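In the XGMI hunks, the MCA_REG_* accessors are replaced by their ACA_REG_* equivalents and the new bank-report callback classifies by the extended error code: 0 is counted as an uncorrectable error, 6 as a correctable one, anything else is ignored. That mapping, reduced to a standalone table (the description strings are placeholders for xgmi_v6_4_0_ras_error_code_ext):

#include <stdio.h>

enum aca_error_type { ACA_UE, ACA_CE, ACA_NONE };

/* Placeholder descriptions; the driver's xgmi_v6_4_0_ras_error_code_ext
 * table carries the real per-code strings. */
static const char * const err_str[7] = {
	[0] = "uncorrectable PCS error",
	[6] = "correctable PCS error",
};

/* Mirrors the patch: ext code 0 -> UE, ext code 6 -> CE, rest unused. */
static enum aca_error_type classify(int ext_error_code)
{
	switch (ext_error_code) {
	case 0:
		return ACA_UE;
	case 6:
		return ACA_CE;
	default:
		return ACA_NONE;
	}
}

int main(void)
{
	for (int code = 0; code < 7; code++)
		printf("ext code %d -> type %d (%s)\n", code, classify(code),
		       err_str[code] ? err_str[code] : "not counted");
	return 0;
}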
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c
new file mode 100644
index 000000000000..8a0773b80864
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "athub_v4_1_0.h"
+#include "athub/athub_4_1_0_offset.h"
+#include "athub/athub_4_1_0_sh_mask.h"
+#include "soc15_common.h"
+
+static uint32_t athub_v4_1_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
+
+static void athub_v4_1_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+athub_v4_1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v4_1_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ athub_v4_1_0_set_cg_cntl(adev, data);
+}
+
+static void
+athub_v4_1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v4_1_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ athub_v4_1_0_set_cg_cntl(adev, data);
+}
+
+int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ athub_v4_1_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ athub_v4_1_0_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = athub_v4_1_0_get_cg_cntl(adev);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
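The new athub_v4_1_0 file follows the driver's usual small-IP shape: a get/set pair around one misc control register, MGCG and LS toggled as independent mask bits, and the write skipped when nothing changed. The read-modify-write core as a standalone demo (bit positions are placeholders for the athub_4_1_0_sh_mask definitions):

#include <stdint.h>
#include <stdio.h>

#define CG_ENABLE_MASK		(1u << 0)	/* placeholder bit */
#define CG_MEM_LS_ENABLE_MASK	(1u << 1)	/* placeholder bit */

static uint32_t misc_cntl;	/* stands in for regATHUB_MISC_CNTL */

static void update_cg(int enable, uint32_t mask)
{
	uint32_t def, data;

	def = data = misc_cntl;		/* RREG32_SOC15() equivalent */
	if (enable)
		data |= mask;
	else
		data &= ~mask;
	if (def != data)		/* avoid a redundant write */
		misc_cntl = data;	/* WREG32_SOC15() equivalent */
}

int main(void)
{
	update_cg(1, CG_ENABLE_MASK);		/* gate: MGCG on */
	update_cg(1, CG_MEM_LS_ENABLE_MASK);	/* gate: LS on */
	update_cg(0, CG_ENABLE_MASK);		/* ungate MGCG */
	printf("ATHUB_MISC_CNTL = 0x%08x\n", misc_cntl);
	return 0;
}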
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h
new file mode 100644
index 000000000000..4d18d0998fa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V4_1_0_H__
+#define __ATHUB_V4_1_0_H__
+
+int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index a33e890c70d9..b888613f653f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -62,6 +62,7 @@
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
+ int ps_size, ws_size;
int ps_shift;
uint16_t start;
unsigned last_jump;
@@ -70,8 +71,8 @@ typedef struct {
} atom_exec_context;
int amdgpu_atom_debug;
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -223,7 +224,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
- val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ if (idx < ctx->ps_size)
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ else
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -261,7 +265,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
val = gctx->reg_block;
break;
default:
- val = ctx->ws[idx];
+ if (idx < ctx->ws_size)
+ val = ctx->ws[idx];
+ else
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
}
break;
case ATOM_ARG_ID:
@@ -495,6 +502,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
idx = U8(*ptr);
(*ptr)++;
DEBUG("PS[0x%02X]", idx);
+ if (idx >= ctx->ps_size) {
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
+ return;
+ }
ctx->ps[idx] = cpu_to_le32(val);
break;
case ATOM_ARG_WS:
@@ -527,6 +538,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
gctx->reg_block = val;
break;
default:
+ if (idx >= ctx->ws_size) {
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
+ return;
+ }
ctx->ws[idx] = val;
}
break;
@@ -624,7 +639,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
if (r) {
ctx->abort = true;
}
@@ -1203,7 +1218,7 @@ static struct {
atom_op_div32, ATOM_ARG_WS},
};
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1225,12 +1240,16 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
+ ectx.ps_size = params_size;
ectx.abort = false;
ectx.last_jump = 0;
- if (ws)
+ if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
- else
+ ectx.ws_size = ws;
+ } else {
ectx.ws = NULL;
+ ectx.ws_size = 0;
+ }
debug_depth++;
while (1) {
@@ -1264,7 +1283,7 @@ free:
return ret;
}
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int r;
@@ -1280,7 +1299,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *par
/* reset divmul */
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
- r = amdgpu_atom_execute_table_locked(ctx, index, params);
+ r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size);
mutex_unlock(&ctx->mutex);
return r;
}
@@ -1552,7 +1571,7 @@ int amdgpu_atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
if (ret)
return ret;
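The atom.c interpreter changes thread a params_size through execution so every parameter-space (PS) and workspace (WS) access is range-checked before use, with callers now passing sizeof(args) for their on-stack argument unions. The guarded-access pattern on its own (names and the memcpy stand-in are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct exec_ctx {
	uint32_t *ps;
	int ps_size;	/* bound handed in by the caller, like ectx.ps_size */
};

static uint32_t get_ps(struct exec_ctx *ctx, int idx)
{
	uint32_t val = 0;

	if (idx < ctx->ps_size)
		memcpy(&val, &ctx->ps[idx], sizeof(val));	/* unaligned-safe */
	else
		fprintf(stderr, "PS index out of range: %i >= %i\n",
			idx, ctx->ps_size);
	return val;
}

int main(void)
{
	uint32_t params[4] = { 0xdeadbeef };
	struct exec_ctx ctx = { params, 4 };

	printf("0x%08x\n", get_ps(&ctx, 0));	/* in range */
	printf("0x%08x\n", get_ps(&ctx, 9));	/* rejected */
	return 0;
}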
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index c11cf18a0f18..b807f6639a4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -156,7 +156,7 @@ struct atom_context {
extern int amdgpu_atom_debug;
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios);
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
int amdgpu_atom_asic_init(struct atom_context *ctx);
void amdgpu_atom_destroy(struct atom_context *ctx);
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 10098fdd33fc..3dfc28840a7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -77,7 +77,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border);
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
@@ -106,7 +106,7 @@ void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
args.ucEnable = ATOM_SCALER_DISABLE;
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
@@ -123,7 +123,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucEnable = lock;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
@@ -139,7 +139,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucEnable = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
@@ -155,7 +155,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucBlanking = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
@@ -171,7 +171,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
args.ucDispPipeId = amdgpu_crtc->crtc_id;
args.ucEnable = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
@@ -183,7 +183,7 @@ void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
args.ucEnable = ATOM_INIT;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
@@ -228,7 +228,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = amdgpu_crtc->crtc_id;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union atom_enable_ss {
@@ -293,7 +293,7 @@ static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev,
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union adjust_pixel_clock {
@@ -395,7 +395,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
amdgpu_atom_execute_table(adev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
@@ -428,7 +428,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucExtTransmitterID = 0;
amdgpu_atom_execute_table(adev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
@@ -514,7 +514,7 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union set_dce_clock {
@@ -544,7 +544,7 @@ u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
args.v2_1.asParam.ucDCEClkType = clk_type;
args.v2_1.asParam.ucDCEClkSrc = clk_src;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
break;
default:
@@ -740,7 +740,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 87c41e0e9b7c..622634c08c7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -83,7 +83,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
args.v2.ucDelay = delay / 10;
args.v2.ucHPD_ID = chan->rec.hpd;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*ack = args.v2.ucReplyStatus;
@@ -301,7 +301,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
args.ucLaneNum = lane_num;
args.ucStatus = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return args.ucStatus;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 7672abe6c140..25feab188dfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -335,7 +335,7 @@ amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
args.ucDacStandard = ATOM_DAC1_PS2;
args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -432,7 +432,7 @@ amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
@@ -732,7 +732,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -1136,7 +1136,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
bool
@@ -1164,7 +1164,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
args.v1.ucAction = action;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
@@ -1288,7 +1288,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void
@@ -1633,7 +1633,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
/* This only needs to be called once at startup */
@@ -1706,7 +1706,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return true;
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index af0335535f82..a6501114322f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -86,7 +86,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
@@ -172,5 +172,5 @@ void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr
args.ucSlaveAddr = slave_addr;
args.ucLineNumber = line_number;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 567a904804bc..9c85ca6358c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -21,8 +21,7 @@
*
*/
-static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -236,8 +235,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
0x00000000, // PA_SC_VPORT_ZMIN_15
0x3f800000, // PA_SC_VPORT_ZMAX_15
};
-static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_2[] = {
0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
0x00000000, // PA_SC_TILE_STEERING_OVERRIDE
0x00000000, // CP_PERFMON_CNTX_CNTL
@@ -521,15 +519,13 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
0x00000000, // CB_MRT6_EPITCH
0x00000000, // CB_MRT7_EPITCH
};
-static const unsigned int gfx9_SECT_CONTEXT_def_3[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_3[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
0x00000000, // PA_CL_POINT_CULL_RAD
};
-static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_4[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
@@ -688,17 +684,14 @@ static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
0x00000000, // VGT_GS_OUT_PRIM_TYPE
0x00000000, // IA_ENHANCE
};
-static const unsigned int gfx9_SECT_CONTEXT_def_5[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_5[] = {
0x00000000, // WD_ENHANCE
0x00000000, // VGT_PRIMITIVEID_EN
};
-static const unsigned int gfx9_SECT_CONTEXT_def_6[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_6[] = {
0x00000000, // VGT_PRIMITIVEID_RESET
};
-static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_7[] = {
0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
0x00000000, // VGT_DRAW_PAYLOAD_CNTL
0, // HOLE
@@ -766,8 +759,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
0x00000000, // VGT_STRMOUT_CONFIG
0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
};
-static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_8[] = {
0x00000000, // PA_SC_CENTROID_PRIORITY_0
0x00000000, // PA_SC_CENTROID_PRIORITY_1
0x00001000, // PA_SC_LINE_CNTL
@@ -924,8 +916,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
0x00000000, // CB_COLOR7_DCC_BASE
0x00000000, // CB_COLOR7_DCC_BASE_EXT
};
-static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] = {
{gfx9_SECT_CONTEXT_def_1, 0x0000a000, 212 },
{gfx9_SECT_CONTEXT_def_2, 0x0000a0d6, 282 },
{gfx9_SECT_CONTEXT_def_3, 0x0000a1f5, 4 },
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
index 66e39cdb5cb0..5fd96ddd7f0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
@@ -21,8 +21,7 @@
*
*/
-static const u32 si_SECT_CONTEXT_def_1[] =
-{
+static const u32 si_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -236,8 +235,7 @@ static const u32 si_SECT_CONTEXT_def_1[] =
0x00000000, // PA_SC_VPORT_ZMIN_15
0x3f800000, // PA_SC_VPORT_ZMAX_15
};
-static const u32 si_SECT_CONTEXT_def_2[] =
-{
+static const u32 si_SECT_CONTEXT_def_2[] = {
0x00000000, // CP_PERFMON_CNTX_CNTL
0x00000000, // CP_RINGID
0x00000000, // CP_VMID
@@ -511,8 +509,7 @@ static const u32 si_SECT_CONTEXT_def_2[] =
0x00000000, // CB_BLEND6_CONTROL
0x00000000, // CB_BLEND7_CONTROL
};
-static const u32 si_SECT_CONTEXT_def_3[] =
-{
+static const u32 si_SECT_CONTEXT_def_3[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
@@ -520,8 +517,7 @@ static const u32 si_SECT_CONTEXT_def_3[] =
0x00000000, // VGT_DMA_BASE_HI
0x00000000, // VGT_DMA_BASE
};
-static const u32 si_SECT_CONTEXT_def_4[] =
-{
+static const u32 si_SECT_CONTEXT_def_4[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
@@ -680,16 +676,13 @@ static const u32 si_SECT_CONTEXT_def_4[] =
0x00000000, // VGT_GS_OUT_PRIM_TYPE
0x00000000, // IA_ENHANCE
};
-static const u32 si_SECT_CONTEXT_def_5[] =
-{
+static const u32 si_SECT_CONTEXT_def_5[] = {
0x00000000, // VGT_PRIMITIVEID_EN
};
-static const u32 si_SECT_CONTEXT_def_6[] =
-{
+static const u32 si_SECT_CONTEXT_def_6[] = {
0x00000000, // VGT_PRIMITIVEID_RESET
};
-static const u32 si_SECT_CONTEXT_def_7[] =
-{
+static const u32 si_SECT_CONTEXT_def_7[] = {
0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
0, // HOLE
0, // HOLE
@@ -924,8 +917,7 @@ static const u32 si_SECT_CONTEXT_def_7[] =
0x00000000, // CB_COLOR7_CLEAR_WORD0
0x00000000, // CB_COLOR7_CLEAR_WORD1
};
-static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] = {
{si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
{si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
{si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 587ee632a3b8..221af054d874 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -52,6 +52,7 @@
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
static const u32 crtc_offsets[] = {
CRTC0_REGISTER_OFFSET,
@@ -364,6 +365,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+ dce_v10_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f22ec27365bd..69e8b0db6cf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -52,6 +52,7 @@
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
static const u32 crtc_offsets[] =
{
@@ -388,6 +389,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+ dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 4dbe9b3259b5..60d40201fdd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -273,6 +273,21 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
+static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
+ int hpd)
+{
+ u32 tmp;
+
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hdp %d\n", hpd);
+ return;
+ }
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
/**
* dce_v6_0_hpd_init - hpd setup callback.
*
@@ -312,6 +327,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -3089,7 +3105,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, tmp;
+ uint32_t disp_int, mask;
unsigned hpd;
if (entry->src_data[0] >= adev->mode_info.num_hpd) {
@@ -3102,9 +3118,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
mask = interrupt_status_offsets[hpd].hpd;
if (disp_int & mask) {
- tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+ dce_v6_0_hpd_int_ack(adev, hpd);
schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 05bcce23385e..5a5fcc45e452 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -265,6 +265,21 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
+static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
+ int hpd)
+{
+ u32 tmp;
+
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hdp %d\n", hpd);
+ return;
+ }
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
/**
* dce_v8_0_hpd_init - hpd setup callback.
*
@@ -304,6 +319,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -3177,7 +3193,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, tmp;
+ uint32_t disp_int, mask;
unsigned hpd;
if (entry->src_data[0] >= adev->mode_info.num_hpd) {
@@ -3190,9 +3206,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
mask = interrupt_status_offsets[hpd].hpd;
if (disp_int & mask) {
- tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+ dce_v8_0_hpd_int_ack(adev, hpd);
schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
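The same three-step sequence now appears in all three DCE variants touched so far (v11, v6, v8): ack any HPD interrupt that may have been latched while the pad was disabled, program the polarity, then unmask the source. A minimal sketch of the ordering, using the names from the v6 hunk; the rationale (avoiding a spurious hotplug event from stale latched status at init or resume) is inferred from the code, not stated in the patch:

	/* sketch: per-connector init ordering after this change */
	dce_v6_0_hpd_int_ack(adev, hpd);           /* clear stale latched status */
	dce_v6_0_hpd_set_polarity(adev, hpd);      /* select the edge to detect */
	amdgpu_irq_get(adev, &adev->hpd_irq, hpd); /* only then unmask the IRQ */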
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index dcdecb18b230..b02d63328f1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7947,7 +7947,7 @@ static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
-static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid)
{
amdgpu_gfx_off_ctrl(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 4f3bfdc75b37..2fb1342d5bd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -727,7 +727,7 @@ static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -5027,7 +5027,7 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
u32 data;
@@ -5041,6 +5041,14 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
amdgpu_gfx_off_ctrl(adev, true);
+
+ if (ring
+ && amdgpu_sriov_is_pp_one_vf(adev)
+ && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
+ uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
+ amdgpu_ring_emit_wreg(ring, reg, data);
+ }
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
@@ -6104,7 +6112,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
- .emit_frame_size = /* totally 242 maximum if 16 IBs */
+ .emit_frame_size = /* totally 247 maximum if 16 IBs */
+ 5 + /* update_spm_vmid */
5 + /* COND_EXEC */
9 + /* SET_Q_PREEMPTION_MODE */
7 + /* PIPELINE_SYNC */
@@ -6154,6 +6163,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.get_wptr = gfx_v11_0_ring_get_wptr_compute,
.set_wptr = gfx_v11_0_ring_set_wptr_compute,
.emit_frame_size =
+ 5 + /* update_spm_vmid */
20 + /* gfx_v11_0_ring_emit_gds_switch */
7 + /* gfx_v11_0_ring_emit_hdp_flush */
5 + /* hdp invalidate */
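The new tail of gfx_v11_0_update_spm_vmid() emits the RLC_SPM_MC_CNTL write onto the ring for SR-IOV one-VF mode, and both emit_frame_size budgets above grow by 5 dwords to account for it. On GFX rings a register write is typically emitted as a PM4 WRITE_DATA packet, which is where the 5 comes from; a hedged sketch of the packet layout (assumed from the common PM4 encoding, not spelled out in this patch):

	/* sketch: a PM4 WRITE_DATA register write costs 5 ring dwords */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); /* 1: header */
	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(0));          /* 2: dst = register */
	amdgpu_ring_write(ring, reg);                            /* 3: reg offset, low */
	amdgpu_ring_write(ring, 0);                              /* 4: reg offset, high */
	amdgpu_ring_write(ring, data);                           /* 5: value */
	/* hence 242 + 5 = 247 in the gfx frame-size comment above */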
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
index 26d6286d86c9..9e7ce1e6bc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -69,7 +69,7 @@ static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
- adev->virt.ops->ras_poison_handler(adev);
+ adev->virt.ops->ras_poison_handler(adev, ras_if->block);
else
dev_warn(adev->dev,
"No ras_poison_handler interface in SRIOV for %s!\n", ras_if->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index c2faf6b4c2fc..86a4865b1ae5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3274,7 +3274,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -3500,7 +3500,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
u32 data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1943beb135c4..ea174b76ee70 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1288,7 +1288,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -5579,7 +5579,7 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
}
}
-static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
u32 data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3bc6943365a4..169d45268ef6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4902,7 +4902,7 @@ static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
-static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid)
{
amdgpu_gfx_off_ctrl(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index bc8416afb62c..f53b379d8971 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -970,8 +970,9 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
}
-static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
- { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
+static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = {
+ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32
+};
static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 131cddbdda0d..aace4594a603 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -38,6 +38,7 @@
#include "gfx_v9_4_3.h"
#include "amdgpu_xcp.h"
+#include "amdgpu_aca.h"
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
@@ -48,6 +49,10 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
@@ -675,6 +680,66 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
};
+static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ u64 status, misc0;
+ u32 instlo;
+ int ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if ((type == ACA_ERROR_TYPE_UE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+ (type == ACA_ERROR_TYPE_CE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ /* NOTE: overwrite info.die_id with xcd id for gfx */
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+ report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+ switch (instlo) {
+ case mmSMNAID_XCD0_MCA_SMU:
+ case mmSMNAID_XCD1_MCA_SMU:
+ case mmSMNXCD_XCD0_MCA_SMU:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
+ .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report,
+ .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
+};
+
+static const struct aca_info gfx_v9_4_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .bank_ops = &gfx_v9_4_3_aca_bank_ops,
+};
+
static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -1109,7 +1174,7 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -1320,7 +1385,7 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
+static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned vmid)
{
u32 reg, data;
@@ -4242,9 +4307,32 @@ struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};
+static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
+ &gfx_v9_4_3_aca_info,
+ NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
.ras_block = {
.hw_ops = &gfx_v9_4_3_ras_ops,
+ .ras_late_init = &gfx_v9_4_3_ras_late_init,
},
.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};
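The two callbacks registered in gfx_v9_4_3_aca_bank_ops divide the work: aca_bank_is_valid filters banks by the INSTANCEIDLO field (bit 0 masked off via GENMASK(31, 1) before comparing against the AID/XCD SMN bases), and aca_bank_generate_report decodes the survivors. A rough sketch of how the ACA core is presumably expected to drive them; process_banks and the bank array are hypothetical, only the callback signatures come from this series:

static void process_banks(struct aca_handle *handle,
			  const struct aca_bank_ops *ops,
			  struct aca_bank *banks, int n,
			  enum aca_error_type type)
{
	struct aca_bank_report report = {};
	int i;

	for (i = 0; i < n; i++) {
		/* skip banks that belong to another IP block */
		if (!ops->aca_bank_is_valid(handle, &banks[i], type, NULL))
			continue;
		/* decode the bank; report.count[type] feeds the RAS counters */
		ops->aca_bank_generate_report(handle, &banks[i], type,
					      &report, NULL);
	}
}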
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 6c5185608854..db89d13bd80d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -262,16 +262,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* flush hdp cache */
adev->hdp.funcs->flush_hdp(adev, NULL);
- /* For SRIOV run time, driver shouldn't access the register through MMIO
- * Directly use kiq to do the vm invalidation instead
+ /* This is necessary for SRIOV as well as for GFXOFF to function
+ * properly under bare metal
*/
if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
- 1 << vmid, GET_INST(GC, 0));
+ amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+ 1 << vmid, GET_INST(GC, 0));
return;
}
+ /* This path is needed before KIQ/MES/GFXOFF are set up */
hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
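This hunk (and the matching gmc_v11_0 and gmc_v9_0 hunks below) keeps the same path selection but renames the helper: when the KIQ ring is up, the register write-and-wait is relayed through firmware, which works both for SR-IOV (no direct MMIO access at runtime) and for bare metal with GFXOFF (GC registers may be powered down). A sketch of the decision, with the condition copied from the v10 code above; the helper name is illustrative:

static bool use_fw_reg_write_wait(struct amdgpu_device *adev)
{
	/* no KIQ available to relay the request */
	if (!adev->gfx.kiq[0].ring.sched.ready || adev->enable_mes)
		return false;
	/* SRIOV runtime must not touch MMIO; bare metal prefers the
	 * firmware path so the flush works while GFXOFF is active */
	return amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev);
}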
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index c9c653cfc765..998daa702b44 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -223,16 +223,17 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* flush hdp cache */
adev->hdp.funcs->flush_hdp(adev, NULL);
- /* For SRIOV run time, driver shouldn't access the register through MMIO
- * Directly use kiq to do the vm invalidation instead
+ /* This is necessary for SRIOV as well as for GFXOFF to function
+ * properly under bare metal
*/
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
- 1 << vmid, GET_INST(GC, 0));
+ amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+ 1 << vmid, GET_INST(GC, 0));
return;
}
+ /* This path is needed before KIQ/MES/GFXOFF are set up */
hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
@@ -570,6 +571,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
break;
case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
adev->mmhub.funcs = &mmhub_v3_3_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 59d9215e5556..23b478639921 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -435,9 +435,10 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
- uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+ uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+ AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
- (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+ (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 45a2f8e031a2..3da7b6a2b00d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -563,9 +563,10 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
- uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+ uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+ AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
- (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+ (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 4422b27a3cc2..969a9e867170 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -777,9 +777,10 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
- uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+ uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+ AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
- (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+ (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
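In all three GMC generations the PRT aperture bounds are now derived from the split reservation: the low bound comes from the VA space reserved at the bottom, and the high bound excludes the pages reserved at the top. A worked example with hypothetical sizes (the real constants live elsewhere in this series): 2 MiB reserved at each end, 4 KiB GPU pages (AMDGPU_GPU_PAGE_SHIFT == 12), max_pfn == 0x100000:

	uint32_t low  = (2 * 1024 * 1024) >> 12;              /* 0x200   */
	uint32_t high = 0x100000 - ((2 * 1024 * 1024) >> 12); /* 0xffe00 */
	/* the PRT apertures then span the PFN range [low, high] */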
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e67a62db9e12..d442ae85162d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -829,23 +829,25 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
- /* This is necessary for a HW workaround under SRIOV as well
- * as GFXOFF under bare metal
- */
if (vmhub >= AMDGPU_MMHUB0(0))
inst = GET_INST(GC, 0);
else
inst = vmhub;
+
+ /* This is necessary for SRIOV as well as for GFXOFF to function
+ * properly under bare metal
+ */
if (adev->gfx.kiq[inst].ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
- 1 << vmid, inst);
+ amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+ 1 << vmid, inst);
return;
}
+ /* This path is needed before KIQ/MES/GFXOFF are set up */
spin_lock(&adev->gmc.invalidate_lock);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
new file mode 100644
index 000000000000..8d7d0813e331
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v7_0.h"
+
+#include "hdp/hdp_7_0_0_offset.h"
+#include "hdp/hdp_7_0_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl, hdp_clk_cntl1;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Before doing the clock/power mode switch,
+ * force on the RC memory clock */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* disable clock and power gating before any changing */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+ /* confirmed that ATOMIC_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+ /* disable IPH & RC clock override after clock/power mode changing */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
+ .flush_hdp = hdp_v7_0_flush_hdp,
+ .update_clock_gating = hdp_v7_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v7_0_get_clockgating_state,
+};
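hdp_v7_0_update_clock_gating() first force-disables every memory power feature, then, when enabling, picks exactly one gating mode with the fixed preference order encoded by the if/else ladder: SD, then LS, then DS. A compact restatement of that policy; the enum and helper are illustrative only:

enum hdp_pg_mode { HDP_PG_NONE, HDP_PG_SD, HDP_PG_LS, HDP_PG_DS };

static enum hdp_pg_mode pick_hdp_mode(u64 cg_flags)
{
	if (cg_flags & AMD_CG_SUPPORT_HDP_SD)
		return HDP_PG_SD;   /* shutdown, deepest saving */
	if (cg_flags & AMD_CG_SUPPORT_HDP_LS)
		return HDP_PG_LS;   /* light sleep */
	if (cg_flags & AMD_CG_SUPPORT_HDP_DS)
		return HDP_PG_DS;   /* deep sleep */
	return HDP_PG_NONE;
}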
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h
new file mode 100644
index 000000000000..25b69201402d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V7_0_H__
+#define __HDP_V7_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v7_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
new file mode 100644
index 000000000000..16fe428c0722
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+
+#include "oss/osssys_7_0_0_offset.h"
+#include "oss/osssys_7_0_0_sh_mask.h"
+
+#include "soc15_common.h"
+#include "ih_v7_0.h"
+
+#define MAX_REARM_RETRY 10
+
+static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * ih_v7_0_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (IH_V7_0).
+ */
+static void ih_v7_0_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ /* ih ring 2 is removed;
+ * ih ring 0 and ih ring 1 are available */
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+}
+
+/**
+ * force_update_wptr_for_self_int - Force update the wptr for self interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @threshold: threshold to trigger the wptr reporting
+ * @timeout: timeout to trigger the wptr reporting
+ * @enabled: Enable/disable timeout flush mechanism
+ *
+ * threshold input range: 0 ~ 15, default 0,
+ * real_threshold = 2^threshold
+ * timeout input range: 0 ~ 20, default 8,
+ * real_timeout = (2^timeout) * 1024 / (socclk_freq)
+ *
+ * Force update wptr for self interrupt (IH_V7_0).
+ */
+static void
+force_update_wptr_for_self_int(struct amdgpu_device *adev,
+ u32 threshold, u32 timeout, bool enabled)
+{
+ u32 ih_cntl, ih_rb_cntl;
+
+ ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
+
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_USED_INT_THRESHOLD, threshold);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
+ return;
+ } else {
+ WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+
+ WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
+}
+
+/**
+ * ih_v7_0_toggle_ring_interrupts - toggle the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
+ *
+ * Toggle the interrupt ring buffer (IH_V7_0)
+ */
+static int ih_v7_0_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ih_v7_0_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (IH_V7_0).
+ */
+static int ih_v7_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = ih_v7_0_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t ih_v7_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+ int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ MC_SPACE, ih->use_bus_addr ? 2 : 4);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_CLEAR, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+ * value is written to memory
+ */
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_WRITEBACK_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+ return ih_rb_cntl;
+}
+
+static uint32_t ih_v7_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+/**
+ * ih_v7_0_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (IH_V7_0)
+ */
+static int ih_v7_0_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of the 40-bit address of the beginning of the ring buffer */
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = ih_v7_0_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, ih_v7_0_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
+ * ih_v7_0_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * disable interrupts, configure the IH ring buffer
+ * and enable it.
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int ih_v7_0_irq_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
+ u32 ih_chicken;
+ u32 tmp;
+ int ret;
+ int i;
+
+ /* disable irqs */
+ ret = ih_v7_0_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
+
+ adev->nbio.funcs->ih_control(adev);
+
+ if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
+ if (ih[0]->use_bus_addr) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
+ ih_chicken = REG_SET_FIELD(ih_chicken,
+ IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = ih_v7_0_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* update doorbell range for ih ring 0 */
+ adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
+ ih[0]->doorbell_index);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+ CLIENT18_IS_STORM_CLIENT, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
+
+ /* GC/MMHUB UTCL2 page fault interrupts are configured as
+ * MSI storm capable interrupts by default. The delay is
+ * used to avoid the ISR being called too frequently
+ * when page faults happen on several contiguous pages,
+ * and thus avoid an MSI storm */
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
+ tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
+ DELAY, 3);
+ WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
+
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+ ret = ih_v7_0_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
+ /* enable wptr force update for self int */
+ force_update_wptr_for_self_int(adev, 0, 8, true);
+
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
+}
+
+/**
+ * ih_v7_0_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw.
+ */
+static void ih_v7_0_irq_disable(struct amdgpu_device *adev)
+{
+ force_update_wptr_for_self_int(adev, 0, 8, false);
+ ih_v7_0_toggle_interrupts(adev, false);
+
+ /* Wait and acknowledge irq */
+ mdelay(1);
+}
+
+/**
+ * ih_v7_0_get_wptr() - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to fetch wptr
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer. Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
+
+ wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 32).
+ * Hopefully this allows us to catch up.
+ */
+ tmp = (wptr + 32) & ih->ptr_mask;
+ dev_warn(adev->dev, "IH ring buffer overflow "
+ "(0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, tmp);
+ ih->rptr = tmp;
+
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+out:
+ return (wptr & ih->ptr_mask);
+}
+
+/**
+ * ih_v7_0_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring to match
+ *
+ */
+static void ih_v7_0_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t v = 0;
+ uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
+ * ih_v7_0_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to set rptr
+ */
+static void ih_v7_0_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (ih->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ ih_v7_0_irq_rearm(adev, ih);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+ }
+}
+
+/**
+ * ih_v7_0_self_irq - dispatch work for ring 1
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int ih_v7_0_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs ih_v7_0_self_irq_funcs = {
+ .process = ih_v7_0_self_irq,
+};
+
+static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs;
+}
+
+static int ih_v7_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v7_0_set_interrupt_funcs(adev);
+ ih_v7_0_set_self_irq_funcs(adev);
+ return 0;
+}
+
+static int ih_v7_0_sw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool use_bus_addr;
+
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+
+ if (r)
+ return r;
+
+ /* use gpu virtual address for ih ring
+ * until IH_CHICKEN is programmed to allow
+ * using a bus address for the ih ring by the psp bl */
+ use_bus_addr = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+ if (r)
+ return r;
+
+ adev->irq.ih.use_doorbell = true;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+ adev->irq.ih1.ring_size = 0;
+ adev->irq.ih2.ring_size = 0;
+
+ /* initialize ih control register offset */
+ ih_v7_0_init_register_offset(adev);
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_init(adev);
+
+ return r;
+}
+
+static int ih_v7_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini_sw(adev);
+
+ return 0;
+}
+
+static int ih_v7_0_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = ih_v7_0_irq_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int ih_v7_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v7_0_irq_disable(adev);
+
+ return 0;
+}
+
+static int ih_v7_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ih_v7_0_hw_fini(adev);
+}
+
+static int ih_v7_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ih_v7_0_hw_init(adev);
+}
+
+static bool ih_v7_0_is_idle(void *handle)
+{
+ /* todo */
+ return true;
+}
+
+static int ih_v7_0_wait_for_idle(void *handle)
+{
+ /* todo */
+ return -ETIMEDOUT;
+}
+
+static int ih_v7_0_soft_reset(void *handle)
+{
+ /* todo */
+ return 0;
+}
+
+static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
+ }
+
+ return;
+}
+
+static int ih_v7_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v7_0_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+}
+
+static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t ih_mem_pwr_cntl;
+
+ /* Disable ih sram power cntl before switching the powergating mode */
+ ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 0);
+ WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
+
+ /* It is recommended to set mem powergating mode to DS mode */
+ if (enable) {
+ /* mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_DS_EN, 1);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_SD_EN, 0);
+ /* cam mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
+ /* re-enable power cntl */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 1);
+ } else {
+ /* mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_DS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_SD_EN, 0);
+ /* cam mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
+ /* re-enable power cntl*/
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 1);
+ }
+
+ WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
+}
+
+static int ih_v7_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_PG_STATE_GATE);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
+ ih_v7_0_update_ih_mem_power_gating(adev, enable);
+
+ return 0;
+}
+
+static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
+ *flags |= AMD_CG_SUPPORT_IH_CG;
+
+ return;
+}
+
+static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
+ .name = "ih_v7_0",
+ .early_init = ih_v7_0_early_init,
+ .late_init = NULL,
+ .sw_init = ih_v7_0_sw_init,
+ .sw_fini = ih_v7_0_sw_fini,
+ .hw_init = ih_v7_0_hw_init,
+ .hw_fini = ih_v7_0_hw_fini,
+ .suspend = ih_v7_0_suspend,
+ .resume = ih_v7_0_resume,
+ .is_idle = ih_v7_0_is_idle,
+ .wait_for_idle = ih_v7_0_wait_for_idle,
+ .soft_reset = ih_v7_0_soft_reset,
+ .set_clockgating_state = ih_v7_0_set_clockgating_state,
+ .set_powergating_state = ih_v7_0_set_powergating_state,
+ .get_clockgating_state = ih_v7_0_get_clockgating_state,
+};
+
+static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
+ .get_wptr = ih_v7_0_get_wptr,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
+ .set_rptr = ih_v7_0_set_rptr
+};
+
+static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.ih_funcs = &ih_v7_0_funcs;
+}
+
+const struct amdgpu_ip_block_version ih_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ih_v7_0_ip_funcs,
+};
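One detail of ih_v7_0_get_wptr() worth spelling out: after an overflow the read pointer is resynchronized to (wptr + 32) & ptr_mask, skipping ahead past the slot the hardware is about to overwrite. A worked example, assuming the 256 KiB ring 0 allocated in sw_init and 32-byte IVs:

	u32 ptr_mask = (256 * 1024) - 1;   /* ring size is a power of two */
	u32 wptr = 0x120;                  /* wrapped hw write pointer    */
	u32 rptr = (wptr + 32) & ptr_mask; /* 0x140: last vector not yet
					    * overwritten; resume there  */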
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h
new file mode 100644
index 000000000000..af9dcbc451fd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __IH_V7_0_IH_H__
+#define __IH_V7_0_IH_H__
+
+extern const struct amdgpu_ip_block_version ih_v7_0_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index e67a337457ed..99cd49ee8ef6 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -551,7 +551,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
- if(state == adev->jpeg.cur_state)
+ if (state == adev->jpeg.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
@@ -559,7 +559,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle,
else
ret = jpeg_v2_5_start(adev);
- if(!ret)
+ if (!ret)
adev->jpeg.cur_state = state;
return ret;
@@ -754,8 +754,7 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 2,
.minor = 5,
@@ -763,8 +762,7 @@ const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
.funcs = &jpeg_v2_5_ip_funcs,
};
-const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 2,
.minor = 6,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 82b6b62c170b..32caeb37cef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -652,7 +652,7 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
*
* Write a start command to the ring.
*/
-static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
@@ -672,7 +672,7 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
*
* Write an end command to the ring.
*/
-static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
@@ -695,7 +695,7 @@ static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
*
* Write a fence and a trap command to the ring.
*/
-static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned int flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
@@ -764,7 +764,7 @@ static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
*
* Write ring commands to execute the indirect buffer.
*/
-static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
+void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
@@ -815,7 +815,7 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0x2);
}
-static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
uint32_t reg_offset = (reg << 2);
@@ -842,7 +842,7 @@ static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_
amdgpu_ring_write(ring, mask);
}
-static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
@@ -857,7 +857,7 @@ static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
-static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
uint32_t reg_offset = (reg << 2);
@@ -875,7 +875,7 @@ static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t re
amdgpu_ring_write(ring, val);
}
-static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 22483dc66351..747a3e5f6856 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -48,4 +48,19 @@
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
+void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags);
+void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned int flags);
+void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr);
+void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
+void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
+void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+
#endif /* __JPEG_V4_0_3_H__ */
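Dropping static on the ring helpers and declaring them here lets another JPEG variant reuse them verbatim in its own ring-funcs table. A hypothetical illustration of the intended reuse; the jpeg_vX name is a placeholder, not part of this series:

static const struct amdgpu_ring_funcs jpeg_vX_dec_ring_vm_funcs = {
	/* reuse the v4.0.3 packet emitters exported above */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
};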
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 78b74daf4eeb..8d1754e35605 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -34,7 +34,17 @@
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
-#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL
+#define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX
+#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA
+#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
+
+#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+#define regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET 0x4026
+#define regJPEG_SYS_INT_EN_INTERNAL_OFFSET 0x4141
+#define regJPEG_CGC_CTRL_INTERNAL_OFFSET 0x4161
+#define regJPEG_CGC_GATE_INTERNAL_OFFSET 0x4160
+#define regUVD_NO_OP_INTERNAL_OFFSET 0x0029
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
@@ -155,11 +165,18 @@ static int jpeg_v4_0_5_hw_init(void *handle)
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
+ /* TODO: Enable ring test with DPG support */
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
+ DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully under DPG Mode");
+ return 0;
+ }
+
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
- DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
+ if (!r)
+ DRM_INFO("JPEG decode initialized successfully under SPG Mode\n");
return 0;
}
@@ -227,11 +244,11 @@ static int jpeg_v4_0_5_resume(void *handle)
return r;
}
-static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
+static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
uint32_t data = 0;
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
@@ -241,21 +258,21 @@ static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}
-static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
+static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
uint32_t data = 0;
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
@@ -265,47 +282,66 @@ static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|JPEG_CGC_GATE__JMCIF_MASK
|JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}
-static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev)
+static void jpeg_engine_4_0_5_dpg_clock_gating_mode(struct amdgpu_device *adev,
+ int inst_idx, uint8_t indirect)
+{
+ uint32_t data = 0;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_CTRL_INTERNAL_OFFSET, data, indirect);
+
+ data = 0;
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_GATE_INTERNAL_OFFSET,
+ data, indirect);
+}
+
+static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
- WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+ WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+ SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
/* disable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
/* keep the JPEG in static PG mode */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
return 0;
}
-static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
+static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
/* enable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
- WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+ WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+ SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
@@ -314,61 +350,149 @@ static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
}
/**
- * jpeg_v4_0_5_start - start JPEG block
+ * jpeg_v4_0_5_start_dpg_mode - start JPEG block with DPG mode
*
* @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: write registers indirectly via the DPG SRAM
*
- * Setup and start the JPEG block
+ * Start the JPEG block in DPG mode
*/
-static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
+static void jpeg_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
- int r;
+ struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
+ uint32_t reg_data = 0;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_jpeg(adev, true);
+ /* enable anti hang mechanism */
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
+ reg_data |= 0x1;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
- /* doorbell programming is done for every playback */
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ WREG32(SOC15_REG_OFFSET(JPEG, inst_idx, regUVD_IPX_DLDO_CONFIG),
+ 2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(JPEG, inst_idx, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+ }
- WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
- ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
- VCN_JPEG_DB_CTRL__EN_MASK);
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
- /* disable power gating */
- r = jpeg_v4_0_5_disable_static_power_gating(adev);
- if (r)
- return r;
+ if (indirect)
+ adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
+ (uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;
- /* JPEG disable CGC */
- jpeg_v4_0_5_disable_clock_gating(adev);
+ jpeg_engine_4_0_5_dpg_clock_gating_mode(adev, inst_idx, indirect);
/* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET,
+ adev->gfx.config.gb_addr_config, indirect);
+ /* enable System Interrupt for JRBC */
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_SYS_INT_EN_INTERNAL_OFFSET,
+ JPEG_SYS_INT_EN__DJRBC_MASK, indirect);
+	/* add a NOP to work around the PSP size check */
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regUVD_NO_OP_INTERNAL_OFFSET, 0, indirect);
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ if (indirect)
+ amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC_MASK,
- ~JPEG_SYS_INT_EN__DJRBC_MASK);
-
- WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
- WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR);
+}
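For readers following the DPG path above, here is a minimal sketch of what the indirect write macro is assumed to do; this is not the in-tree WREG32_SOC15_JPEG_DPG_MODE, whose exact definition lives in amdgpu_jpeg.h. When @indirect is set, writes are queued as (offset, value) pairs in the per-instance DPG SRAM buffer instead of hitting MMIO, and amdgpu_jpeg_psp_update_sram() later hands the accumulated list to the PSP to program on the driver's behalf.

static inline void jpeg_dpg_queue_write(struct amdgpu_jpeg_inst *inst,
					uint32_t offset, uint32_t value)
{
	/* sketch only: append an (offset, value) pair for the PSP to replay */
	*inst->dpg_sram_curr_addr++ = offset;
	*inst->dpg_sram_curr_addr++ = value;
}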
+
+/**
+ * jpeg_v4_0_5_stop_dpg_mode - stop JPEG block with DPG mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop the JPEG block in DPG mode
+ */
+static void jpeg_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t reg_data = 0;
+
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
+}
+
+/**
+ * jpeg_v4_0_5_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
+ int r, i;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, true);
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ /* doorbell programming is done for every playback */
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
+
+ WREG32_SOC15(VCN, i, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
+ jpeg_v4_0_5_start_dpg_mode(adev, i, adev->jpeg.indirect_sram);
+ continue;
+ }
+
+ /* disable power gating */
+ r = jpeg_v4_0_5_disable_static_power_gating(adev, i);
+ if (r)
+ return r;
+
+ /* JPEG disable CGC */
+ jpeg_v4_0_5_disable_clock_gating(adev, i);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR);
+ }
return 0;
}
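A worked example of the doorbell arithmetic in the loop above, using an assumed vcn_ring0_1 base of 4 (illustrative only): instance 0 receives offset (4 << 1) + 8*0 = 8 and instance 1 receives (4 << 1) + 8*1 = 16, so each JPEG instance owns an 8-doorbell window.

static inline u32 jpeg_doorbell_offset(u32 vcn_ring0_1, int inst)
{
	/* mirrors the expression passed to vcn_doorbell_range() above */
	return (vcn_ring0_1 << 1) + 8 * inst;
}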
@@ -382,19 +506,26 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
*/
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
- int r;
+ int r, i;
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
- jpeg_v4_0_5_enable_clock_gating(adev);
+ jpeg_v4_0_5_stop_dpg_mode(adev, i);
+ continue;
+ }
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
- /* enable power gating */
- r = jpeg_v4_0_5_enable_static_power_gating(adev);
- if (r)
- return r;
+ jpeg_v4_0_5_enable_clock_gating(adev, i);
+
+ /* enable power gating */
+ r = jpeg_v4_0_5_enable_static_power_gating(adev, i);
+ if (r)
+ return r;
+ }
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
@@ -478,13 +609,20 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ int i;
- if (enable) {
- if (!jpeg_v4_0_5_is_idle(handle))
- return -EBUSY;
- jpeg_v4_0_5_enable_clock_gating(adev);
- } else {
- jpeg_v4_0_5_disable_clock_gating(adev);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (enable) {
+ if (!jpeg_v4_0_5_is_idle(handle))
+ return -EBUSY;
+
+ jpeg_v4_0_5_enable_clock_gating(adev, i);
+ } else {
+ jpeg_v4_0_5_disable_clock_gating(adev, i);
+ }
}
return 0;
@@ -589,8 +727,15 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
- DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
+ DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i);
+ }
}
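The skip test used in the loops above, restated as a helper for clarity (sketch only; BIT() is the usual linux/bits.h macro): harvest_config is a bitmask in which bit i marks JPEG instance i as fused off.

static inline bool jpeg_inst_harvested(u32 harvest_config, int i)
{
	/* bit i set means instance i must not be programmed */
	return harvest_config & BIT(i);
}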
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
@@ -599,8 +744,15 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->irq.num_types = 1;
- adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].irq.num_types = 1;
+ adev->jpeg.inst[i].irq.funcs = &jpeg_v4_0_5_irq_funcs;
+ }
}
const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
new file mode 100644
index 000000000000..e70200f97555
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v4_0_3.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+
+static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v5_0_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+
+/**
+ * jpeg_v5_0_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v5_0_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_rings = 1;
+
+ jpeg_v5_0_0_set_dec_ring_funcs(adev);
+ jpeg_v5_0_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v5_0_0_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int r;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ ring = adev->jpeg.inst->ring_dec;
+ ring->use_doorbell = true;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
+
+ sprintf(ring->name, "jpeg_dec");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v5_0_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_0_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+static int jpeg_v5_0_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
+ int r;
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v5_0_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
+ jpeg_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v5_0_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = jpeg_v5_0_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_0_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v5_0_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v5_0_0_hw_init(adev);
+
+ return r;
+}
+
+static void jpeg_v5_0_0_disable_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data = 0;
+
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+
+ data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+ data &= ~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK
+ | JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK);
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+}
+
+static void jpeg_v5_0_0_enable_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data = 0;
+
+ data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+
+ data |= 1 << JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT;
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+ data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK
+ |JPEG_CGC_GATE__JPEG_ENC_MASK
+ |JPEG_CGC_GATE__JMCIF_MASK
+ |JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+}
+
+static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev)
+{
+ uint32_t data = 0;
+
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(JPEG, 0, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* keep the JPEG in static PG mode */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
+
+ return 0;
+}
+
+static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev)
+{
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+ 2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v5_0_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
+ int r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, true);
+
+ /* disable power gating */
+ r = jpeg_v5_0_0_disable_static_power_gating(adev);
+ if (r)
+ return r;
+
+ /* JPEG disable CGC */
+ jpeg_v5_0_0_disable_clock_gating(adev);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC0_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC0_MASK);
+
+ WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v5_0_0_stop(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v5_0_0_enable_clock_gating(adev);
+
+ /* enable power gating */
+ r = jpeg_v5_0_0_enable_static_power_gating(adev);
+ if (r)
+ return r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, false);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v5_0_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v5_0_0_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v5_0_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v5_0_0_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+static bool jpeg_v5_0_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return ((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
+		 UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+}
+
+static int jpeg_v5_0_0_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+}
+
+static int jpeg_v5_0_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE);
+
+ if (enable) {
+ if (!jpeg_v5_0_0_is_idle(handle))
+ return -EBUSY;
+ jpeg_v5_0_0_enable_clock_gating(adev);
+ } else {
+ jpeg_v5_0_0_disable_clock_gating(adev);
+ }
+
+ return 0;
+}
+
+static int jpeg_v5_0_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_0_stop(adev);
+ else
+ ret = jpeg_v5_0_0_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v5_0_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: JPEG TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_4_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(adev->jpeg.inst->ring_dec);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
+ .name = "jpeg_v5_0_0",
+ .early_init = jpeg_v5_0_0_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v5_0_0_sw_init,
+ .sw_fini = jpeg_v5_0_0_sw_fini,
+ .hw_init = jpeg_v5_0_0_hw_init,
+ .hw_fini = jpeg_v5_0_0_hw_fini,
+ .suspend = jpeg_v5_0_0_suspend,
+ .resume = jpeg_v5_0_0_resume,
+ .is_idle = jpeg_v5_0_0_is_idle,
+ .wait_for_idle = jpeg_v5_0_0_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
+ .set_powergating_state = jpeg_v5_0_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v5_0_0_dec_ring_emit_vm_flush */
+ 22 + 22 + /* jpeg_v5_0_0_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v5_0_0_dec_ring_emit_ib */
+ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
+ .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v4_0_3_dec_ring_nop,
+ .insert_start = jpeg_v4_0_3_dec_ring_insert_start,
+ .insert_end = jpeg_v4_0_3_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->ring_dec->funcs = &jpeg_v5_0_0_dec_ring_vm_funcs;
+ DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_0_irq_funcs = {
+ .set = jpeg_v5_0_0_set_interrupt_state,
+ .process = jpeg_v5_0_0_process_interrupt,
+};
+
+static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->irq.num_types = 1;
+ adev->jpeg.inst->irq.funcs = &jpeg_v5_0_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &jpeg_v5_0_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h
new file mode 100644
index 000000000000..bd348336b215
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V5_0_0_H__
+#define __JPEG_V5_0_0_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block;
+
+#endif /* __JPEG_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c
new file mode 100644
index 000000000000..396262044ea8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include "amdgpu.h"
+#include "lsdma_v7_0.h"
+#include "amdgpu_lsdma.h"
+
+#include "lsdma/lsdma_7_0_0_offset.h"
+#include "lsdma/lsdma_7_0_0_sh_mask.h"
+
+static int lsdma_v7_0_wait_pio_status(struct amdgpu_device *adev)
+{
+ return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS),
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK,
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK);
+}
+
+static int lsdma_v7_0_copy_mem(struct amdgpu_device *adev,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_0_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n");
+
+ return ret;
+}
+
+static int lsdma_v7_0_fill_mem(struct amdgpu_device *adev,
+ uint64_t dst_addr,
+ uint32_t data,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data);
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_0_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n");
+
+ return ret;
+}
+
+static void lsdma_v7_0_update_memory_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL);
+ tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, 0);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
+
+ tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, enable);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
+}
+
+const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs = {
+ .copy_mem = lsdma_v7_0_copy_mem,
+ .fill_mem = lsdma_v7_0_fill_mem,
+ .update_memory_power_gating = lsdma_v7_0_update_memory_power_gating
+};
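A hypothetical usage sketch for the function table above, assuming adev->lsdma.funcs was pointed at lsdma_v7_0_funcs during IP setup and that gpu_src/gpu_dst are placeholder GPU addresses. The PIO path is synchronous: the call returns only after LSDMA_PIO_STATUS reports idle with an empty FIFO.

static int lsdma_copy_example(struct amdgpu_device *adev,
			      uint64_t gpu_src, uint64_t gpu_dst,
			      uint64_t bytes)
{
	/* dispatch through the per-ASIC LSDMA function table */
	return adev->lsdma.funcs->copy_mem(adev, gpu_src, gpu_dst, bytes);
}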
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h
new file mode 100644
index 000000000000..52b4485cdd98
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __LSDMA_V7_0_H__
+#define __LSDMA_V7_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs;
+
+#endif /* __LSDMA_V7_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index fb53aacdcba2..c0fc44cdd658 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -33,6 +33,7 @@
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
@@ -705,8 +706,94 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
};
+static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ u64 status, misc0;
+ int ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if ((type == ACA_ERROR_TYPE_UE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+ (type == ACA_ERROR_TYPE_CE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+/* error codes below reference the SMU driver interface header */
+static int mmhub_v1_8_err_codes[] = {
+	0, 1, 2, 3, 4, /* CODE_DAGB0 - CODE_DAGB4 */
+	5, 6, 7, 8, 9, /* CODE_EA0 - CODE_EA4 */
+ 10, /* CODE_UTCL2_ROUTER */
+ 11, /* CODE_VML2 */
+ 12, /* CODE_VML2_WALKER */
+ 13, /* CODE_MMCANE */
+};
+
+static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ mmhub_v1_8_err_codes,
+ ARRAY_SIZE(mmhub_v1_8_err_codes)))
+ return false;
+
+ return true;
+}
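Restating the bank filter above as a standalone helper (sketch only): the low instance ID is pulled out of the bank's IPID register, bit 0 is dropped via GENMASK(31, 1), and the result must match the AID0 MCA SMU SMN address for the bank to be attributed to this IP.

static bool bank_from_aid0_smu(u64 ipid)
{
	u32 instlo = ACA_REG__IPID__INSTANCEIDLO(ipid) & GENMASK(31, 1);

	return instlo == mmSMNAID_AID0_MCA_SMU;	/* 0x03b30400 */
}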
+
+static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
+ .aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report,
+ .aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
+};
+
+static const struct aca_info mmhub_v1_8_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &mmhub_v1_8_aca_bank_ops,
+};
+
+static int mmhub_v1_8_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__MMHUB,
+ &mmhub_v1_8_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
.ras_block = {
.hw_ops = &mmhub_v1_8_ras_hw_ops,
+ .ras_late_init = mmhub_v1_8_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index dc4812ecc98d..b3961968c10c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -98,6 +98,7 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 63725b2ebc03..a2bd2c3b1ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -404,7 +404,8 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
-static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
+static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
{
xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 6a68ee946f1c..77f5b55decf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -152,14 +152,14 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_nv_mailbox_set_valid(adev, false);
}
-static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
- enum idh_request req)
+static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
{
int r, retry = 1;
enum idh_event event = -1;
send_request:
- xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+ xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
switch (req) {
case IDH_REQ_GPU_INIT_ACCESS:
@@ -170,6 +170,10 @@ send_request:
case IDH_REQ_GPU_INIT_DATA:
event = IDH_REQ_GPU_INIT_DATA_READY;
break;
+ case IDH_RAS_POISON:
+ if (data1 != 0)
+ event = IDH_RAS_POISON_READY;
+ break;
default:
break;
}
@@ -206,6 +210,13 @@ send_request:
return 0;
}
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ return xgpu_nv_send_access_requests_with_param(adev,
+ req, 0, 0, 0);
+}
+
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
int ret, i = 0;
@@ -424,9 +435,17 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
-static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
+static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
{
- xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+ xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+ } else {
+ amdgpu_virt_fini_data_exchange(adev);
+ xgpu_nv_send_access_requests_with_param(adev,
+ IDH_RAS_POISON, block, 0, 0);
+ amdgpu_virt_init_data_exchange(adev);
+ }
}
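A hedged note on the request/ack pairing used here: per the switch added to xgpu_nv_send_access_requests_with_param() above, an IDH_RAS_POISON request only waits for IDH_RAS_POISON_READY when data1 (the RAS block id) is non-zero, so the legacy zero-argument path keeps its fire-and-forget behaviour. Sketch:

static enum idh_event xgpu_nv_poison_ack_event(u32 data1)
{
	/* -1 mirrors the 'no ack expected' default in the sender */
	return data1 ? IDH_RAS_POISON_READY : -1;
}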
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index d0221ce08769..1e8fd90cab43 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -51,6 +51,7 @@ enum idh_event {
IDH_FAIL,
IDH_QUERY_ALIVE,
IDH_REQ_GPU_INIT_DATA_READY,
+ IDH_RAS_POISON_READY,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index de93614726c9..4178f4e5dad7 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -728,8 +728,7 @@ static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &navi10_ih_funcs;
}
-const struct amdgpu_ip_block_version navi10_ih_ip_block =
-{
+const struct amdgpu_ip_block_version navi10_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 5,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 1f52b4b1db03..05020141c0ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -89,7 +89,9 @@ static void nbio_v7_11_vpe_doorbell_range(struct amdgpu_device *adev, int instan
bool use_doorbell, int doorbell_index,
int doorbell_size)
{
- u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE);
+ u32 reg = instance == 0 ?
+ SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE1_DOORBELL_RANGE);
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {
@@ -112,7 +114,10 @@ static void nbio_v7_11_vcn_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell,
int doorbell_index, int instance)
{
- u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE);
+ u32 reg = instance == 0 ?
+		SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN1_DOORBELL_RANGE);
+
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 4bb5e10217bb..7566973ed8f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -296,6 +296,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_VPEC_FW1 = 100, /* VPEC FW1 To Save VPE */
GFX_FW_TYPE_VPEC_FW2 = 101, /* VPEC FW2 To Save VPE */
GFX_FW_TYPE_VPE = 102,
+ GFX_FW_TYPE_JPEG_RAM = 128, /**< JPEG Command buffer */
GFX_FW_TYPE_P2S_TABLE = 129,
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index efa37e3b7931..2395f1856962 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -506,7 +506,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
* before training, and restore it after training to avoid
* VRAM corruption.
*/
- sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+ sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index df1844d0800f..0da50ea46eaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -27,6 +27,7 @@
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v13_0.h"
+#include "amdgpu_ras.h"
#include "mp/mp_13_0_2_offset.h"
#include "mp/mp_13_0_2_sh_mask.h"
@@ -52,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_1_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_1_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -100,6 +103,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
@@ -187,11 +191,18 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ int ret;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
- psp_v13_0_wait_for_vmbx_ready(psp);
+ ret = psp_v13_0_wait_for_vmbx_ready(psp);
+ if (ret)
+ amdgpu_ras_query_boot_status(adev, 4);
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ amdgpu_ras_query_boot_status(adev, 4);
- return psp_v13_0_wait_for_bootloader(psp);
+ return ret;
}
return 0;
@@ -553,7 +564,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
* before training, and restore it after training to avoid
* VRAM corruption.
*/
- sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+ sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
@@ -763,81 +774,28 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
return 0;
}
-
-static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev,
- uint32_t inst,
- uint32_t boot_error)
-{
- uint32_t socket_id;
- uint32_t aid_id;
- uint32_t hbm_id;
- uint32_t reg_data;
-
- socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID);
- aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID);
- hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID);
-
- reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109);
- dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
- socket_id, aid_id, reg_data);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
- socket_id, aid_id, hbm_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD))
- dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST))
- dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
- socket_id, aid_id, hbm_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST))
- dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
- socket_id, aid_id, hbm_id);
-}
-
-static int psp_v13_0_query_boot_status(struct psp_context *psp)
+static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- int inst_mask = adev->aid_mask;
- uint32_t reg_data;
- uint32_t i;
- int ret = 0;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ u32 reg_data;
- if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
- return 0;
+ /* query ras cap should be done from host side */
+ if (amdgpu_sriov_vf(adev))
+ return false;
- if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109)
- return 0;
+ if (!con)
+ return false;
- for_each_inst(i, inst_mask) {
- reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126);
- if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) {
- psp_v13_0_boot_error_reporting(adev, i, reg_data);
- ret = -EINVAL;
- break;
- }
+ if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) &&
+ (!(adev->flags & AMD_IS_APU))) {
+ reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);
+ adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0));
+		con->poison_supported = !!(reg_data & GENMASK_ULL(24, 24));
+ return true;
+ } else {
+ return false;
}
-
- return ret;
}
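A hedged restatement of the capability decode above: C2PMSG_127 is treated as a packed word in which bits [23:0] form the per-block RAS enable mask and bit 24 flags poison-consumption support.

static void decode_ras_caps(u32 reg_data, u64 *ras_hw_enabled, bool *poison)
{
	*ras_hw_enabled = reg_data & GENMASK_ULL(23, 0);	/* bits [23:0] */
	*poison = !!(reg_data & BIT(24));			/* bit 24 */
}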
static const struct psp_funcs psp_v13_0_funcs = {
@@ -862,7 +820,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.update_spirom = psp_v13_0_update_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
- .query_boot_status = psp_v13_0_query_boot_status,
+ .get_ras_capability = psp_v13_0_get_ras_capability,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
new file mode 100644
index 000000000000..78a95f8f370b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <drm/drm_drv.h>
+#include <linux/vmalloc.h>
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+#include "psp_v14_0.h"
+
+#include "mp/mp_14_0_2_offset.h"
+#include "mp/mp_14_0_2_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+
+/* For large FW files the time to complete can be very long */
+#define USBC_PD_POLLING_LIMIT_S 240
+
+/* Read USB-PD from LFB */
+#define GFX_CMD_USB_PD_USE_LFB 0x480
+
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+
+static int psp_v14_0_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char ucode_prefix[30];
+ int err = 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ err = psp_init_sos_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static bool psp_v14_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
+static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+		/* Wait for bootloader to signify that it is
+		 * ready, with bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
+
+static int psp_v14_0_bootloader_load_component(struct psp_context *psp,
+ struct psp_bin_desc *bin_desc,
+ enum psp_bootloader_cmd bl_cmd)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check tOS sign of life register to confirm sys driver and sOS
+	 * have already been loaded.
+ */
+ if (psp_v14_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v14_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+	/* Copy the component binary to memory */
+ memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes);
+
+	/* Provide the component's address to the bootloader */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = bl_cmd;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ ret = psp_v14_0_wait_for_bootloader(psp);
+
+ return ret;
+}
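One assumption in the routine above worth spelling out: the '>> 20' hands the 64-bit PSP private-firmware MC address to the bootloader through a 32-bit mailbox register in 1 MiB granules (2^20 bytes). Sketch:

static u32 psp_fw_pri_addr_to_mbox(u64 fw_pri_mc_addr)
{
	/* C2PMSG_36 takes the address in 1 MiB units */
	return (u32)(fw_pri_mc_addr >> 20);
}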
+
+static int psp_v14_0_bootloader_load_kdb(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
+}
+
+static int psp_v14_0_bootloader_load_spl(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_TOS_SPL_TABLE);
+}
+
+static int psp_v14_0_bootloader_load_sysdrv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
+}
+
+static int psp_v14_0_bootloader_load_soc_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV);
+}
+
+static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV);
+}
+
+static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
+}
+
+static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
+}
+
+
+static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
+{
+ int ret;
+ unsigned int psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+	 * have already been loaded.
+ */
+ if (psp_v14_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v14_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy Secure OS binary to PSP memory */
+ memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes);
+
+ /* Provide the PSP secure OS to bootloader */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ 0, true);
+
+ return ret;
+}
+
+static int psp_v14_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* Write the ring destroy command */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ } else {
+ /* Write the ring destroy command */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
+static int psp_v14_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ ret = psp_v14_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v14_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
+
+ return ret;
+}
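
On bare metal the ring-create command is the ring type packed into bits [31:16] of C2PMSG_64, and the response is checked against mask 0x8000FFFF: bit 31 (done) must be set while the low 16 status bits must be zero. A toy encoder/checker under those assumptions:

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_ring_cmd(uint16_t ring_type)
{
	return (uint32_t)ring_type << 16;	/* type lives in bits [31:16] */
}

static int response_ok(uint32_t reg)
{
	/* expected value 0x80000000 under mask 0x8000FFFF:
	 * done bit set, 16-bit status field clear */
	return (reg & 0x8000FFFF) == 0x80000000;
}

int main(void)
{
	printf("cmd=0x%08x\n", encode_ring_cmd(2));
	printf("ok=%d fail=%d\n",
	       response_ok(0x80000000),	/* success */
	       response_ok(0x80000005));	/* done, but nonzero status */
	return 0;
}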
+
+static int psp_v14_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v14_0_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Fail to stop psp ring\n");
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
+}
+
+static uint32_t psp_v14_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev))
+ data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v14_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67, value);
+}
+
+static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ dev_dbg(adev->dev, "training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeed" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
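
The retry budget above turns a fixed microsecond deadline into a loop count: each psp_wait_for() call spends roughly adev->usec_timeout microseconds, so MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout iterations cover the full window. A quick sanity check with invented numbers:

#include <stdio.h>

int main(void)
{
	/* illustrative values only; the real constants live in the driver */
	unsigned int timeout_us = 40000000;	/* 40 s total budget */
	unsigned int usec_timeout = 100000;	/* ~100 ms per wait */

	printf("max_wait = %u attempts\n", timeout_us / usec_timeout); /* 400 */
	return 0;
}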
+
+static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t *)ctx->sys_cache;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t p2c_header[4];
+ uint32_t sz;
+ void *buf;
+ int ret, idx;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ dev_dbg(adev->dev, "Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ dev_err(adev->dev, "Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v14_0_is_sos_alive(psp)) {
+ dev_dbg(adev->dev, "SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ dev_dbg(adev->dev, "sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ dev_dbg(adev->dev, "Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ dev_dbg(adev->dev, "sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ dev_dbg(adev->dev, "sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ dev_dbg(adev->dev, "p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ dev_dbg(adev->dev, "Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ /*
+ * Long training will encroach a certain amount on the bottom of VRAM;
+ * save the content from the bottom of VRAM to system memory
+ * before training, and restore it after training to avoid
+ * VRAM corruption.
+ */
+ sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
+
+ if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+ dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+ adev->gmc.visible_vram_size,
+ adev->mman.aper_base_kaddr);
+ return -EINVAL;
+ }
+
+ buf = vmalloc(sz);
+ if (!buf) {
+ dev_err(adev->dev, "failed to allocate system memory.\n");
+ return -ENOMEM;
+ }
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
+ ret = psp_v14_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ vfree(buf);
+ drm_dev_exit(idx);
+ return ret;
+ }
+
+ memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
+ vfree(buf);
+ drm_dev_exit(idx);
+ } else {
+ vfree(buf);
+ return -ENODEV;
+ }
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v14_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ dev_err(adev->dev, "send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
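
The flag fix-ups at the top of psp_v14_0_memory_training() form a dependency cascade: a short-train request implies RESTORE, RESTORE implies SAVE when the system cache is stale, SAVE implies a long train when VRAM holds no valid data, and a long train in turn forces SAVE and drops the short message. A compact model of that resolution (flag values are illustrative, not the driver's):

#include <stdio.h>

#define TRAIN_SHORT	0x1
#define TRAIN_RESTORE	0x2
#define TRAIN_SAVE	0x4
#define TRAIN_LONG	0x8

/* resolve implied operations, mirroring the cascade in the function above */
static unsigned int resolve_ops(unsigned int ops, int cache_valid, int vram_valid)
{
	if (ops & TRAIN_SHORT)
		ops |= TRAIN_RESTORE;
	if ((ops & TRAIN_RESTORE) && !cache_valid)
		ops |= TRAIN_SAVE;
	if ((ops & TRAIN_SAVE) && !vram_valid)
		ops |= TRAIN_LONG;
	if (ops & TRAIN_LONG) {
		ops &= ~TRAIN_SHORT;	/* long training supersedes short */
		ops |= TRAIN_SAVE;
	}
	return ops;
}

int main(void)
{
	/* cold boot: nothing cached anywhere -> escalates to LONG|SAVE|RESTORE */
	printf("ops=0x%x\n", resolve_ops(TRAIN_SHORT, 0, 0));
	return 0;
}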
+
+static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t reg_status;
+ int ret, i = 0;
+
+ /*
+ * The LFB address is aligned to a 1MB boundary, so it is
+ * right-shifted by 20 bits to fit in a 32-bit C2P
+ * register
+ */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up the interrupt so the PSP can pick up the address */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16));
+
+ /* FW load can take a very long time */
+ do {
+ msleep(1000);
+ reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35);
+
+ if (reg_status & 0x80000000)
+ goto done;
+
+ } while (++i < USBC_PD_POLLING_LIMIT_S);
+
+ return -ETIME;
+done:
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %04x\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (!ret)
+ *fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
+
+ return ret;
+}
+
+static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
+{
+ uint32_t reg_status = 0, reg_val = 0;
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* clear MBX ready (MBOX_READY_MASK bit is 0) and set update command */
+ reg_val |= (cmd << 16);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115, reg_val);
+
+ /* Ring the doorbell */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_73, 1);
+
+ if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
+ ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+
+ if (ret) {
+ dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
+ return ret;
+ }
+
+ reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115);
+ if ((reg_status & 0xFFFF) != 0) {
+ dev_err(adev->dev, "SPI cmd %x failed, fail status = %04x\n",
+ cmd, reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
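
Every SPI command above follows one mailbox shape: write the command into bits [31:16] of C2PMSG_115 (which also clears the ready bit), ring the doorbell register, wait for MBOX_READY, then read the low 16 bits as a status code. A self-contained model of that protocol, with registers as plain variables and the ready bit assumed to be bit 31:

#include <stdint.h>
#include <stdio.h>

static uint32_t msg115;	/* stands in for regMPASP_SMN_C2PMSG_115 */

/* fake firmware side: mark ready, report status in the low 16 bits */
static void fw_process(uint16_t status)
{
	msg115 = 0x80000000u | status;	/* MBOX_READY assumed as bit 31 */
}

static int exec_spi_cmd(uint16_t cmd, uint16_t fw_status)
{
	msg115 = (uint32_t)cmd << 16;	/* clears ready bit, sets command */
	fw_process(fw_status);		/* doorbell + wait collapsed for the sketch */

	if (!(msg115 & 0x80000000u))
		return -1;		/* would be a timeout in the driver */
	if (msg115 & 0xFFFF)
		return -5;		/* -EIO: nonzero fail status */
	return 0;
}

int main(void)
{
	printf("ok=%d bad=%d\n", exec_spi_cmd(0xA, 0), exec_spi_cmd(0xA, 7));
	return 0;
}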
+
+static int psp_v14_0_update_spirom(struct psp_context *psp,
+ uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* Confirm PSP is ready to start */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ if (ret) {
+ dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
+ return ret;
+ }
+
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO);
+ if (ret)
+ return ret;
+
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI);
+ if (ret)
+ return ret;
+
+ psp->vbflash_done = true;
+
+ ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int psp_v14_0_vbflash_status(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ return RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115);
+}
+
+static const struct psp_funcs psp_v14_0_funcs = {
+ .init_microcode = psp_v14_0_init_microcode,
+ .bootloader_load_kdb = psp_v14_0_bootloader_load_kdb,
+ .bootloader_load_spl = psp_v14_0_bootloader_load_spl,
+ .bootloader_load_sysdrv = psp_v14_0_bootloader_load_sysdrv,
+ .bootloader_load_soc_drv = psp_v14_0_bootloader_load_soc_drv,
+ .bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv,
+ .bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv,
+ .bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv,
+ .bootloader_load_sos = psp_v14_0_bootloader_load_sos,
+ .ring_create = psp_v14_0_ring_create,
+ .ring_stop = psp_v14_0_ring_stop,
+ .ring_destroy = psp_v14_0_ring_destroy,
+ .ring_get_wptr = psp_v14_0_ring_get_wptr,
+ .ring_set_wptr = psp_v14_0_ring_set_wptr,
+ .mem_training = psp_v14_0_memory_training,
+ .load_usbc_pd_fw = psp_v14_0_load_usbc_pd_fw,
+ .read_usbc_pd_fw = psp_v14_0_read_usbc_pd_fw,
+ .update_spirom = psp_v14_0_update_spirom,
+ .vbflash_stat = psp_v14_0_vbflash_status
+};
+
+void psp_v14_0_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v14_0_funcs;
+}
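
psp_v14_0_funcs is the kernel's usual ops-table idiom: a const struct of function pointers per IP version, bound once at init so the common PSP code dispatches without knowing which version it drives. A minimal userspace rendition of the pattern (all types and names invented for the sketch):

#include <stdio.h>

struct psp_ctx;

struct psp_ops {
	int (*bootloader_load_sos)(struct psp_ctx *ctx);
};

struct psp_ctx {
	const struct psp_ops *ops;
};

static int v14_load_sos(struct psp_ctx *ctx)
{
	(void)ctx;
	printf("v14 sOS load path\n");
	return 0;
}

static const struct psp_ops v14_ops = {
	.bootloader_load_sos = v14_load_sos,
};

int main(void)
{
	struct psp_ctx ctx = { .ops = &v14_ops };

	/* common code dispatches without knowing the IP version */
	return ctx.ops->bootloader_load_sos(&ctx);
}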
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h
new file mode 100644
index 000000000000..dd18ba2cfad5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __PSP_V14_0_H__
+#define __PSP_V14_0_H__
+
+#include "amdgpu_psp.h"
+
+#define PSP_SPIROM_UPDATE_TIMEOUT 60000 /* 60s */
+
+void psp_v14_0_set_psp_funcs(struct psp_context *psp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 8d5d86675a7f..07e19caf2bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -57,22 +57,19 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
-static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
-{
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
SDMA0_REGISTER_OFFSET,
SDMA1_REGISTER_OFFSET
};
-static const u32 golden_settings_iceland_a11[] =
-{
+static const u32 golden_settings_iceland_a11[] = {
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};
-static const u32 iceland_mgcg_cgcg_init[] =
-{
+static const u32 iceland_mgcg_cgcg_init[] = {
mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
@@ -142,7 +139,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
case CHIP_TOPAZ:
chip_name = "topaz";
break;
- default: BUG();
+ default:
+ BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1258,8 +1256,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
-const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
-{
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 2,
.minor = 4,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2d688dca26be..fec5a3d1c4bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -45,6 +45,8 @@
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400
+
#define WREG32_SDMA(instance, offset, value) \
WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
@@ -2204,9 +2206,79 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
.reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
};
+static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ u64 status, misc0;
+ int ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if ((type == ACA_ERROR_TYPE_UE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+ (type == ACA_ERROR_TYPE_CE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+/* CODE_SDMA0 - CODE_SDMA3; see the SMU driver interface header file for the codes */
+static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 };
+
+static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ sdma_v4_4_2_err_codes,
+ ARRAY_SIZE(sdma_v4_4_2_err_codes)))
+ return false;
+
+ return true;
+}
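
The validity check keeps bits [31:1] of InstanceIdLo before comparing against the AID0 SMU address; GENMASK(31, 1) is 0xFFFFFFFE, so bit 0 can never affect the match. A quick demonstration of that masking:

#include <stdint.h>
#include <stdio.h>

#define SMN_AID0_MCA_SMU 0x03b30400u	/* value taken from the hunk above */

int main(void)
{
	/* two raw instance IDs that differ only in bit 0 */
	uint32_t a = SMN_AID0_MCA_SMU;
	uint32_t b = SMN_AID0_MCA_SMU | 0x1;
	uint32_t mask = 0xFFFFFFFEu;	/* GENMASK(31, 1) */

	printf("a matches: %d\n", (a & mask) == SMN_AID0_MCA_SMU);
	printf("b matches: %d\n", (b & mask) == SMN_AID0_MCA_SMU);
	return 0;
}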
+
+static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = {
+ .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report,
+ .aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid,
+};
+
+static const struct aca_info sdma_v4_4_2_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &sdma_v4_4_2_aca_bank_ops,
+};
+
+static int sdma_v4_4_2_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_sdma_ras_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__SDMA,
+ &sdma_v4_4_2_aca_info, NULL);
+}
+
static struct amdgpu_sdma_ras sdma_v4_4_2_ras = {
.ras_block = {
.hw_ops = &sdma_v4_4_2_ras_hw_ops,
+ .ras_late_init = sdma_v4_4_2_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 3c7ddd219de8..4874ded45653 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 4d7188912edf..5f81c264e310 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -711,6 +711,7 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG_DPG |
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
@@ -865,6 +866,7 @@ static int soc21_common_set_clockgating_state(void *handle,
case IP_VERSION(7, 7, 0):
case IP_VERSION(7, 7, 1):
case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 879bb7af297c..056d4df8fa1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -36,6 +36,9 @@ enum ras_command {
TA_RAS_COMMAND__ENABLE_FEATURES = 0,
TA_RAS_COMMAND__DISABLE_FEATURES,
TA_RAS_COMMAND__TRIGGER_ERROR,
+ TA_RAS_COMMAND__QUERY_BLOCK_INFO,
+ TA_RAS_COMMAND__QUERY_SUB_BLOCK_INFO,
+ TA_RAS_COMMAND__QUERY_ADDRESS,
};
enum ta_ras_status {
@@ -105,6 +108,11 @@ enum ta_ras_error_type {
TA_RAS_ERROR__POISON = 8,
};
+enum ta_ras_address_type {
+ TA_RAS_MCA_TO_PA,
+ TA_RAS_PA_TO_MCA,
+};
+
/* Input/output structures for RAS commands */
/**********************************************************/
@@ -133,12 +141,38 @@ struct ta_ras_init_flags {
uint8_t channel_dis_num;
};
+struct ta_ras_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+};
+
+struct ta_ras_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct ta_ras_query_address_input {
+ enum ta_ras_address_type addr_type;
+ struct ta_ras_mca_addr ma;
+ struct ta_ras_phy_addr pa;
+};
+
struct ta_ras_output_flags {
uint8_t ras_init_success_flag;
uint8_t err_inject_switch_disable_flag;
uint8_t reg_access_failure_flag;
};
+struct ta_ras_query_address_output {
+ /* don't use the flags here */
+ struct ta_ras_output_flags flags;
+ struct ta_ras_mca_addr ma;
+ struct ta_ras_phy_addr pa;
+};
+
/* Common input structure for RAS callbacks */
/**********************************************************/
union ta_ras_cmd_input {
@@ -146,12 +180,14 @@ union ta_ras_cmd_input {
struct ta_ras_enable_features_input enable_features;
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
+ struct ta_ras_query_address_input address;
uint32_t reserve_pad[256];
};
union ta_ras_cmd_output {
struct ta_ras_output_flags flags;
+ struct ta_ras_query_address_output address;
uint32_t reserve_pad[256];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 7458a218e89d..14ef7a24be7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -89,12 +89,28 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
umc_v12_0_reset_error_count_per_channel, NULL);
}
+bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
+{
+ dev_info(adev->dev,
+ "MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
+ mc_umc_status,
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
+ );
+
+ return (amdgpu_ras_is_poison_mode_supported(adev) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
+}
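
umc_v12_0_is_deferred_error() boils down to bitfield tests on the 64-bit MCA status word: the error is deferred when both Val and Deferred are set and poison mode is supported. A plain-C version of the extraction logic, with invented bit positions since the real ones come from the MCA_UMC_UMC0_MCUMC_STATUST0 field definitions:

#include <stdint.h>
#include <stdio.h>

/* invented bit positions for illustration; the driver takes these
 * from the MCA_UMC_UMC0_MCUMC_STATUST0 register field definitions */
#define STATUS_VAL_BIT		63
#define STATUS_DEFERRED_BIT	44

static int get_field(uint64_t status, int bit)
{
	return (int)((status >> bit) & 0x1);
}

static int is_deferred(uint64_t status, int poison_supported)
{
	return poison_supported &&
	       get_field(status, STATUS_VAL_BIT) == 1 &&
	       get_field(status, STATUS_DEFERRED_BIT) == 1;
}

int main(void)
{
	uint64_t status = (1ULL << STATUS_VAL_BIT) | (1ULL << STATUS_DEFERRED_BIT);

	printf("deferred=%d\n", is_deferred(status, 1));	/* 1 */
	printf("deferred=%d\n", is_deferred(status, 0));	/* 0: no poison mode */
	return 0;
}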
+
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
- if (amdgpu_ras_is_poison_mode_supported(adev) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
- return true;
+ if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
+ return false;
return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
@@ -104,9 +120,7 @@ bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_um
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
- if (amdgpu_ras_is_poison_mode_supported(adev) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+ if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
return false;
return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -119,9 +133,10 @@ bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_
!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}
-static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
+static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
uint64_t umc_reg_offset,
- unsigned long *error_count)
+ unsigned long *error_count,
+ check_error_type_func error_type_func)
{
uint64_t mc_umc_status;
uint64_t mc_umc_status_addr;
@@ -129,31 +144,11 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- /* Rely on MCUMC_STATUS for correctable error counter
- * MCUMC_STATUS is a 64 bit register
- */
+ /* Check MCUMC_STATUS */
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
- if (umc_v12_0_is_correctable_error(adev, mc_umc_status))
- *error_count += 1;
-}
-
-static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev,
- uint64_t umc_reg_offset,
- unsigned long *error_count)
-{
- uint64_t mc_umc_status;
- uint64_t mc_umc_status_addr;
-
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
-
- /* Check the MCUMC_STATUS. */
- mc_umc_status =
- RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
-
- if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))
+ if (error_type_func(adev, mc_umc_status))
*error_count += 1;
}
@@ -162,7 +157,7 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
uint32_t ch_inst, void *data)
{
struct ras_err_data *err_data = (struct ras_err_data *)data;
- unsigned long ue_count = 0, ce_count = 0;
+ unsigned long ue_count = 0, ce_count = 0, de_count = 0;
/* NOTE: node_inst is derived from adev->umc.active_mask and its range is [0-3],
 * so it can be used as the die ID directly */
@@ -174,11 +169,16 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
uint64_t umc_reg_offset =
get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);
- umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count);
- umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count);
+ umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+ &ce_count, umc_v12_0_is_correctable_error);
+ umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+ &ue_count, umc_v12_0_is_uncorrectable_error);
+ umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+ &de_count, umc_v12_0_is_deferred_error);
amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);
return 0;
}
@@ -203,14 +203,14 @@ static bool umc_v12_0_bit_wise_xor(uint32_t val)
return result;
}
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data, uint64_t err_addr,
- uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst)
+static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst,
+ uint32_t node_inst,
+ struct ta_ras_query_address_output *addr_out)
{
uint32_t channel_index, i;
- uint64_t soc_pa, na, retired_page, column;
- uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row, row_xor;
+ uint64_t na, soc_pa;
+ uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
uint32_t bank0, bank1, bank2, bank3, bank;
bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
@@ -260,12 +260,44 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
/* the umc channel bits are not original values, they are hashed */
UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
+ addr_out->pa.pa = soc_pa;
+ addr_out->pa.bank = bank;
+ addr_out->pa.channel_idx = channel_index;
+}
+
+static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst,
+ uint32_t node_inst)
+{
+ uint32_t col, row, row_xor, bank, channel_index;
+ uint64_t soc_pa, retired_page, column;
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+
+ addr_in.addr_type = TA_RAS_MCA_TO_PA;
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch_inst;
+ addr_in.ma.umc_inst = umc_inst;
+ addr_in.ma.node_inst = node_inst;
+
+ if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out))
+ /* fallback to old path if fail to get pa from psp */
+ umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst,
+ node_inst, &addr_out);
+
+ soc_pa = addr_out.pa.pa;
+ bank = addr_out.pa.bank;
+ channel_index = addr_out.pa.channel_idx;
+
+ col = (err_addr >> 1) & 0x1fULL;
+ row = (err_addr >> 10) & 0x3fffULL;
+ row_xor = row ^ (0x1ULL << 13);
/* clear [C3 C2] in soc physical address */
soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
/* clear [C4] in soc physical address */
soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
- row_xor = row ^ (0x1ULL << 13);
/* loop for all possibilities of [C4 C3 C2] */
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
@@ -316,10 +348,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
}
/* calculate error address if ue error is detected */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1) {
-
+ if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
@@ -385,45 +414,69 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
{
struct ras_err_node *err_node;
uint64_t mc_umc_status;
+ struct ras_err_info *err_info;
+ struct ras_err_addr *mca_err_addr, *tmp;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
for_each_ras_error(err_node, err_data) {
- mc_umc_status = err_node->err_info.err_addr.err_status;
- if (!mc_umc_status)
+ err_info = &err_node->err_info;
+ if (list_empty(&err_info->err_addr_list))
continue;
- if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
- uint64_t mca_addr, err_addr, mca_ipid;
- uint32_t InstanceIdLo;
- struct amdgpu_smuio_mcm_config_info *mcm_info;
-
- mcm_info = &err_node->err_info.mcm_info;
- mca_addr = err_node->err_info.err_addr.err_addr;
- mca_ipid = err_node->err_info.err_addr.err_ipid;
-
- err_addr = REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
-
- dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
- mca_ipid,
- mcm_info->die_id,
- MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
- MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
- err_addr);
-
- umc_v12_0_convert_error_address(adev,
- err_data, err_addr,
- MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
- MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
- mcm_info->die_id);
-
- /* Clear umc error address content */
- memset(&err_node->err_info.err_addr,
- 0, sizeof(err_node->err_info.err_addr));
+ list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) {
+ mc_umc_status = mca_err_addr->err_status;
+ if (mc_umc_status &&
+ (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
+ umc_v12_0_is_deferred_error(adev, mc_umc_status))) {
+ uint64_t mca_addr, err_addr, mca_ipid;
+ uint32_t InstanceIdLo;
+
+ mca_addr = mca_err_addr->err_addr;
+ mca_ipid = mca_err_addr->err_ipid;
+
+ err_addr = REG_GET_FIELD(mca_addr,
+ MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+
+ dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
+ mca_ipid,
+ err_info->mcm_info.die_id,
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ err_addr);
+
+ umc_v12_0_convert_error_address(adev,
+ err_data, err_addr,
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ err_info->mcm_info.die_id);
+ }
+
+ /* Delete error address node from list and free memory */
+ amdgpu_ras_del_mca_err_addr(err_info, mca_err_addr);
}
}
}
+static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, void *ras_error_status)
+{
+ uint64_t mc_umc_status = *(uint64_t *)ras_error_status;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ return umc_v12_0_is_correctable_error(adev, mc_umc_status);
+ case AMDGPU_MCA_ERROR_TYPE_DE:
+ return umc_v12_0_is_deferred_error(adev, mc_umc_status);
+ default:
+ return false;
+ }
+}
+
static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
amdgpu_umc_loop_channels(adev,
@@ -444,12 +497,71 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
.query_ras_error_address = umc_v12_0_query_ras_error_address,
};
+static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ struct amdgpu_device *adev = handle->adev;
+ u64 status;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ if (umc_v12_0_is_uncorrectable_error(adev, status))
+ report->count[type] = 1;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ if (umc_v12_0_is_correctable_error(adev, status))
+ report->count[type] = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
+ .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report,
+};
+
+const struct aca_info umc_v12_0_aca_info = {
+ .hwip = ACA_HWIP_TYPE_UMC,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .bank_ops = &umc_v12_0_aca_bank_ops,
+};
+
+static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int ret;
+
+ ret = amdgpu_umc_ras_late_init(adev, ras_block);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
+ &umc_v12_0_aca_info, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
+ .ras_late_init = umc_v12_0_ras_late_init,
},
.err_cnt_init = umc_v12_0_err_cnt_init,
.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
+ .check_ecc_err_status = umc_v12_0_check_ecc_err_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index e8de3a92251a..5973bfb14fce 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -121,9 +121,12 @@
(((_ipid_lo) >> 12) & 0xF))
#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
+typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status);
+
extern const uint32_t
umc_v12_0_channel_idx_tbl[]
[UMC_V12_0_UMC_INSTANCE_NUM]
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
index 0d6b50528d76..97fa88ed770c 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
@@ -25,7 +25,7 @@
static void umc_v6_0_init_registers(struct amdgpu_device *adev)
{
- unsigned i,j;
+ unsigned i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
new file mode 100644
index 000000000000..d6ee9958ba5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -0,0 +1,1339 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "soc15_hw_ip.h"
+#include "vcn_v2_0.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "vcn_v5_0_0.h"
+
+#include <drm/drm_drv.h>
+
+static int amdgpu_ih_clientid_vcns[] = {
+ SOC15_IH_CLIENTID_VCN,
+ SOC15_IH_CLIENTID_VCN1
+};
+
+static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vcn_v5_0_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state);
+static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
+
+/**
+ * vcn_v5_0_0_early_init - set function pointers and load microcode
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ * Load microcode from filesystem
+ */
+static int vcn_v5_0_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* re-use enc ring as unified ring */
+ adev->vcn.num_enc_rings = 1;
+
+ vcn_v5_0_0_set_unified_ring_funcs(adev);
+ vcn_v5_0_0_set_irq_funcs(adev);
+
+ return amdgpu_vcn_early_init(adev);
+}
+
+/**
+ * vcn_v5_0_0_sw_init - sw init for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int vcn_v5_0_0_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r;
+
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev);
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ atomic_set(&adev->vcn.inst[i].sched_score, 0);
+
+ /* VCN UNIFIED TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
+ VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
+ if (r)
+ return r;
+
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ if (r)
+ return r;
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->use_doorbell = true;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
+
+ ring->vm_hub = AMDGPU_MMHUB0(0);
+ sprintf(ring->name, "vcn_unified_%d", i);
+
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
+ AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
+ if (r)
+ return r;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
+
+ return 0;
+}
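
The doorbell arithmetic in sw_init gives each VCN instance its own slot: the vcn_ring0_1 base is doubled (doorbell indices are commonly defined in 64-bit units while rings use 32-bit doorbells), offset by 2 for the unified ring, then strided by 8 per instance. A tiny table of the resulting indices under an assumed base value:

#include <stdio.h>

int main(void)
{
	int vcn_ring0_1 = 0x100;	/* assumed base; the real value is per-ASIC */

	for (int i = 0; i < 2; i++)
		printf("inst %d -> doorbell %d\n",
		       i, (vcn_ring0_1 << 1) + 2 + 8 * i);
	return 0;
}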
+
+/**
+ * vcn_v5_0_0_sw_fini - sw fini for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * VCN suspend and free up sw allocation
+ */
+static int vcn_v5_0_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r, idx;
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ fw_shared->sq.is_enabled = 0;
+ }
+
+ drm_dev_exit(idx);
+ }
+
+ r = amdgpu_vcn_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_hw_init - start and test VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vcn_v5_0_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+ }
+
+done:
+ if (!r)
+ DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the VCN block and mark the ring as no longer ready
+ */
+static int vcn_v5_0_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_suspend - suspend VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend VCN block
+ */
+static int vcn_v5_0_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vcn_v5_0_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_suspend(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_resume - resume VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init VCN block
+ */
+static int vcn_v5_0_0_resume(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ r = vcn_v5_0_0_hw_init(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Let the VCN memory controller know its offsets
+ */
+static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+}
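
The three cache windows in mc_resume are carved out of one contiguous VCN allocation: firmware first (offset 0 when the PSP loads it from the TMR instead), the stack immediately after, and the context after the stack. A sketch of the offset bookkeeping with placeholder sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* placeholder sizes; the driver uses the fw header plus
	 * AMDGPU_VCN_STACK_SIZE / AMDGPU_VCN_CONTEXT_SIZE */
	uint32_t fw_size = 0x40000, stack_size = 0x20000, ctx_size = 0x10000;
	int fw_loaded_by_psp = 0;

	/* window 0 is skipped in the offset math when the PSP owns the fw */
	uint32_t offset = fw_loaded_by_psp ? 0 : fw_size;

	printf("window0 fw:      offset 0x0     size 0x%x\n", fw_size);
	printf("window1 stack:   offset 0x%x size 0x%x\n", offset, stack_size);
	printf("window2 context: offset 0x%x size 0x%x\n",
	       offset + stack_size, ctx_size);
	return 0;
}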
+
+/**
+ * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Let the VCN memory controller know its offsets in dpg mode
+ */
+static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
+/**
+ * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Disable static power gating for VCN block
+ */
+static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+{
+ uint32_t data = 0;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
+ } else {
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
+
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
+
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
+
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
+ }
+
+ data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
+ data &= ~0x103;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+ data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
+ UVD_POWER_STATUS__UVD_PG_EN_MASK;
+
+ WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
+}
+
+/**
+ * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Enable static power gating for VCN block
+ */
+static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+{
+ uint32_t data;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ /* Before power off, this indicator has to be turned on */
+ data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
+ data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
+ data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+ WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
+ }
+}
+
+/**
+ * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Disable clock gating for VCN block
+ */
+static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+ /* VCN 5.0.0 does not program SW clock gating; intentionally a no-op */
+}
+
+#if 0
+/**
+ * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @sram_sel: sram select
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Disable clock gating for VCN block with dpg mode
+ */
+static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
+ int inst_idx, uint8_t indirect)
+{
+ return;
+}
+#endif
+
+/**
+ * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+ /* VCN 5.0.0 does not program SW clock gating; intentionally a no-op */
+}
+
+/**
+ * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Start VCN block with dpg mode
+ */
+static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_ring *ring;
+ uint32_t tmp;
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);
+
+ if (indirect)
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
+
+ vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_start - VCN start
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Start VCN block
+ */
+static int vcn_v5_0_0_start(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_ring *ring;
+ uint32_t tmp;
+ int i, j, k, r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
+ /* disable VCN power gating */
+ vcn_v5_0_0_disable_static_power_gating(adev, i);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_0_mc_resume(adev, i);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
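+ /* poll UVD_STATUS bit 1, which the VCPU sets once it has booted */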
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
+ r = 0;
+ break;
+ }
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
+ }
+ }
+
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!\n", i);
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop VCN block with dpg mode
+ */
+static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
+ uint32_t tmp;
+
+ vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+}
+
+/**
+ * vcn_v5_0_0_stop - VCN stop
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop VCN block
+ */
+static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ uint32_t tmp;
+ int i, r = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_0_stop_dpg_mode(adev, i);
+ continue;
+ }
+
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+
+ /* enable VCN power gating */
+ vcn_v5_0_0_enable_static_power_gating(adev, i);
+ }
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @new_state: pause state
+ *
+ * Pause dpg mode for VCN block
+ */
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
+ struct dpg_pause_state *new_state)
+{
+ uint32_t reg_data = 0;
+ int ret_code;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
+ DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d\n",
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
+ reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ if (!ret_code) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ }
+ } else {
+ /* unpause dpg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified read pointer
+ */
+static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id in %s", __func__);
+
+ return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
+}
+
+/**
+ * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified write pointer
+ */
+static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id in %s", __func__);
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
+}
+
+/**
+ * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id in %s", __func__);
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
+ .get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
+ .set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_unified_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set unified ring functions
+ */
+static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
+ adev->vcn.inst[i].ring_enc[0].me = i;
+
+ DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
+ }
+}
+
+/**
+ * vcn_v5_0_0_is_idle - check VCN block is idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Check whether VCN block is idle
+ */
+static bool vcn_v5_0_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Wait for VCN block idle
+ */
+static int vcn_v5_0_0_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: clock gating state
+ *
+ * Set VCN block clockgating state
+ */
+static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ if (enable) {
+ if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
+ return -EBUSY;
+ vcn_v5_0_0_enable_clock_gating(adev, i);
+ } else {
+ vcn_v5_0_0_disable_clock_gating(adev, i);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->vcn.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v5_0_0_stop(adev);
+ else
+ ret = vcn_v5_0_0_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_0_set_interrupt_state - set VCN block interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block interrupt state
+ */
+static int vcn_v5_0_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+ unsigned type, enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t ip_instance;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_VCN:
+ ip_instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_VCN1:
+ ip_instance = 1;
+ break;
+ default:
+ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+ return 0;
+ }
+
+ DRM_DEBUG("IH: VCN TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
+ break;
+ case VCN_4_0__SRCID_UVD_POISON:
+ amdgpu_vcn_process_poison_irq(adev, source, entry);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
+ .set = vcn_v5_0_0_set_interrupt_state,
+ .process = vcn_v5_0_0_process_interrupt,
+};
+
+/**
+ * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set VCN block interrupt irq functions
+ */
+static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
+ }
+}
+
+static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
+ .name = "vcn_v5_0_0",
+ .early_init = vcn_v5_0_0_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v5_0_0_sw_init,
+ .sw_fini = vcn_v5_0_0_sw_fini,
+ .hw_init = vcn_v5_0_0_hw_init,
+ .hw_fini = vcn_v5_0_0_hw_fini,
+ .suspend = vcn_v5_0_0_suspend,
+ .resume = vcn_v5_0_0_resume,
+ .is_idle = vcn_v5_0_0_is_idle,
+ .wait_for_idle = vcn_v5_0_0_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
+ .set_powergating_state = vcn_v5_0_0_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vcn_v5_0_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
new file mode 100644
index 000000000000..51bbccd4360f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_V5_0_0_H__
+#define __VCN_V5_0_0_H__
+
+#define VCN_VID_SOC_ADDRESS 0x1FC00
+#define VCN_AON_SOC_ADDRESS 0x1F800
+#define VCN1_VID_SOC_ADDRESS 0x48300
+#define VCN1_AON_SOC_ADDRESS 0x48000
+
+#define VCN_VID_IP_ADDRESS 0x0
+#define VCN_AON_IP_ADDRESS 0x30000
+
+extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
+
+#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index d1caaf0e6a7c..2e9b64edb8d2 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -2518,7 +2518,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0x8b6eff7b, 0x00000400,
0xbfa20045, 0xbf830010,
0xb8fbf803, 0xbfa0fffa,
- 0x8b6eff7b, 0x00000900,
+ 0x8b6eff7b, 0x00160900,
0xbfa20015, 0x8b6eff7b,
0x000071ff, 0xbfa10008,
0x8b6fff7b, 0x00007080,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 71b3dc0c7363..7568ff3af978 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -81,6 +81,11 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
+#if ASIC_FAMILY >= CHIP_PLUM_BONITO
+var SQ_WAVE_TRAPSTS_WAVE_START_MASK = 0x20000
+var SQ_WAVE_TRAPSTS_WAVE_END_MASK = 0x40000
+var SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK = 0x100000
+#endif
var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12
var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19
@@ -92,6 +97,16 @@ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
+#if ASIC_FAMILY < CHIP_PLUM_BONITO
+var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+#else
+var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK |\
+ SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK |\
+ SQ_WAVE_TRAPSTS_WAVE_START_MASK |\
+ SQ_WAVE_TRAPSTS_WAVE_END_MASK |\
+ SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK
+#endif
+
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000
@@ -224,7 +239,7 @@ L_NOT_HALTED:
// Check non-maskable exceptions. memory_violation, illegal_instruction
// and xnack_error exceptions always cause the wave to enter the trap
// handler.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_and_b32 ttmp2, s_save_trapsts, S_TRAPSTS_NON_MASKABLE_EXCP_MASK
s_cbranch_scc1 L_FETCH_2ND_TRAP
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
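
Taken together with the cwsr_trap_handler.h hunk above, this looks like the source and precompiled form of the same change: the gfx11 trap handler is assembled from this file with ASIC_FAMILY >= CHIP_PLUM_BONITO, and the 32-bit literal operand of that s_and_b32 grows from two non-maskable sources to five. The arithmetic checks out under that assumption (SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100 does not appear in the hunks above and is inferred from the old 0x900 literal):

#include <assert.h>
#include <stdint.h>

#define MEM_VIOL        0x00000100u     /* inferred: 0x900 minus ILLEGAL_INST */
#define ILLEGAL_INST    0x00000800u     /* SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK */
#define WAVE_START      0x00020000u     /* the three new masks from the hunk above */
#define WAVE_END        0x00040000u
#define TRAP_AFTER_INST 0x00100000u

int main(void)
{
        /* old literal in the hex dump */
        assert((MEM_VIOL | ILLEGAL_INST) == 0x00000900u);
        /* new literal once the three extra sources are folded in */
        assert((MEM_VIOL | ILLEGAL_INST | WAVE_START | WAVE_END |
                TRAP_AFTER_INST) == 0x00160900u);
        return 0;
}
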
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index cd8e459201f1..002b08fa632f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -55,6 +55,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -64,6 +65,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
/* Scalar L1 Instruction Cache (in SQC module) per bank */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -73,6 +75,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
/* Scalar L1 Data Cache (in SQC module) per bank */
.cache_size = 8,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -88,6 +91,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -95,8 +99,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
},
{
/* Scalar L1 Instruction Cache (in SQC module) per bank */
- .cache_size = 8,
+ .cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -104,8 +109,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
},
{
/* Scalar L1 Data Cache (in SQC module) per bank. */
- .cache_size = 4,
+ .cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -135,6 +141,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -144,6 +151,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -153,6 +161,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -162,6 +171,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -174,6 +184,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -183,6 +194,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -192,6 +204,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -201,6 +214,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -213,6 +227,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -222,6 +237,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -231,6 +247,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -240,6 +257,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -252,6 +270,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -261,6 +280,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -270,6 +290,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -279,6 +300,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -291,6 +313,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -300,6 +323,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -309,6 +333,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -318,6 +343,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 8192,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -330,6 +356,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -339,6 +366,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -348,6 +376,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -357,6 +386,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 8192,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -369,6 +399,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -378,6 +409,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -387,6 +419,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -396,6 +429,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -405,6 +439,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -417,6 +452,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -426,6 +462,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -435,6 +472,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -444,6 +482,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -453,6 +492,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -465,6 +505,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -474,6 +515,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -483,6 +525,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -492,6 +535,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -501,6 +545,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -513,6 +558,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -522,6 +568,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -531,6 +578,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -540,6 +588,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -549,6 +598,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -558,6 +608,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 128*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -570,6 +621,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -579,6 +631,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -588,6 +641,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -597,6 +651,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -606,6 +661,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 3072,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -615,6 +671,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 96*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -627,6 +684,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -636,6 +694,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -645,6 +704,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -654,6 +714,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -663,6 +724,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -672,6 +734,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 32*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -684,6 +747,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -693,6 +757,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -702,6 +767,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -711,6 +777,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -720,6 +787,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -729,6 +797,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 16*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -741,6 +810,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -750,6 +820,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -759,6 +830,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -768,6 +840,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -777,6 +850,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -789,6 +863,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -798,6 +873,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -807,6 +883,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -816,6 +893,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -825,6 +903,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -837,6 +916,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -846,6 +926,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -855,6 +936,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -864,6 +946,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -873,6 +956,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -885,6 +969,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -894,6 +979,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -903,6 +989,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -912,6 +999,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -921,6 +1009,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index 74c2d7a0d628..300634b9f668 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -303,6 +303,7 @@ struct kfd_node;
struct kfd_gpu_cache_info {
uint32_t cache_size;
uint32_t cache_level;
+ uint32_t cache_line_size;
uint32_t flags;
/* Indicates how many Compute Units share this cache
* within a SA. Value = 1 indicates the cache is not shared
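
The net effect of the kfd_crat.c table updates above: the GFX9-class and older tables mostly report 64-byte lines throughout, GFX10-class tables report 128-byte lines for the vector L1, GL1 and L2, and the scalar caches and L3 stay at 64 bytes (aldebaran's 128-byte L2 is the exception among the GFX9 tables). For illustration, one entry as it would be populated with the new field; the struct below mirrors kfd_gpu_cache_info, with the flags and CU-sharing values reduced to placeholders since their definitions are not part of these hunks:

#include <stdint.h>

struct cache_info {                     /* mirrors kfd_gpu_cache_info */
        uint32_t cache_size;            /* KiB */
        uint32_t cache_level;
        uint32_t cache_line_size;       /* bytes -- the new field */
        uint32_t flags;
        uint32_t num_cu_shared;         /* CUs sharing the cache per SA */
};

static const struct cache_info navi10_tcp_l1 = {
        .cache_size      = 16,
        .cache_level     = 1,
        .cache_line_size = 128,         /* from the navi10 table above */
        .flags           = 0,           /* CRAT_CACHE_FLAGS_* in the driver */
        .num_cu_shared   = 1,           /* assumed; not shown in the hunks */
};
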
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 9ec750666382..d889e3545120 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -1018,12 +1018,14 @@ int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
uint32_t *entry_size)
{
struct kfd_dbg_device_info_entry device_info;
- uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
+ uint32_t tmp_entry_size, tmp_num_devices;
int i, r = 0;
if (!(target && user_info && number_of_device_infos && entry_size))
return -EINVAL;
+ tmp_entry_size = *entry_size;
+
tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
*number_of_device_infos = target->n_pdds;
*entry_size = min_t(size_t, *entry_size, sizeof(device_info));
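
The kfd_debug.c hunk above fixes a use-before-validate bug: tmp_entry_size was initialized from *entry_size in its declaration, dereferencing the pointer before the NULL check could reject it. A minimal stand-alone illustration of the corrected shape (names and the second parameter are placeholders):

#include <errno.h>
#include <stdint.h>

static int snapshot(const uint32_t *entry_size, uint32_t *out)
{
        uint32_t tmp_entry_size;

        if (!entry_size || !out)
                return -EINVAL;         /* validate before any dereference */

        tmp_entry_size = *entry_size;   /* now provably safe */
        *out = tmp_entry_size;
        return 0;
}
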
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 739721254a5d..9b33d9d2c9ad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1285,8 +1285,10 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
int user_gpu_id;
- if (!p)
+ if (!p) {
+ dev_warn(dev->adev->dev, "Cannot find process with pasid:%d\n", pasid);
return; /* Presumably process exited. */
+ }
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
@@ -1322,6 +1324,8 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
}
}
+ dev_warn(dev->adev->dev, "Sending SIGBUS to process %s (pasid:%d)\n",
+ p->lead_thread->comm, pasid);
rcu_read_unlock();
/* user application will handle SIGBUS signal */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 6604a3f99c5e..4a64307bc438 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/processor.h>
+#include "amdgpu_vm.h"
/*
* The primary memory I/O features being added for revisions of gfxip
@@ -326,10 +327,16 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
* with small reserved space for kernel.
* Set them to CANONICAL addresses.
*/
- pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_base = max(SVM_USER_BASE, AMDGPU_VA_RESERVED_BOTTOM);
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
+ /* dGPUs: the reserved space for kernel
+ * before SVM
+ */
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
+
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
}
@@ -339,18 +346,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- /* Raven needs SVM to support graphic handle, etc. Leave the small
- * reserved space before SVM on Raven as well, even though we don't
- * have to.
- * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
- * are used in Thunk to reserve SVM.
- */
- pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_base = AMDGPU_VA_RESERVED_BOTTOM;
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+
+ /*
+ * Place TBA/TMA on opposite side of VM hole to prevent
+ * stray faults from triggering SVM on these pages.
+ */
+ pdd->qpd.cwsr_base = AMDGPU_VA_RESERVED_TRAP_START(pdd->dev->adev);
}
int kfd_init_apertures(struct kfd_process *process)
@@ -407,12 +414,6 @@ int kfd_init_apertures(struct kfd_process *process)
return -EINVAL;
}
}
-
- /* dGPUs: the reserved space for kernel
- * before SVM
- */
- pdd->qpd.cwsr_base = SVM_CWSR_BASE;
- pdd->qpd.ib_base = SVM_IB_BASE;
}
dev_dbg(kfd_device, "node id %u\n", id);
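
The kfd_flat_memory.c changes rework the aperture layout: on both paths the GPUVM aperture now starts no lower than the driver-wide reserved bottom, and on v9 the CWSR trap pages (TBA/TMA) move from the low reserved area to AMDGPU_VA_RESERVED_TRAP_START on the far side of the VM hole, so a stray fault near the SVM range can no longer land on them. A rough sketch of the resulting v9 layout; the parameterization is an assumption, not the driver's actual AMDGPU_VA_RESERVED_* math:

#include <stdint.h>

struct apertures {
        uint64_t gpuvm_base;            /* bottom of the user SVM aperture */
        uint64_t gpuvm_limit;
        uint64_t cwsr_base;             /* TBA/TMA trap pages */
};

static struct apertures init_apertures_v9(uint64_t gpuvm_size,
                                          uint64_t reserved_bottom,
                                          uint64_t trap_region_size)
{
        struct apertures a;

        a.gpuvm_base  = reserved_bottom;                /* was SVM_USER_BASE */
        a.gpuvm_limit = gpuvm_size - 1;
        /* trap pages at the top of the range, across the VM hole from SVM */
        a.cwsr_base   = gpuvm_size - trap_region_size;
        return a;
}
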
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index a7697ec8188e..9a06c6fb6605 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -132,6 +132,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
static void event_interrupt_poison_consumption(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
+ enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -151,12 +152,14 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA2:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
+ block = AMDGPU_RAS_BLOCK__SDMA;
break;
default:
break;
@@ -171,12 +174,12 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
} else {
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
}
}
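A condensed, standalone sketch of the routing this hunk adds (the same pattern recurs in the v11 and v9 handlers below): the RAS block is picked from the interrupt client, a soft unmap/evict path is tried where one exists, and only its failure (or absence) escalates to a full GPU reset. The helpers and the client-id threshold are stand-ins, not driver API:

    #include <stdbool.h>
    #include <stdio.h>

    enum ras_block { RAS_BLOCK_GFX, RAS_BLOCK_SDMA };

    /* stand-ins for the driver's real helpers and IH client ids */
    static bool client_is_sdma(unsigned int client_id) { return client_id >= 100; }
    static int evict_queues(void) { return 0; /* pretend eviction succeeds */ }

    static void poison_consumption_handler(enum ras_block block, bool reset_gpu)
    {
        printf("block=%s reset_gpu=%d\n",
               block == RAS_BLOCK_SDMA ? "SDMA" : "GFX", reset_gpu);
    }

    static void handle_poison(unsigned int client_id)
    {
        enum ras_block block = RAS_BLOCK_GFX;
        int ret = -22; /* -EINVAL: nothing handled yet */

        if (client_is_sdma(client_id))
            block = RAS_BLOCK_SDMA;  /* no unmap-queue path in this flow */
        else
            ret = evict_queues();    /* GFX/UTCL2: try evicting first */

        /* a failed (or absent) soft path falls back to a full GPU reset */
        poison_consumption_handler(block, ret != 0);
    }

    int main(void) { handle_poison(42); handle_poison(123); return 0; }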
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index 2a65792fd116..7e2859736a55 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -191,6 +191,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
uint16_t pasid, uint16_t source_id)
{
+ enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -210,9 +211,11 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
case SOC15_INTSRC_SQ_INTERRUPT_MSG:
if (dev->dqm->ops.reset_queues)
ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
case SOC21_INTSRC_SDMA_ECC:
default:
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
}
@@ -221,9 +224,9 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
/* resetting queue passes, do page retirement without gpu reset
resetting queue fails, fallback to gpu reset solution */
if (!ret)
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
else
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
}
static bool event_interrupt_isr_v11(struct kfd_node *dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 27cdaea40501..91dd5e045b51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -143,6 +143,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
+ enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -162,12 +163,14 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA2:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
+ block = AMDGPU_RAS_BLOCK__SDMA;
break;
default:
break;
@@ -182,12 +185,12 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
} else {
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 57bf5e513f4d..e5cc697a3ca8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -128,6 +128,31 @@ struct mqd_manager {
uint32_t mqd_size;
};
+struct mqd_user_context_save_area_header {
+ /* Byte offset from start of user context
+ * save area to the last saved top (lowest
+ * address) of control stack data. Must be
+ * 4 byte aligned.
+ */
+ uint32_t control_stack_offset;
+
+ /* Byte size of the last saved control stack
+ * data. Must be 4 byte aligned.
+ */
+ uint32_t control_stack_size;
+
+ /* Byte offset from start of user context save
+ * area to the last saved base (lowest address)
+ * of wave state data. Must be 4 byte aligned.
+ */
+ uint32_t wave_state_offset;
+
+ /* Byte size of the last saved wave state data.
+ * Must be 4 byte aligned.
+ */
+ uint32_t wave_state_size;
+};
+
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev,
struct queue_properties *q);
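A standalone sketch of how a consumer might walk the save-area header declared above; the struct is re-declared locally so the example compiles on its own, and the offsets are example numbers:

    #include <stdint.h>
    #include <stdio.h>

    struct save_area_header {
        uint32_t control_stack_offset; /* from save-area base, 4-byte aligned */
        uint32_t control_stack_size;
        uint32_t wave_state_offset;    /* also relative to the save-area base */
        uint32_t wave_state_size;
    };

    int main(void)
    {
        unsigned char save_area[4096] = {0};
        struct save_area_header *hdr = (struct save_area_header *)save_area;

        hdr->control_stack_offset = 64;
        hdr->control_stack_size   = 256;
        hdr->wave_state_offset    = 1024;
        hdr->wave_state_size      = 512;

        /* both regions are located relative to the start of the save area */
        void *ctl  = save_area + hdr->control_stack_offset;
        void *wave = save_area + hdr->wave_state_offset;

        printf("ctl=%p (%u bytes), wave=%p (%u bytes)\n",
               ctl, (unsigned)hdr->control_stack_size,
               wave, (unsigned)hdr->wave_state_size);
        return 0;
    }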
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index c50a0dc9c9c0..f0f7f48af413 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1515,9 +1515,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
goto unreserve_out;
}
- r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
- drm_priv_to_vm(pdd->drm_priv),
- svm_range_bo_validate, NULL);
+ r = amdgpu_vm_validate(pdd->dev->adev,
+ drm_priv_to_vm(pdd->drm_priv), NULL,
+ svm_range_bo_validate, NULL);
if (r) {
pr_debug("failed %d validate pt bos\n", r);
goto unreserve_out;
@@ -1641,7 +1641,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
goto free_ctx;
}
- svm_range_reserve_bos(ctx, intr);
+ r = svm_range_reserve_bos(ctx, intr);
+ if (r)
+ goto free_ctx;
p = container_of(prange->svms, struct kfd_process, svms);
owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 6ed2ec381aaa..bc9eb847ecfe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1564,6 +1564,7 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
pcache->processor_id_low = cu_processor_id + (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
pcache->cache_size = pcache_info[cache_type].cache_size;
+ pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
@@ -1632,6 +1633,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->processor_id_low = cu_processor_id
+ (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
+ pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3))
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
@@ -1703,6 +1705,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
gpu_processor_id = dev->node_props.simd_id_base;
+ memset(cache_info, 0, sizeof(cache_info));
pcache_info = cache_info;
num_of_cache_types = kfd_get_gpu_cache_info(kdev, &pcache_info);
if (!num_of_cache_types) {
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
deleted file mode 100644
index a8a6c106e8c7..000000000000
--- a/drivers/gpu/drm/amd/display/TODO
+++ /dev/null
@@ -1,110 +0,0 @@
-===============================================================================
-TODOs
-===============================================================================
-
-1. Base this on drm-next - WIP
-
-
-2. Cleanup commit history
-
-
-3. WIP - Drop page flip helper and use DRM's version
-
-
-4. DONE - Flatten all DC objects
- * dc_stream/core_stream/stream should just be dc_stream
- * Same for other DC objects
-
- "Is there any major reason to keep all those abstractions?
-
- Could you collapse everything into struct dc_stream?
-
- I haven't looked recently but I didn't get the impression there was a
- lot of design around what was public/protected, more whatever needed
- to be used by someone else was in public."
- ~ Dave Airlie
-
-
-5. DONE - Rename DC objects to align more with DRM
- * dc_surface -> dc_plane_state
- * dc_stream -> dc_stream_state
-
-
-6. DONE - Per-plane and per-stream validation
-
-
-7. WIP - Per-plane and per-stream commit
-
-
-8. WIP - Split pipe_ctx into plane and stream resource structs
-
-
-9. Attach plane and stream reources to state object instead of validate_context
-
-
-10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
- * Use drm_display_info instead
- * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
-
- "Making sure you use the sink-specific helper libraries and kernel
- subsystems, since there's really no good reason to have 2nd
- implementation of those in the kernel. Looks likes that's done for mst
- and edid parsing. There's still a bit a midlayer feeling to the edid
- parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
- think it'd be much better if you convert that over to reading stuff
- from drm_display_info and if needed, push stuff into the core). Also,
- I can't come up with a good reason why DC needs all this (except to
- reimplement half of our edid quirk table, which really isn't a good
- idea). Might be good if you put this onto the list of things to fix
- long-term, but imo not a blocker. Definitely make sure new stuff
- doesn't slip in (i.e. if you start adding edid quirks to DC instead of
- the drm core, refactoring to use the core edid stuff was pointless)."
- ~ Daniel Vetter
-
-
-11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
-overy complicated HW programming function for sendind and receiving i2c/aux
-commands. We can greatly simplify that and move it into dc/dceXYZ like other
-HW blocks.
-
-12. drm_modeset_lock in MST should no longer be needed in recent kernels
- * Adopt appropriate locking scheme
-
-13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
-a few indirections, and consider removing entirely and using the
-drm_atomic_helper_best_encoder default behaviour.
-
-14. core/dc_debug.c, consider switching to the atomic state debug helpers and
-moving all your driver state printing into the various atomic_print_state
-callbacks. There's also plans to expose this stuff in a standard way across all
-drivers, to make debugging userspace compositors easier across different hw.
-
-15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
-dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
-
-16. Move to core SCDC helpers (I think those are new since initial DC review).
-
-17. There's still a pretty massive layer cake around dp aux and DPCD handling,
-with like 3 levels of abstraction and using your own structures instead of the
-stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
-incompatible styles, just means more reasons not to add a third (or well third
-one gets to do the cleanup refactor).
-
-18. There's a pile of sink handling code, both for DP and HDMI where I didn't
-immediately recognize the standard. I think long term it'd be best for the drm
-subsystem if we try to move as much of that into helpers/core as possible, and
-share it with drivers. But that's a very long term goal, and by far not just an
-issue with DC - other drivers, especially around DP sink handling, are equally
-guilty.
-
-19. DONE - The DC logger is still a rather sore thing, but I know that the
-DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
-something that integrates better with DRM and linux debug printing, while not
-being useless with filtering output. dynamic debug printing might be an option.
-
-20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
-retimer that we need to program to pass PHY compliance. Currently that's
-bypassing the i2c device and goes directly to HW. This should be changed.
-
-21. Remove vector.c from dc/basics. It's used in DDC code which can probably
-be simplified enough to no longer need a vector implementation.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5853cf022917..bcdd4f28b64c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -67,6 +67,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1929,17 +1930,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = NULL;
}
- if (adev->dm.dc)
+ if (adev->dm.dc) {
dc_deinit_callbacks(adev->dm.dc);
-
- if (adev->dm.dc)
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
-
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
- kfree(adev->dm.dmub_notify);
- adev->dm.dmub_notify = NULL;
- destroy_workqueue(adev->dm.delayed_hpd_wq);
- adev->dm.delayed_hpd_wq = NULL;
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
}
if (adev->dm.dmub_bo)
@@ -2112,6 +2111,17 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
const struct dmcub_firmware_header_v1_0 *hdr;
enum dmub_asic dmub_asic;
enum dmub_status status;
+ static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ };
int r;
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
@@ -2209,7 +2219,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES;
- region_params.is_mailbox_in_inbox = false;
+ region_params.window_memory_type = window_memory_type;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -2237,6 +2247,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
memory_params.region_info = &region_info;
+ memory_params.window_memory_type = window_memory_type;
adev->dm.dmub_fb_info =
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@@ -4395,6 +4406,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4506,6 +4518,23 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
+ /* Determine whether to enable Replay support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
+
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4578,6 +4607,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
+ /* Disable PSR if Replay can be enabled */
+ if (replay_feature_enabled)
+ if (amdgpu_dm_set_replay_caps(link, aconnector))
+ psr_feature_enabled = false;
+
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -6409,10 +6443,82 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
return ret;
}
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (from 0 to 4, with 0 representing off), e.g. using the following::
+ *
+ * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ u8 val;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ val = to_dm_connector_state(connector->state)->abm_level ==
+ ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+ to_dm_connector_state(connector->state)->abm_level;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 4)
+ return -EINVAL;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_kms_helper_hotplug_event(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+ &dev_attr_panel_power_savings.attr,
+ NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+ .name = "amdgpu",
+ .attrs = amdgpu_attrs
+};
+
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0)
+ sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -6474,9 +6580,12 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->vcpi_slots = 0;
state->pbn = 0;
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- state->abm_level = amdgpu_dm_abm_level ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (amdgpu_dm_abm_level <= 0)
+ state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ else
+ state->abm_level = amdgpu_dm_abm_level;
+ }
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -6514,6 +6623,14 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
int r;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0) {
+ r = sysfs_create_group(&connector->kdev->kobj,
+ &amdgpu_group);
+ if (r)
+ return r;
+ }
+
amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -7548,7 +7665,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
- (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
+ (dc_is_dmcu_initialized(adev->dm.dc) ||
+ adev->dm.dc->ctx->dmub_srv) && amdgpu_dm_abm_level < 0) {
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.abm_level_property, 0);
}
@@ -8546,10 +8664,22 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dm_update_pflip_irq_state(drm_to_adev(dev),
acrtc_attach);
- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
- !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
- amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+ !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+ }
/* Decrement skip count when PSR is enabled and we're doing fast updates. */
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8576,6 +8706,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
!acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ !aconn->disallow_edp_enter_psr &&
(timestamp_ns -
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
500000000)
@@ -8838,11 +8969,12 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
}
} /* for_each_crtc_in_state() */
- /* if there mode set or reset, disable eDP PSR */
+ /* if there is a mode set or reset, disable eDP PSR and Replay */
if (mode_set_reset_required) {
if (dm->vblank_control_workqueue)
flush_workqueue(dm->vblank_control_workqueue);
+ amdgpu_dm_replay_disable_all(dm);
amdgpu_dm_psr_disable_all(dm);
}
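A small userspace sketch of driving the panel_power_savings attribute added above, equivalent to the echo in the DOC comment; the card/connector path is an example and varies per system:

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* valid levels are 0 (off) through 4; out-of-range writes get -EINVAL */
        fprintf(f, "3\n");
        fclose(f);
        return 0;
    }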
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 9c1871b866cc..09519b7abf67 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -693,6 +693,7 @@ struct amdgpu_dm_connector {
struct drm_display_mode freesync_vid_base;
int psr_skip_count;
+ bool disallow_edp_enter_psr;
/* Record progress status of mst*/
uint8_t mst_status;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 6e715ef3a556..e23a0a276e33 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -29,6 +29,7 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@@ -95,6 +96,61 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features.
+ *
+ * @vblank_work: pointer to a struct vblank_control_work object.
+ * @vblank_enabled: indicates whether the DRM vblank counter is currently
+ * enabled (true) or disabled (false).
+ * @allow_sr_entry: represents whether entry into the self-refresh mode is
+ * allowed (true) or not allowed (false).
+ *
+ * The DRM vblank counter enable/disable action is used as the trigger to enable
+ * or disable various panel self-refresh features:
+ *
+ * Panel Replay and PSR SU
+ * - Enable when:
+ * - vblank counter is disabled
+ * - entry is allowed (usermode demonstrates an adequate number of fast
+ * commits)
+ * - CRC capture window isn't active
+ * - Keep enabled even when vblank counter gets enabled
+ *
+ * PSR1
+ * - Enable condition same as above
+ * - Disable when vblank counter is enabled
+ */
+static void amdgpu_dm_crtc_set_panel_sr_feature(
+ struct vblank_control_work *vblank_work,
+ bool vblank_enabled, bool allow_sr_entry)
+{
+ struct dc_link *link = vblank_work->stream->link;
+ bool is_sr_active = (link->replay_settings.replay_allow_active ||
+ link->psr_settings.psr_allow_active);
+ bool is_crc_window_active = false;
+
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ is_crc_window_active =
+ amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
+#endif
+
+ if (link->replay_settings.replay_feature_enabled &&
+ allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+ amdgpu_dm_replay_enable(vblank_work->stream, true);
+ } else if (vblank_enabled) {
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
+ amdgpu_dm_psr_disable(vblank_work->stream);
+ } else if (link->psr_settings.psr_feature_enabled &&
+ allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_psr_enable(vblank_work->stream);
+ }
+}
+
static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
{
struct vblank_control_work *vblank_work =
@@ -123,18 +179,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
- if (vblank_work->enable) {
- if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
- vblank_work->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
- } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
- !vblank_work->stream->link->psr_settings.psr_allow_active &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
-#endif
- vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
- amdgpu_dm_psr_enable(vblank_work->stream);
- }
+ amdgpu_dm_crtc_set_panel_sr_feature(
+ vblank_work, vblank_work->enable,
+ vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
+ vblank_work->stream->link->replay_settings.replay_feature_enabled);
}
mutex_unlock(&dm->dc_lock);
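A condensed decision-table sketch of the consolidated self-refresh logic above (the PSR1 version check is elided for brevity); the booleans are illustrative inputs, not driver state:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *pick_action(bool replay_ok, bool psr_ok, bool vblank_on,
                                   bool allow_entry, bool sr_active,
                                   bool crc_active)
    {
        if (replay_ok && allow_entry && !sr_active && !crc_active)
            return "enable replay";   /* stays on even once vblank is enabled */
        if (vblank_on)
            return sr_active ? "disable PSR1" : "no-op";
        if (psr_ok && allow_entry && !sr_active && !crc_active)
            return "enable PSR";
        return "no-op";
    }

    int main(void)
    {
        printf("%s\n", pick_action(true, false, false, true, false, false));
        printf("%s\n", pick_action(false, true, true, true, true, false));
        return 0;
    }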
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 68a846323912..eee4945653e2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1483,7 +1483,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
const uint32_t rd_buf_size = 10;
struct pipe_ctx *pipe_ctx;
ssize_t result = 0;
- int i, r, str_len = 30;
+ int i, r, str_len = 10;
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
@@ -2971,6 +2971,53 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val)
return 0;
}
+/* check whether the kernel disallows eDP from entering the PSR state
+ * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter PSR; 1: disallow
+ */
+static int disallow_edp_enter_psr_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+
+ *val = (u64) aconnector->disallow_edp_enter_psr;
+ return 0;
+}
+
+/* set whether the kernel disallows eDP from entering the PSR state
+ * echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter PSR; 1: disallow
+ *
+ * usage: lets a test app read CRCs from a PSR-capable eDP sink (rx).
+ *
+ * during kernel boot, the kernel writes dpcd 0x170 = 5. this notifies
+ * the eDP rx that PSR is enabled and tells the rx to check CRCs.
+ * the rx firmware then checks CRCs for its internal logic only, so
+ * the crc read count in dpcd 0x246 is not updated and stays 0. when
+ * the eDP tx driver later tries to read the rx crc from dpcd 0x246,
+ * 0x270, the read count of 0 makes the tx driver time out.
+ *
+ * to avoid this, this debugfs entry lets a test app disable the
+ * rx crc checking for the rx internal logic, so the app can read a
+ * non-zero crc read count.
+ *
+ * the expected app sequence is:
+ * 1. disable the eDP PHY and notify the eDP rx with dpcd 0x600 = 2.
+ * 2. echo 0x1 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 3. enable the eDP PHY and notify the eDP rx with dpcd 0x600 = 1,
+ * but without dpcd 0x170 = 5.
+ * 4. read the crc from rx dpcd 0x270, 0x246, etc.
+ * 5. echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr.
+ * this lets eDP return to normal with the psr setup dpcd 0x170 = 5.
+ */
+static int disallow_edp_enter_psr_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+
+ aconnector->disallow_edp_enter_psr = val ? true : false;
+ return 0;
+}
+
static int dmub_trace_mask_set(void *data, u64 val)
{
struct amdgpu_device *adev = data;
@@ -3092,6 +3139,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops,
allow_edp_hotplug_detection_get,
allow_edp_hotplug_detection_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops,
+ disallow_edp_enter_psr_get,
+ disallow_edp_enter_psr_set, "%llu\n");
+
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
@@ -3265,6 +3316,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
&edp_ilr_debugfs_fops);
debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector,
&allow_edp_hotplug_detection_fops);
+ debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector,
+ &disallow_edp_enter_psr_fops);
}
for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
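A userspace sketch of steps 2 and 5 of the CRC test sequence documented above; substitute the real connector name for eDP-X, and note the DPCD writes (0x600, 0x170) happen through other interfaces and are elided here:

    #include <stdio.h>

    static int write_knob(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        const char *knob =
            "/sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr";

        write_knob(knob, "1"); /* step 2: keep rx crc checking off */
        /* ... steps 3-4: re-enable the PHY and read the sink CRCs ... */
        write_knob(knob, "0"); /* step 5: restore normal PSR behaviour */
        return 0;
    }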
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 5ce542b1f860..738a58eebba7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -60,21 +60,26 @@ static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connecto
if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT)
return false;
+ // Sink shall populate line deviation information
+ if (dpcd_caps->pr_info.pixel_deviation_per_line == 0 ||
+ dpcd_caps->pr_info.max_deviation_line == 0)
+ return false;
+
return true;
}
/*
- * amdgpu_dm_setup_replay() - setup replay configuration
+ * amdgpu_dm_set_replay_caps() - setup Replay capabilities
* @link: link
* @aconnector: aconnector
*
*/
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
{
- struct replay_config pr_config;
+ struct replay_config pr_config = { 0 };
union replay_debug_flags *debug_flags = NULL;
- // For eDP, if Replay is supported, return true to skip checks
+ // If Replay support is already set, return true to skip the checks
if (link->replay_settings.config.replay_supported)
return true;
@@ -87,27 +92,50 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
if (!link_supports_replay(link, aconnector))
return false;
- // Mark Replay is supported in link and update related attributes
+ // Mark Replay is supported in pr_config
pr_config.replay_supported = true;
- pr_config.replay_power_opt_supported = 0;
- pr_config.replay_enable_option |= pr_enable_option_static_screen;
- pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
-
- if (!pr_config.replay_timing_sync_supported)
- pr_config.replay_enable_option &= ~pr_enable_option_general_ui;
debug_flags = (union replay_debug_flags *)&pr_config.debug_flags;
debug_flags->u32All = 0;
debug_flags->bitfields.visual_confirm =
link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY;
- link->replay_settings.replay_feature_enabled = true;
-
init_replay_config(link, &pr_config);
return true;
}
+/*
+ * amdgpu_dm_link_setup_replay() - configure replay link
+ * @link: link
+ * @aconnector: aconnector
+ *
+ */
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+{
+ struct replay_config *pr_config;
+
+ if (link == NULL || aconnector == NULL)
+ return false;
+
+ pr_config = &link->replay_settings.config;
+
+ if (!pr_config->replay_supported)
+ return false;
+
+ pr_config->replay_power_opt_supported = 0x11;
+ pr_config->replay_smu_opt_supported = false;
+ pr_config->replay_enable_option |= pr_enable_option_static_screen;
+ pr_config->replay_support_fast_resync_in_ultra_sleep_mode = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
+ pr_config->replay_timing_sync_supported = false;
+
+ if (!pr_config->replay_timing_sync_supported)
+ pr_config->replay_enable_option &= ~pr_enable_option_general_ui;
+
+ link->replay_settings.replay_feature_enabled = true;
+
+ return true;
+}
/*
* amdgpu_dm_replay_enable() - enable replay f/w
@@ -117,51 +145,23 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
*/
bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
{
- uint64_t state;
- unsigned int retry_count;
bool replay_active = true;
- const unsigned int max_retry = 1000;
- bool force_static = true;
struct dc_link *link = NULL;
-
if (stream == NULL)
return false;
link = stream->link;
- if (link == NULL)
- return false;
-
- link->dc->link_srv->edp_setup_replay(link, stream);
-
- link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL);
-
- link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL);
-
- if (wait == true) {
-
- for (retry_count = 0; retry_count <= max_retry; retry_count++) {
- dc_link_get_replay_state(link, &state);
- if (replay_active) {
- if (state != REPLAY_STATE_0 &&
- (!force_static || state == REPLAY_STATE_3))
- break;
- } else {
- if (state == REPLAY_STATE_0)
- break;
- }
- udelay(500);
- }
-
- /* assert if max retry hit */
- if (retry_count >= max_retry)
- ASSERT(0);
- } else {
- /* To-do: Add trace log */
+ if (link) {
+ link->dc->link_srv->edp_setup_replay(link, stream);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ DRM_DEBUG_DRIVER("Enabling replay...\n");
+ link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
+ return true;
}
- return true;
+ return false;
}
/*
@@ -172,12 +172,31 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
*/
bool amdgpu_dm_replay_disable(struct dc_stream_state *stream)
{
+ bool replay_active = false;
+ struct dc_link *link = NULL;
- if (stream->link) {
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+
+ if (link) {
DRM_DEBUG_DRIVER("Disabling replay...\n");
- stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL);
+ link->dc->link_srv->edp_set_replay_allow_active(stream->link, &replay_active, true, false, NULL);
return true;
}
return false;
}
+
+/*
+ * amdgpu_dm_replay_disable_all() - disable replay f/w
+ * if replay is enabled on any stream
+ *
+ * Return: true on success
+ */
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm)
+{
+ DRM_DEBUG_DRIVER("Disabling replay if replay is enabled on any stream\n");
+ return dc_set_replay_allow_active(dm->dc, false);
+}
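The split above leaves Replay with a three-phase lifecycle; a stubbed sketch of the intended ordering, where the function names are placeholders for the helpers in this file:

    #include <stdbool.h>
    #include <stdio.h>

    /* stubs standing in for the amdgpu_dm_* replay helpers */
    static bool set_replay_caps(void)   { puts("detect: sink caps");  return true; }
    static bool link_setup_replay(void) { puts("full commit: link");  return true; }
    static bool replay_enable(void)     { puts("vblank off: enable"); return true; }

    int main(void)
    {
        if (set_replay_caps() &&    /* connector detection */
            link_setup_replay() &&  /* first non-fast commit */
            replay_enable())        /* vblank worker, entry allowed */
            puts("replay active");
        return 0;
    }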
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 01cba3cd6246..f0d30eb47312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -40,7 +40,9 @@ enum replay_enable_option {
bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable);
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
bool amdgpu_dm_replay_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm);
#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 1090d235086a..bd1f60ecaba4 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -101,6 +101,40 @@ void convert_float_matrix(
}
}
+static struct fixed31_32 int_frac_to_fixed_point(uint16_t arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits)
+{
+ struct fixed31_32 result;
+ uint16_t sign_mask = 1 << (fractional_bits + integer_bits);
+ uint16_t value_mask = sign_mask - 1;
+
+ result.value = (long long)(arg & value_mask) <<
+ (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+
+ if (arg & sign_mask)
+ result = dc_fixpt_neg(result);
+
+ return result;
+}
+
+/**
+ * convert_hw_matrix - converts HW values into fixed31_32 matrix.
+ * @matrix: fixed point 31.32 matrix
+ * @reg: array of register values
+ * @buffer_size: size of the array of register values
+ *
+ * Converts HW register spec defined format S2D13 into a fixed-point 31.32
+ * matrix.
+ */
+void convert_hw_matrix(struct fixed31_32 *matrix,
+ uint16_t *reg,
+ uint32_t buffer_size)
+{
+ for (int i = 0; i < buffer_size; ++i)
+ matrix[i] = int_frac_to_fixed_point(reg[i], 2, 13);
+}
+
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
uint32_t remainder;
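A standalone rendition of the S2D13 decode implemented above: bit 15 is the sign, bits 14..13 the integer part, bits 12..0 the fraction, so one LSB is 1/8192. Note the helper treats the value as sign-magnitude, which this example mirrors:

    #include <stdint.h>
    #include <stdio.h>

    static double s2d13_to_double(uint16_t reg)
    {
        uint16_t sign_mask = 1u << 15;
        uint16_t value_mask = sign_mask - 1;
        double v = (double)(reg & value_mask) / 8192.0;

        return (reg & sign_mask) ? -v : v;
    }

    int main(void)
    {
        printf("%f\n", s2d13_to_double(0x2000)); /* 8192/8192 = 1.0 */
        printf("%f\n", s2d13_to_double(0x1000)); /* 0.5 */
        printf("%f\n", s2d13_to_double(0xA000)); /* sign bit set -> -1.0 */
        return 0;
    }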
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index 81da4e6f7a1a..a433cef78496 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -41,6 +41,10 @@ void convert_float_matrix(
void reduce_fraction(uint32_t num, uint32_t den,
uint32_t *out_num, uint32_t *out_den);
+void convert_hw_matrix(struct fixed31_32 *matrix,
+ uint16_t *reg,
+ uint32_t buffer_size);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 818a529cacc3..86f9198e7501 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -37,7 +37,7 @@
#define EXEC_BIOS_CMD_TABLE(command, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GetIndexIntoMasterTable(COMMAND, command), \
- (uint32_t *)&params) == 0)
+ (uint32_t *)&params, sizeof(params)) == 0)
#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 293a919d605d..cbae1be7b009 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -49,7 +49,7 @@
#define EXEC_BIOS_CMD_TABLE(fname, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname), \
- (uint32_t *)&params) == 0)
+ (uint32_t *)&params, sizeof(params)) == 0)
#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
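A hedged sketch of why both macros now forward sizeof(params): the execute-table callee receives the caller's buffer size and can refuse table accesses past it. The signature and check below are illustrative, not the atom interpreter's real code:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static int exec_table(uint32_t *params, size_t params_size, size_t need)
    {
        /* reject tables that would touch memory past the caller's buffer */
        if (need > params_size)
            return -1;
        memset(params, 0, need);
        return 0;
    }

    int main(void)
    {
        struct { uint32_t a, b; } params = {0};

        /* the macro change above forwards sizeof(params) automatically */
        return exec_table((uint32_t *)&params, sizeof(params), 4);
    }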
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 28a2a837d2f0..86ee4fe4f5e3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -340,7 +340,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
- break;
}
case AMDGPU_FAMILY_GC_11_0_1: {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0c6a4ab72b1d..e3e1940198a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -707,9 +707,7 @@ void rn_clk_mgr_construct(
int is_green_sardine = 0;
struct clk_log_info log_info = {0};
-#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
-#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 8c9d45e5b13b..d72acbb049b1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -185,10 +185,6 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index e4f96b6fd79d..19e5b3be9275 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -180,10 +180,6 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 32279c5db724..6904e95113c1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -202,10 +202,6 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 07baa10a8647..c4af406146b7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -220,12 +220,6 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
- actual_dcfclk_set_mhz,
- actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 1042cf1a3ab0..879f1494c4cd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -215,10 +215,6 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
index 3ed19197a755..8b82092b91cd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
@@ -189,10 +189,6 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index aadd07bc68c5..e64e45e4c833 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -387,7 +387,15 @@ static void dcn32_update_clocks_update_dentist(
uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+ /*
+ * SMU uses discrete dispclk presets. We applied
+ * the same formula to increase our dispclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dispclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+ khz_to_mhz_floor(temp_dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
@@ -426,7 +434,15 @@ static void dcn32_update_clocks_update_dentist(
/* do requested DISPCLK updates*/
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+ /*
+ * SMU uses discrete dispclk presets. We applied
+ * the same formula to increase our dispclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dispclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+ khz_to_mhz_floor(clk_mgr->base.clks.dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
@@ -493,6 +509,8 @@ static void dcn32_auto_dpm_test_log(
}
}
+ msleep(5);
+
mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
@@ -734,7 +752,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
if (clk_mgr->smu_present && !dpp_clock_lowered)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+ /*
+ * SMU uses discrete dppclk presets. We applied
+ * the same formula to increase our dppclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dppclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+ khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
update_dppclk = true;
}
@@ -765,7 +791,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn32_update_clocks_update_dentist(clk_mgr, context);
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+ /*
+ * SMU uses discrete dppclk presets. We applied
+ * the same formula to increase our dppclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dppclk
+ * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+ khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
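A worked example of the ceil-to-floor change in this file: for a clock that is not an exact MHz multiple, ceil overshoots the discrete SMU preset while floor lands on it. The conversion helpers are assumptions mirroring the kernel's kHz/MHz macros:

    #include <stdio.h>

    static unsigned int khz_to_mhz_ceil(unsigned int khz)  { return (khz + 999) / 1000; }
    static unsigned int khz_to_mhz_floor(unsigned int khz) { return khz / 1000; }

    int main(void)
    {
        unsigned int dispclk_khz = 652800; /* e.g. a divider-derived value */

        printf("ceil:  %u MHz\n", khz_to_mhz_ceil(dispclk_khz));  /* 653 */
        printf("floor: %u MHz\n", khz_to_mhz_floor(dispclk_khz)); /* 652 */
        return 0;
    }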
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
index a34c258c19dc..c76352a817de 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
@@ -36,8 +36,7 @@
#define DALSMC_MSG_SetCabForUclkPstate 0x12
#define DALSMC_Result_OK 0x1
-void
-dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
+void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index e64890259235..c378b879c76d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -384,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn35_smu_enable_pme_wa(clk_mgr);
}
-void dcn35_init_clocks(struct clk_mgr *clk_mgr)
-{
- uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
-
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
-
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
bool dcn35_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
@@ -422,6 +409,22 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
+static void init_clk_states(struct clk_mgr *clk_mgr)
+{
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
+
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ init_clk_states(clk_mgr);
+}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
@@ -833,7 +836,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
}
}
-static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
+static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc *dc = clk_mgr_base->ctx->dc;
@@ -881,7 +884,7 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
return ips_supported;
}
-static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
+static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -890,7 +893,7 @@ static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
- dcn35_init_clocks(clk_mgr);
+ init_clk_states(clk_mgr);
/* TODO: Implement the functions and remove the ifndef guard */
}
@@ -975,8 +978,8 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
+ .set_idle_state = dcn35_set_ips_idle_state,
+ .get_idle_state = dcn35_get_ips_idle_state
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index 6d4a1ffab5ed..9e588c56c570 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -361,32 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
@@ -400,7 +400,7 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
clk_mgr,
msg_id,
param);
- smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
+ smu_print("%s: msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv);
}
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
@@ -447,6 +447,9 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
+ if (!clk_mgr->smu_present)
+ return;
+
dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
@@ -458,6 +461,9 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
int retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
@@ -470,6 +476,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
int retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_QueryIPS2Support,
@@ -481,6 +490,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
{
+ if (!clk_mgr->smu_present)
+ return;
+
REG_WRITE(MP1_SMN_C2PMSG_71, param);
//smu_print("%s: write_ips_scratch = %x\n", __func__, param);
}
@@ -489,6 +501,9 @@ uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
{
uint32_t retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = REG_READ(MP1_SMN_C2PMSG_71);
//smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
return retv;
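The AllowZstatesEntry parameters printed above decode as one gate bit per Z-state; the bit meanings below are inferred from the five cases in the switch, not taken from a published spec:

    #include <stdio.h>

    #define ALLOW_Z8  (1u << 8)   /* inferred: present in every Z8 case */
    #define ALLOW_Z9  (1u << 9)   /* inferred: only in the full ALLOW case */
    #define ALLOW_Z10 (1u << 10)  /* inferred: present in every Z10 case */

    int main(void)
    {
        unsigned int allow_all    = ALLOW_Z10 | ALLOW_Z9 | ALLOW_Z8; /* 0x700 */
        unsigned int allow_z10    = ALLOW_Z10;                       /* 0x400 */
        unsigned int allow_z8_z10 = ALLOW_Z10 | ALLOW_Z8;            /* 0x500 */
        unsigned int allow_z8     = ALLOW_Z8;                        /* 0x100 */

        /* hex output matches the %d -> 0x%x smu_print change above */
        printf("0x%x 0x%x 0x%x 0x%x\n",
               allow_all, allow_z10, allow_z8_z10, allow_z8);
        return 0;
    }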
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2c424e435962..4d5194293dbd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -414,6 +414,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->optimized_required || dc->wm_optimized_required)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -454,6 +456,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
int i = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -484,6 +488,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
bool ret = false;
struct crtc_position position;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -603,6 +609,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
if (pipe == NULL)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
/* By default, capture the full frame */
param.windowa_x_start = 0;
param.windowa_y_start = 0;
@@ -662,6 +670,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
struct pipe_ctx *pipe;
struct timing_generator *tg;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream)
@@ -686,6 +696,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
int i;
struct pipe_ctx *pipe_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -721,6 +733,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
if (option > DITHER_OPTION_MAX)
return;
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
stream->dither_option = option;
memset(&params, 0, sizeof(params));
@@ -745,6 +759,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -762,6 +778,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -788,6 +806,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
struct pipe_ctx *pipes_affected[MAX_PIPES];
int num_pipes_affected = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < num_streams; i++) {
struct dc_stream_state *stream = streams[i];
@@ -1766,6 +1786,8 @@ void dc_enable_stereo(
int i, j;
struct pipe_ctx *pipe;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1785,6 +1807,8 @@ void dc_enable_stereo(
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
}
@@ -2041,6 +2065,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (!streams_changed(dc, streams, stream_count))
return res;
+ dc_exit_ips_for_hw_access(dc);
+
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
for (i = 0; i < stream_count; i++) {
@@ -2428,6 +2454,10 @@ static enum surface_update_type get_scaling_info_update_type(
/* Changing clip size of a large surface may result in MPC slice count change */
update_flags->bits.bandwidth_change = 1;
+ if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
+ update_flags->bits.clip_size_change = 1;
+
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@@ -2441,7 +2471,8 @@ static enum surface_update_type get_scaling_info_update_type(
|| update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.position_change)
+ if (update_flags->bits.position_change ||
+ update_flags->bits.clip_size_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
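With the new clip_size_change bit, the ladder above reads: scaling-class changes stay FULL updates, while a pure clip resize now joins position changes at MED instead of falling through to FAST, since resizing the clip of a large surface may change the MPC slice count. A condensed, hypothetical sketch of the classification:

```c
enum surface_update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

struct update_bits {
	unsigned scaling_change   : 1;
	unsigned position_change  : 1;
	unsigned clip_size_change : 1;	/* new: clip w/h differs from surface */
};

static enum surface_update_type classify(const struct update_bits *b)
{
	if (b->scaling_change)
		return UPDATE_TYPE_FULL;	/* viewport/scaling: full update */
	if (b->position_change || b->clip_size_change)
		return UPDATE_TYPE_MED;		/* clip resize may change MPC slices */
	return UPDATE_TYPE_FAST;
}
```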
@@ -3376,6 +3407,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
top_pipe_to_program = resource_get_otg_master_for_stream(
@@ -3503,10 +3536,23 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+ }
+
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);
@@ -3541,17 +3587,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
-
- if (dc->debug.visual_confirm)
- dc_update_visual_confirm_color(dc, context, pipe);
- }
- }
-
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -4384,6 +4419,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
bool is_plane_addition = 0;
bool is_fast_update_only;
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
@@ -4504,6 +4541,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
int i, j;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -4688,6 +4727,8 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_state_construct(dc, dc->current_state);
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
dc->hwss.init_hw(dc);
@@ -4829,6 +4870,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
+void dc_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, false);
+}
+
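dc_exit_ips_for_hw_access() is the gate behind all of the calls added in this file: a no-op on ASICs without IPS support, otherwise it disables idle optimizations so the hardware is awake for the register traffic that follows. A sketch of the call pattern, with a hypothetical caller:

```c
#include <stdbool.h>

struct dc;					/* opaque, as in dc.h */
void dc_exit_ips_for_hw_access(struct dc *dc);	/* the helper added above */

/* Hypothetical caller: the shape every converted entry point follows. */
static bool stream_op_sketch(struct dc *dc)
{
	dc_exit_ips_for_hw_access(dc);	/* no-op when !dc->caps.ips_support */

	/* ... per-pipe register reads/writes ... */
	return true;
}
```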
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
uint32_t idle_state = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 9fbdb09697fd..1b7765bc5e5e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1834,23 +1834,6 @@ int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
{
-#ifdef DBG
- if (pipe_ctx->stream == NULL) {
- /* a free pipe with dangling states */
- ASSERT(!pipe_ctx->plane_state);
- ASSERT(!pipe_ctx->prev_odm_pipe);
- ASSERT(!pipe_ctx->next_odm_pipe);
- ASSERT(!pipe_ctx->top_pipe);
- ASSERT(!pipe_ctx->bottom_pipe);
- } else if (pipe_ctx->top_pipe) {
- /* a secondary DPP pipe must be signed to a plane */
- ASSERT(pipe_ctx->plane_state)
- }
- /* Add more checks here to prevent corrupted pipe ctx. It is very hard
- * to debug this issue afterwards because we can't pinpoint the code
- * location causing inconsistent pipe context states.
- */
-#endif
switch (type) {
case OTG_MASTER:
return !pipe_ctx->prev_odm_pipe &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 54670e0b1518..51a970fcb5d0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -423,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
@@ -493,6 +495,8 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
if (dwb->funcs->set_fc_enable)
dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);
@@ -542,6 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
/* disable writeback */
if (dc->hwss.disable_writeback) {
struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
@@ -557,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc,
int num_dwb,
struct dc_writeback_info *wb_info)
{
+ dc_exit_ips_for_hw_access(dc);
+
if (dc->hwss.mmhubbub_warmup)
return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
else
@@ -569,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -597,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -628,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -664,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
if (i == MAX_PIPES)
return true;
+ dc_exit_ips_for_hw_access(dc);
+
return dc->hwss.dmdata_status_done(pipe);
}
@@ -698,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.program_dmdata_engine(pipe_ctx);
if (hubp->funcs->dmdata_set_attributes != NULL &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 19a2c7140ae8..19140fb65787 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -161,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status(
break;
}
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index c9317ea0258e..9b42f6fc8c69 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -51,7 +51,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.266"
+#define DC_VER "3.2.272"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -429,12 +429,12 @@ struct dc_config {
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
- bool use_old_fixed_vs_sequence;
bool dc_mode_clk_limit_support;
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
+ bool usb4_bw_alloc_support;
};
enum visual_confirm {
@@ -987,9 +987,11 @@ struct dc_debug_options {
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
+ bool disable_dmub_reallow_idle;
bool disable_timeout;
bool disable_extblankadj;
unsigned int static_screen_wait_frames;
+ bool force_chroma_subsampling_1tap;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1068,6 +1070,7 @@ struct dc {
} scratch;
struct dml2_configuration_options dml2_options;
+ enum dc_acpi_cm_power_state power_state;
};
enum frame_buffer_mode {
@@ -1249,6 +1252,7 @@ union surface_update_flags {
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
+ uint32_t clip_size_change: 1;
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
@@ -1568,7 +1572,19 @@ struct dc_link {
enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
+ /* Pending/Current test patterns are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases where lane adjust and set test pattern
+ * calls are not performed atomically (e.g. during link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain required context for any future
+ * set pattern/set lane adjust to transition between override state(s).
+ */
enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
union compliance_test_state compliance_test_state;
void *priv;
@@ -2219,11 +2235,9 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
-#if defined(CONFIG_DRM_AMD_DC_FP)
// 'true' if MST topology supports DSC passthrough for sink
// 'false' if MST topology does not support DSC passthrough
bool is_dsc_passthrough_supported;
-#endif
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
@@ -2325,6 +2339,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+void dc_exit_ips_for_hw_access(struct dc *dc);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 363d522603a2..6083b1dcf050 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -74,7 +74,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
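The same retry idiom appears three times in this file: one bounded 100 ms wait, re-armed indefinitely when the disable_timeout debug option is set, so a debug session hangs visibly rather than timing out. A standalone sketch, with a dummy wait standing in for dmub_srv_wait_for_idle():

```c
#include <stdbool.h>
#include <stdint.h>

enum status_sketch { STATUS_OK, STATUS_TIMEOUT };

/* Dummy standing in for one bounded dmub_srv_wait_for_idle() call. */
static enum status_sketch wait_once(uint32_t timeout_us)
{
	(void)timeout_us;
	return STATUS_OK;
}

static enum status_sketch wait_for_idle(bool disable_timeout)
{
	enum status_sketch status;

	do {
		status = wait_once(100000);	/* 100 ms bounded wait */
	} while (disable_timeout && status != STATUS_OK);

	return status;
}
```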
@@ -146,7 +149,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
- dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
@@ -187,7 +192,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
// Wait for DMUB to process command
if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
@@ -781,21 +788,22 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
} else if (subvp_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
} else {
- pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
}
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
- if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
+ phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
} else if (phantom_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
} else {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
}
break;
}
@@ -1193,11 +1201,17 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
+ struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
if (dc->debug.dmcub_emulation)
return;
+ if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
+ return;
+
+ dc_dmub_srv = dc->ctx->dmub_srv;
+
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
@@ -1208,19 +1222,42 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
if (allow_idle) {
- if (dc->hwss.set_idle_state)
- dc->hwss.set_idle_state(dc, true);
+ volatile struct dmub_shared_state_ips_driver *ips_driver =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
+ union dmub_shared_state_ips_driver_signals new_signals;
+
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+ memset(&new_signals, 0, sizeof(new_signals));
+
+ if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
+ dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
+ new_signals.bits.allow_ips1 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ }
+
+ ips_driver->signals = new_signals;
}
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
/* We also do not perform a wait since DMCUB could enter idle after the notification. */
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
}
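The idle notification now publishes, through DMUB shared state, exactly which power features the driver allows, derived from the disable_ips policy. The branches above can be read as a cumulative ladder; a sketch with hypothetical names mirroring the diff:

```c
#include <stdint.h>

union ips_signals {
	struct {
		uint32_t allow_pg   : 1;
		uint32_t allow_ips1 : 1;
		uint32_t allow_ips2 : 1;
		uint32_t allow_z10  : 1;
	} bits;
	uint32_t all;
};

enum ips_mode { MODE_ENABLE, MODE_DISABLE_IPS1, MODE_DISABLE_IPS2,
		MODE_DISABLE_IPS2_Z10, MODE_DISABLE_DYNAMIC };

static union ips_signals allowed_signals(enum ips_mode mode)
{
	union ips_signals s = { .all = 0 };

	switch (mode) {
	case MODE_ENABLE:
	case MODE_DISABLE_DYNAMIC:
		s.bits.allow_z10 = 1;		/* everything allowed */
		/* fall through */
	case MODE_DISABLE_IPS2_Z10:
		s.bits.allow_ips2 = 1;		/* Z10 blocked */
		/* fall through */
	case MODE_DISABLE_IPS2:
		s.bits.allow_pg = 1;		/* IPS2 blocked */
		s.bits.allow_ips1 = 1;
		break;
	case MODE_DISABLE_IPS1:
		s.bits.allow_ips1 = 1;		/* IPS1 only, no PG */
		break;
	}
	return s;
}
```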
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
- uint32_t allow_state = 0;
- uint32_t commit_state = 0;
+ struct dc_dmub_srv *dc_dmub_srv;
if (dc->debug.dmcub_emulation)
return;
@@ -1228,61 +1265,44 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
- if (dc->hwss.get_idle_state &&
- dc->hwss.set_idle_state &&
- dc->clk_mgr->funcs->exit_low_power_state) {
+ dc_dmub_srv = dc->ctx->dmub_srv;
- allow_state = dc->hwss.get_idle_state(dc);
- dc->hwss.set_idle_state(dc, false);
+ if (dc->clk_mgr->funcs->exit_low_power_state) {
+ volatile const struct dmub_shared_state_ips_fw *ips_fw =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
+ volatile struct dmub_shared_state_ips_driver *ips_driver =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
+ union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
- if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
- // Wait for evaluation time
- for (;;) {
- udelay(dc->debug.ips2_eval_delay_us);
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS2_ALLOW_MASK)
- break;
+ ips_driver->signals.all = 0;
- /* allow was still set, retry eval delay */
- dc->hwss.set_idle_state(dc, false);
- }
+ if (prev_driver_signals.bits.allow_ips2) {
+ udelay(dc->debug.ips2_eval_delay_us);
- if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
+ if (ips_fw->signals.bits.ips2_commit) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
- for (;;) {
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS2_COMMIT_MASK)
- break;
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ while (ips_fw->signals.bits.ips2_commit)
udelay(1);
- }
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- /* TODO: See if we can return early here - IPS2 should go
- * back directly to IPS0 and clear the flags, but it will
- * be safer to directly notify DMCUB of this.
- */
- allow_state = dc->hwss.get_idle_state(dc);
+ dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
- if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
- for (;;) {
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS1_COMMIT_MASK)
- break;
-
+ if (prev_driver_signals.bits.allow_ips1) {
+ while (ips_fw->signals.bits.ips1_commit)
udelay(1);
- }
+
}
}
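The rewritten exit path polls the firmware half of the same shared state instead of scratch-register masks. Condensed, the IPS2 leg is: clear all allow bits, wait the evaluation window, and if firmware committed to IPS2, kick PMFW, wait the worst-case entry time, kick again, then spin until the commit bit clears. A sketch with dummy helpers standing in for udelay() and the clk_mgr callback:

```c
struct ips_fw_signals { unsigned int ips1_commit : 1, ips2_commit : 1; };

static void delay_us(unsigned int us) { (void)us; }	/* stand-in for udelay() */
static void pmfw_exit_low_power(void) { }		/* stand-in for clk_mgr callback */

static void ips2_exit_sketch(volatile struct ips_fw_signals *fw,
			     unsigned int eval_delay_us,
			     unsigned int entry_delay_us)
{
	delay_us(eval_delay_us);		/* firmware evaluation window */

	if (fw->ips2_commit) {
		pmfw_exit_low_power();		/* ask PMFW to leave IPS2 */
		delay_us(entry_delay_us);	/* IPS2 entry upper bound */
		pmfw_exit_low_power();

		while (fw->ips2_commit)		/* spin until FW de-commits */
			delay_us(1);
	}
}
```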
@@ -1364,7 +1384,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1413,7 +1433,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 811474f4419b..aae2f3a2660d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -827,9 +827,7 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
-#if defined(CONFIG_DRM_AMD_DC_FP)
bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
-#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */
@@ -942,6 +940,7 @@ struct dc_crtc_timing {
uint32_t hdmi_vic;
uint32_t rid;
uint32_t fr_index;
+ uint32_t frl_uncompressed_video_bandwidth_in_kbps;
enum dc_timing_3d_format timing_3d_format;
enum dc_color_depth display_color_depth;
enum dc_pixel_encoding pixel_encoding;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index f0458b8f00af..12f3c35b3a34 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -239,27 +239,294 @@ static void check_audio_bandwidth_hdmi(
}
}
}
+static struct fixed31_32 get_link_symbol_clk_freq_mhz(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return dc_fixpt_from_int(162); /* 162 MHz */
+ case LINK_RATE_HIGH:
+ return dc_fixpt_from_int(270); /* 270 MHz */
+ case LINK_RATE_HIGH2:
+ return dc_fixpt_from_int(540); /* 540 MHz */
+ case LINK_RATE_HIGH3:
+ return dc_fixpt_from_int(810); /* 810 MHz */
+ case LINK_RATE_UHBR10:
+ return dc_fixpt_from_fraction(3125, 10); /* 312.5 MHz */
+ case LINK_RATE_UHBR13_5:
+ return dc_fixpt_from_fraction(421875, 1000); /* 421.875 MHz */
+ case LINK_RATE_UHBR20:
+ return dc_fixpt_from_int(625); /* 625 MHz */
+ default:
+ /* Unexpected case, this requires debug if encountered. */
+ ASSERT(0);
+ return dc_fixpt_from_int(0);
+ }
+}
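The table above is the DP link-symbol clock: 8b/10b rates divide the lane bit rate by 10 and land on whole MHz, while 128b/132b rates divide by 32 and generally do not, which is why the helper returns fixed31_32 instead of an integer. A plain-double cross-check of the values (sketch, not driver code):

```c
#include <stdio.h>

int main(void)
{
	/* 8b/10b: symbol clock = lane rate / 10; 128b/132b: lane rate / 32. */
	printf("HBR2:     %.3f MHz\n", 5400.0 / 10.0);	/* 540.000 */
	printf("UHBR10:   %.3f MHz\n", 10000.0 / 32.0);	/* 312.500 */
	printf("UHBR13.5: %.3f MHz\n", 13500.0 / 32.0);	/* 421.875 */
	printf("UHBR20:   %.3f MHz\n", 20000.0 / 32.0);	/* 625.000 */
	return 0;
}
```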
+
+struct dp_audio_layout_config {
+ uint8_t layouts_per_sample_denom;
+ uint8_t symbols_per_layout;
+ uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+ uint32_t channel_count,
+ enum dp_link_encoding encoding,
+ struct dp_audio_layout_config *output)
+{
+ /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP,
+ * with each layout being the same size (8ch layout).
+ */
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ }
+}
-/*For DP SST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpsst(
+static uint32_t get_av_stream_map_lane_count(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t av_stream_map_lane_count = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (!is_mst)
+ av_stream_map_lane_count = lane_count;
+ else
+ av_stream_map_lane_count = 4;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ av_stream_map_lane_count = 4;
+ }
+
+ ASSERT(av_stream_map_lane_count != 0);
+
+ return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t audio_sdp_overhead = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (is_mst)
+ audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+ else
+ audio_sdp_overhead = lane_count * 2 + 8;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ audio_sdp_overhead = 10; /* 4 x 2.5 */
+ }
+
+ ASSERT(audio_sdp_overhead != 0);
+
+ return audio_sdp_overhead;
+}
+
+static uint32_t calculate_required_audio_bw_in_symbols(
const struct audio_crtc_info *crtc_info,
+ const struct dp_audio_layout_config *layout_config,
uint32_t channel_count,
- union audio_sample_rates *sample_rates)
+ uint32_t sample_rate_hz,
+ uint32_t av_stream_map_lane_count,
+ uint32_t audio_sdp_overhead)
+{
+ /* DP spec recommends a safety margin between 1.05 and 1.1 to prevent sample under-run */
+ struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+ struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+ crtc_info->requested_pixel_clock_100Hz, crtc_info->h_total * 10);
+ struct fixed31_32 samples_per_line;
+ struct fixed31_32 layouts_per_line;
+ struct fixed31_32 symbols_per_sdp_max_layout;
+ struct fixed31_32 remainder;
+ uint32_t num_sdp_with_max_layouts;
+ uint32_t required_symbols_per_hblank;
+
+ samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+ samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+ layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config->layouts_per_sample_denom);
+
+ num_sdp_with_max_layouts = dc_fixpt_floor(
+ dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp));
+ symbols_per_sdp_max_layout = dc_fixpt_from_int(
+ layout_config->max_layouts_per_audio_sdp * layout_config->symbols_per_layout);
+ symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+ symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+ required_symbols_per_hblank = num_sdp_with_max_layouts;
+ required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+
+ if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+ dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp))) {
+ remainder = dc_fixpt_sub_int(layouts_per_line,
+ num_sdp_with_max_layouts * layout_config->max_layouts_per_audio_sdp);
+ remainder = dc_fixpt_mul_int(remainder, layout_config->symbols_per_layout);
+ remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+ remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+ required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+ }
+
+ return required_symbols_per_hblank;
+}
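A quick numeric instance helps sanity-check the math above: for a 1080p60 timing (148.5 MHz pixel clock, h_total 2200) the line rate is 67.5 kHz, so about 0.711 samples of 48 kHz audio land in each line before the SDP overhead and the 1.1 margin are applied. Plain-double sketch rather than fixed31_32:

```c
#include <stdio.h>

int main(void)
{
	double pixel_clock_100hz = 1485000.0;	/* 148.5 MHz in 100 Hz units */
	double h_total = 2200.0;
	double sample_rate_hz = 48000.0;

	double line_freq_khz = pixel_clock_100hz / (h_total * 10.0);
	double samples_per_line = (sample_rate_hz / 1000.0) / line_freq_khz;

	printf("line rate %.2f kHz, %.3f audio samples per line\n",
	       line_freq_khz, samples_per_line);	/* ~67.50 kHz, ~0.711 */
	return 0;
}
```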
+
+/* Current calculation only applicable for 8b/10b MST and 128b/132b SST/MST.
+ */
+static uint32_t calculate_available_hblank_bw_in_symbols(
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info)
{
- /* do nothing */
+ uint64_t hblank = crtc_info->h_total - crtc_info->h_active;
+ struct fixed31_32 hblank_time_msec =
+ dc_fixpt_from_fraction(hblank * 10, crtc_info->requested_pixel_clock_100Hz);
+ struct fixed31_32 lsclkfreq_mhz =
+ get_link_symbol_clk_freq_mhz(dp_link_info->link_rate);
+ struct fixed31_32 average_stream_sym_bw_frac;
+ struct fixed31_32 peak_stream_bw_kbps;
+ struct fixed31_32 bits_per_pixel;
+ struct fixed31_32 link_bw_kbps;
+ struct fixed31_32 available_stream_sym_count;
+ uint32_t available_hblank_bw = 0; /* in stream symbols */
+
+ if (crtc_info->dsc_bits_per_pixel) {
+ bits_per_pixel = dc_fixpt_from_fraction(crtc_info->dsc_bits_per_pixel, 16);
+ } else {
+ switch (crtc_info->color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_pixel = dc_fixpt_from_int(6);
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_pixel = dc_fixpt_from_int(8);
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_pixel = dc_fixpt_from_int(10);
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_pixel = dc_fixpt_from_int(12);
+ break;
+ default:
+ /* Default to commonly supported color depth. */
+ bits_per_pixel = dc_fixpt_from_int(8);
+ break;
+ }
+
+ bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 3);
+
+ if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 3);
+ bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 2);
+ } else if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 2);
+ }
+ }
+
+ /* Use simple stream BW calculation because mainlink overhead is
+ * accounted for separately in the audio BW calculations.
+ */
+ peak_stream_bw_kbps = dc_fixpt_from_fraction(crtc_info->requested_pixel_clock_100Hz, 10);
+ peak_stream_bw_kbps = dc_fixpt_mul(peak_stream_bw_kbps, bits_per_pixel);
+ link_bw_kbps = dc_fixpt_from_int(dp_link_info->link_bandwidth_kbps);
+ average_stream_sym_bw_frac = dc_fixpt_div(peak_stream_bw_kbps, link_bw_kbps);
+
+ available_stream_sym_count = dc_fixpt_mul_int(hblank_time_msec, 1000);
+ available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, lsclkfreq_mhz);
+ available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, average_stream_sym_bw_frac);
+ available_hblank_bw = dc_fixpt_floor(available_stream_sym_count);
+ available_hblank_bw *= dp_link_info->lane_count;
+ available_hblank_bw -= crtc_info->dsc_num_slices * 4; /* EOC overhead */
+
+ if (available_hblank_bw < dp_link_info->hblank_min_symbol_width)
+ available_hblank_bw = dp_link_info->hblank_min_symbol_width;
+
+ if (available_hblank_bw < 12)
+ available_hblank_bw = 0;
+ else
+ available_hblank_bw -= 12; /* Main link overhead */
+
+ return available_hblank_bw;
}
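The bits-per-pixel branch above scales for chroma subsampling: 4:2:2 carries two components per pixel (x2/3) and 4:2:0 half the data (x1/2), so 8 bpc yields 24, 16, and 12 bpp respectively. A plain-integer sketch of that step:

```c
enum enc_sketch { ENC_444, ENC_422, ENC_420 };

static int effective_bpp(int bits_per_component, enum enc_sketch enc)
{
	int bpp = bits_per_component * 3;	/* three components: 24 at 8 bpc */

	if (enc == ENC_422)
		bpp = bpp / 3 * 2;		/* two components per pixel: 16 */
	else if (enc == ENC_420)
		bpp /= 2;			/* half the data: 12 */

	return bpp;
}
```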
-/*For DP MST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpmst(
+static void check_audio_bandwidth_dp(
const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info,
uint32_t channel_count,
union audio_sample_rates *sample_rates)
{
- /* do nothing */
+ struct dp_audio_layout_config layout_config = {0};
+ uint32_t available_hblank_bw;
+ uint32_t av_stream_map_lane_count;
+ uint32_t audio_sdp_overhead;
+
+ /* TODO: Add validation for SST 8b/10b case */
+ if (!dp_link_info->is_mst && dp_link_info->encoding == DP_8b_10b_ENCODING)
+ return;
+
+ available_hblank_bw = calculate_available_hblank_bw_in_symbols(
+ crtc_info, dp_link_info);
+ av_stream_map_lane_count = get_av_stream_map_lane_count(
+ dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+ audio_sdp_overhead = get_audio_sdp_overhead(
+ dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+ get_audio_layout_config(
+ channel_count, dp_link_info->encoding, &layout_config);
+
+ if (layout_config.max_layouts_per_audio_sdp == 0 ||
+ layout_config.symbols_per_layout == 0 ||
+ layout_config.layouts_per_sample_denom == 0) {
+ return;
+ }
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 192000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_192 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 176400,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_176_4 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 96000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_96 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 88200,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_88_2 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 48000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_48 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 44100,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_44_1 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 32000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_32 = 0;
}
static void check_audio_bandwidth(
const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info,
uint32_t channel_count,
enum signal_type signal,
union audio_sample_rates *sample_rates)
@@ -271,12 +538,9 @@ static void check_audio_bandwidth(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
- check_audio_bandwidth_dpsst(
- crtc_info, channel_count, sample_rates);
- break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
- check_audio_bandwidth_dpmst(
- crtc_info, channel_count, sample_rates);
+ check_audio_bandwidth_dp(
+ crtc_info, dp_link_info, channel_count, sample_rates);
break;
default:
break;
@@ -394,7 +658,8 @@ void dce_aud_az_configure(
struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info)
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info)
{
struct dce_audio *aud = DCE_AUD(audio);
@@ -529,6 +794,7 @@ void dce_aud_az_configure(
check_audio_bandwidth(
crtc_info,
+ dp_link_info,
channel_count,
signal,
&sample_rates);
@@ -588,6 +854,7 @@ void dce_aud_az_configure(
check_audio_bandwidth(
crtc_info,
+ dp_link_info,
8,
signal,
&sample_rate);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index dbd2cfed0603..539f881928d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -170,7 +170,8 @@ void dce_aud_az_disable(struct audio *audio);
void dce_aud_az_configure(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info);
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info);
void dce_aud_wall_dto_setup(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index ba1fec3016d5..bf636b28e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -65,5 +65,9 @@ bool should_use_dmub_lock(struct dc_link *link)
{
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
+
+ if (link->replay_settings.replay_feature_enabled)
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 38e4797e9476..b010814706fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -258,7 +258,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
*residency = 0;
}
-/**
+/*
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
@@ -280,7 +280,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-/**
+/*
* send Replay general cmd to DMUB.
*/
static void dmub_replay_send_cmd(struct dmub_replay *dmub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 3538973bd0c6..b7e57aa27361 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -62,6 +62,26 @@ void cm_helper_program_color_matrices(
}
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+ uint16_t *regval,
+ const struct color_matrices_reg *reg)
+{
+ uint32_t cur_csc_reg, regval0, regval1;
+ unsigned int i = 0;
+
+ for (cur_csc_reg = reg->csc_c11_c12;
+ cur_csc_reg <= reg->csc_c33_c34; cur_csc_reg++) {
+ REG_GET_2(cur_csc_reg,
+ csc_c11, &regval0,
+ csc_c12, &regval1);
+
+ regval[2 * i] = regval0;
+ regval[(2 * i) + 1] = regval1;
+
+ i++;
+ }
+}
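Each CSC register packs two coefficients, so the helper walks a consecutive register range and fans each read out into two entries of the flat coefficient array. In the sketch below the fixed 16-bit split is a simplifying assumption; the real helper extracts the fields through the shift/mask pairs carried in reg:

```c
#include <stdint.h>

uint32_t read_reg(uint32_t addr);	/* hypothetical accessor */

static void read_pairs(uint32_t first, uint32_t last, uint16_t *out)
{
	unsigned int i = 0;

	for (uint32_t reg = first; reg <= last; reg++, i++) {
		uint32_t v = read_reg(reg);
		out[2 * i]     = v & 0xffff;		/* c11-style field */
		out[2 * i + 1] = (v >> 16) & 0xffff;	/* c12-style field */
	}
}
```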
+
void cm_helper_program_xfer_func(
struct dc_context *ctx,
const struct pwl_params *params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 0a68b63d6126..decc50b1ac53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -114,5 +114,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params);
-
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+ uint16_t *regval,
+ const struct color_matrices_reg *reg);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index ef52e6b6eccf..4e391fd1d71c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -543,7 +543,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
.dpp_program_blnd_lut = NULL,
.dpp_program_shaper_lut = NULL,
- .dpp_program_3dlut = NULL
+ .dpp_program_3dlut = NULL,
+ .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap,
};
static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index c9e045666dcc..a039eedc7c24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1521,4 +1521,7 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
const struct dcn_dpp_registers *tf_regs,
const struct dcn_dpp_shift *tf_shift,
const struct dcn_dpp_mask *tf_mask);
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 904c2d278998..2f994a3a0b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -98,7 +98,7 @@ static void program_gamut_remap(
if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
- CM_GAMUT_REMAP_MODE, 0);
+ CM_GAMUT_REMAP_MODE, 0);
return;
}
switch (select) {
@@ -181,6 +181,74 @@ void dpp1_cm_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn10_dpp *dpp,
+ uint16_t *regval,
+ enum gamut_remap_select *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ REG_GET(CM_GAMUT_REMAP_CONTROL,
+ CM_GAMUT_REMAP_MODE, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMB_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint16_t arr_reg_val[12];
+ enum gamut_remap_select select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
static void dpp1_cm_program_color_matrix(
struct dcn10_dpp *dpp,
const uint16_t *regval)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 0dec57679269..48a40dcc7050 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_types.h"
#include "dm_services.h"
#include "dcn10_opp.h"
#include "reg_helper.h"
@@ -160,6 +161,9 @@ static void opp1_set_pixel_encoding(
struct dcn10_opp *oppn10,
const struct clamping_and_pixel_encoding_params *params)
{
+ bool force_chroma_subsampling_1tap =
+ oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap;
+
switch (params->pixel_encoding) {
case PIXEL_ENCODING_RGB:
@@ -178,6 +182,9 @@ static void opp1_set_pixel_encoding(
default:
break;
}
+
+ if (force_chroma_subsampling_1tap)
+ REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0);
}
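The debug override deliberately lands after the per-encoding switch, so whatever subsampling mode was just programmed is forced back to mode 0 (1-tap, per the flag name) when force_chroma_subsampling_1tap is set. In sketch form, with hypothetical helpers:

```c
void program_subsampling_for(int encoding_mode);	/* hypothetical */
void write_fmt_subsampling_mode(int mode);		/* hypothetical */

static void set_pixel_encoding_sketch(int encoding_mode, int force_1tap)
{
	program_subsampling_for(encoding_mode);

	/* Debug knob wins last: force 1-tap regardless of encoding. */
	if (force_1tap)
		write_fmt_subsampling_mode(0);
}
```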
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index eaa7032f0f1a..1516c0a48726 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -55,21 +55,23 @@ void dpp20_read_state(struct dpp *dpp_base,
REG_GET(DPP_CONTROL,
DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // Degamma LUT (RAM)
REG_GET(CM_DGAM_CONTROL,
- CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
- // BGAM has no ROM, and definition is different, can't reuse same dump
- //REG_GET(CM_BLNDGAM_CONTROL,
- // CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode);
- REG_GET(CM_GAMUT_REMAP_CONTROL,
- CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
- if (s->gamut_remap_mode) {
- s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
- s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
- s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
- s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
- s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
- s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
- }
+ CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+
+ // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+ REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode,
+ CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_SIZE, &s->lut3d_size);
+
+ // Blend/Out Gamma (RAM)
+ REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK,
+ CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode);
}
void dpp2_power_on_obuf(
@@ -393,6 +395,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
};
static struct dpp_caps dcn20_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index e735363d0051..672cde46c4b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -775,4 +775,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2,
void dpp2_power_on_obuf(
struct dpp *dpp_base,
bool power_on);
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 598caa508d43..58dc69926e8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -234,6 +234,61 @@ void dpp2_cm_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn20_dpp *dpp,
+ uint16_t *regval,
+ enum dcn20_gamut_remap_select *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == DCN2_GAMUT_REMAP_COEF_B) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ uint16_t arr_reg_val[12];
+ enum dcn20_gamut_remap_select select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == DCN2_GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
void dpp2_program_input_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 5da6e44f284a..16b5ff208d14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -542,8 +542,30 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
+static void mpc2_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+ REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+ REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+ MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+ REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+ MPCC_BUSY, &s->busy);
+
+ /* Gamma block state */
+ REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst],
+ MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode);
+}
+
static const struct mpc_funcs dcn20_mpc_funcs = {
- .read_mpcc_state = mpc1_read_mpcc_state,
+ .read_mpcc_state = mpc2_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
index a7268027a472..f809a7d21033 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -275,6 +275,7 @@ static struct dpp_funcs dcn201_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
};
static struct dpp_caps dcn201_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 11f7746f3a65..a3a769aad042 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -44,12 +44,45 @@
void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t gamcor_lut_mode, rgam_lut_mode;
REG_GET(DPP_CONTROL,
- DPP_CLOCK_ENABLE, &s->is_enabled);
+ DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // Pre-degamma (ROM)
+ REG_GET_2(PRE_DEGAM,
+ PRE_DEGAM_MODE, &s->pre_dgam_mode,
+ PRE_DEGAM_SELECT, &s->pre_dgam_select);
+
+ // Gamma Correction (RAM)
+ REG_GET(CM_GAMCOR_CONTROL,
+ CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode);
+ if (s->gamcor_mode) {
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode);
+ if (!gamcor_lut_mode)
+ s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ }
- // TODO: Implement for DCN3
+ // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_SIZE, &s->lut3d_size);
+
+ // Blend/Out Gamma (RAM)
+ REG_GET(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode);
+ if (s->rgam_lut_mode) {
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode);
+ if (!rgam_lut_mode)
+ s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ }
}
+
/*program post scaler scs block in dpp CM*/
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -1462,6 +1495,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
index cea3208e4ab1..2ac8045a87a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -637,4 +637,6 @@ void dpp3_program_cm_dealpha(
struct dpp *dpp_base,
uint32_t enable, uint32_t additive_blending);
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 5f97a868ada3..2f5b3fbd3507 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -405,3 +405,57 @@ void dpp3_cm_set_gamut_remap(
program_gamut_remap(dpp, arr_reg_val, gamut_mode);
}
}
+
+static void read_gamut_remap(struct dcn3_dpp *dpp,
+ uint16_t *regval,
+ int *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ //current coefficient set in use
+ REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint16_t arr_reg_val[12];
+ int select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index d1500b223858..bf3386cd444d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1129,6 +1129,64 @@ void mpc3_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn30_mpc *mpc30,
+ int mpcc_id,
+ uint16_t *regval,
+ uint32_t *select)
+{
+ struct color_matrices_reg gam_regs;
+
+ //current coefficient set in use
+ REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select);
+
+ gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
+ gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
+
+ cm_helper_read_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
+
+ cm_helper_read_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ }
+
+}
+
+void mpc3_get_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ struct mpc_grph_gamut_adjustment *adjust)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint16_t arr_reg_val[12];
+ int select;
+
+ read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
bool mpc3_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
@@ -1382,8 +1440,54 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
}
}
+static void mpc3_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint32_t rmu_status = 0xf;
+
+ REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+ REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+ REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+ MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+ REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+ MPCC_BUSY, &s->busy);
+
+ /* Color blocks state */
+ REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status);
+
+ if (rmu_status == mpcc_inst) {
+ REG_GET(SHAPER_CONTROL[0],
+ MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+ REG_GET(RMU_3DLUT_MODE[0],
+ MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0],
+ MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(RMU_3DLUT_MODE[0],
+ MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+ } else {
+ REG_GET(SHAPER_CONTROL[1],
+ MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+ REG_GET(RMU_3DLUT_MODE[1],
+ MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1],
+ MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(RMU_3DLUT_MODE[1],
+ MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+ }
+
+ REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst],
+ MPCC_OGAM_MODE_CURRENT, &s->rgam_mode,
+ MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
+}
+
static const struct mpc_funcs dcn30_mpc_funcs = {
- .read_mpcc_state = mpc1_read_mpcc_state,
+ .read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
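A side note on mpc3_read_mpcc_state above: DCN30 shares its shaper/3DLUT hardware through RMU muxes, and the function queries only MPC_RMU0_MUX_STATUS — an MPCC that does not currently own RMU0 is reported from RMU1's registers, whether or not it actually owns RMU1. A one-function restatement of that selection, under that reading of the hunk:

#include <stdio.h>

/* Mirrors the hunk's selection: report RMU0's LUT state when its mux
 * status names this MPCC, otherwise fall through to RMU1. */
static int rmu_index_for_mpcc(unsigned int rmu0_mux_status, int mpcc_inst)
{
	return (rmu0_mux_status == (unsigned int)mpcc_inst) ? 0 : 1;
}

int main(void)
{
	printf("RMU0 owned by mpcc 2: mpcc 2 -> RMU%d, mpcc 3 -> RMU%d\n",
	       rmu_index_for_mpcc(2, 2), rmu_index_for_mpcc(2, 3));
	return 0;
}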
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 5198f2167c7c..9cb96ae95a2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1056,6 +1056,10 @@ void mpc3_set_gamut_remap(
int mpcc_id,
const struct mpc_grph_gamut_adjustment *adjust);
+void mpc3_get_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ struct mpc_grph_gamut_adjustment *adjust);
+
void mpc3_set_rmu_mux(
struct mpc *mpc,
int rmu_idx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
index dcf12a0b031c..681e75c6dbaf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
@@ -133,6 +133,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 4229369c57f4..f4d3f04ec857 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -26,6 +26,9 @@
#ifndef DM_CP_PSP_IF__H
#define DM_CP_PSP_IF__H
+/*
+ * Interface to CPLIB/PSP to enable ASSR
+ */
struct dc_link;
struct cp_psp_stream_config {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 63c48c29ba49..e7f4a2d491cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -4273,7 +4273,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
//
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
@@ -4576,7 +4576,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Return BW
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->BlendingAndTiming[k] == k) {
@@ -4635,7 +4635,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency);
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
}
@@ -4646,7 +4646,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (v->ClampMinDCFCLK) {
/* Clamp calculated values to actual minimum */
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {
v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk;
@@ -4656,7 +4656,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(
v->ReturnBusWidth * v->DCFCLKState[i][j],
@@ -4674,7 +4674,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Re-ordering Buffer Support Check
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
> (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
@@ -4692,7 +4692,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
}
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
@@ -4708,7 +4708,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Prefetch Check
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
int NextPrefetchModeState = MinPrefetchMode;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index 3eb3a021ab7d..3f02bb806d42 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
+ } else {
+ /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+ * This is required for dcn303 because its memory bandwidth is low enough
+ * that the optimal DCFCLK for every UCLK is lower than the smallest DCFCLK
+ * STA target. In this case we need to
+ * populate the optimal UCLK for each DCFCLK STA target to be the max UCLK.
+ */
+ if (j == num_uclk_states - 1) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ }
}
}
}
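To make the fallback concrete: for each DCFCLK STA target the loop searches the UCLK states for the first one whose optimal DCFCLK covers the target, and if none does — the dcn303 case described in the comment — it settles on the last, i.e. maximum, UCLK. A standalone model of the search; the array contents are illustrative, not the driver's tables:

#include <stdio.h>

/* Illustrative numbers only: every optimal DCFCLK sits below the smallest
 * STA target, which is exactly the dcn303 situation the comment describes. */
static const int optimal_dcfclk_for_uclk[] = { 300, 400, 500 };
static const int dcfclk_sta_targets[] = { 694, 875, 1000 };

int main(void)
{
	const int num_uclk_states = 3;

	for (int i = 0; i < 3; i++) {
		int chosen = -1;

		for (int j = 0; j < num_uclk_states; j++) {
			if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
				chosen = j;	/* first UCLK that covers the target */
				break;
			} else if (j == num_uclk_states - 1) {
				chosen = j;	/* fallback: max UCLK */
			}
		}
		printf("STA target %d MHz -> UCLK state %d\n",
		       dcfclk_sta_targets[i], chosen);
	}
	return 0;
}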
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 7ea2bd5374d5..80bebfc268db 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -583,12 +583,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
plane_count++;
}
- if (plane_count == 0) {
+ if (context->stream_count == 0 || plane_count == 0) {
support = DCN_ZSTATE_SUPPORT_ALLOW;
- } else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
bool is_pwrseq0 = link && link->link_index == 0;
- bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
int minmum_z8_residency =
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
@@ -596,12 +598,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
+ /* For PSR1/PSR-SU, allow Z8 and Z10 based on latency; for Replay with IPS enabled, it will enter IPS2. */
if (is_pwrseq0 && allow_z10)
support = DCN_ZSTATE_SUPPORT_ALLOW;
- else if (is_pwrseq0 && is_psr1)
+ else if (is_pwrseq0 && (is_psr || is_replay))
support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else if (allow_z8)
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+
}
context->bw_ctx.bw.dcn.clk.zstate_support = support;
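Read as a truth table, the reworked decision is: no streams or planes — allow all Z-states; a single eDP stream on pwrseq0 — full allow if the Z10 residency floor is met, else Z8+Z10 or Z10-only for PSR1/PSR-SU/Replay panels depending on the Z8 floor. A standalone restatement, under the assumption that the function's initial value of support (not visible in this hunk) is a disallow default:

#include <stdbool.h>
#include <stdio.h>

enum zstate { DISALLOW, ALLOW, ALLOW_Z8_ONLY, ALLOW_Z8_Z10_ONLY, ALLOW_Z10_ONLY };

static enum zstate decide(bool no_content, bool single_edp_pwrseq0,
			  bool psr_or_replay, bool allow_z8, bool allow_z10)
{
	enum zstate support = DISALLOW;	/* assumed default; not shown in hunk */

	if (no_content)
		support = ALLOW;
	else if (single_edp_pwrseq0) {
		if (allow_z10)
			support = ALLOW;
		else if (psr_or_replay)
			support = allow_z8 ? ALLOW_Z8_Z10_ONLY : ALLOW_Z10_ONLY;
		else if (allow_z8)
			support = ALLOW_Z8_ONLY;
	}
	return support;
}

int main(void)
{
	static const char * const names[] = {
		"DISALLOW", "ALLOW", "Z8_ONLY", "Z8_Z10_ONLY", "Z10_ONLY" };

	/* eDP on pwrseq0 with PSR-SU, stutter too short for Z8 or Z10 */
	printf("%s\n", names[decide(false, true, true, false, false)]);
	return 0;
}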
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 0baf39d64a2d..a52c594e1ba4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -141,14 +141,33 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
{
int i;
unsigned int num_found = 0;
- unsigned int plane_id_assigned_to_pipe;
+ unsigned int plane_id_assigned_to_pipe = -1;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
- if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state,
- state->res_ctx.pipe_ctx[i].stream->stream_id,
- ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) {
- if (plane_id_assigned_to_pipe == plane_id)
- pipes[num_found++] = i;
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state || !pipe->stream)
+ continue;
+
+ get_plane_id(ctx, state, pipe->plane_state, pipe->stream->stream_id,
+ ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[pipe->pipe_idx],
+ &plane_id_assigned_to_pipe);
+ if (plane_id_assigned_to_pipe == plane_id && !pipe->prev_odm_pipe
+ && (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) {
+ while (pipe) {
+ struct pipe_ctx *mpc_pipe = pipe;
+
+ while (mpc_pipe) {
+ pipes[num_found++] = mpc_pipe->pipe_idx;
+ mpc_pipe = mpc_pipe->bottom_pipe;
+ if (!mpc_pipe)
+ break;
+ if (mpc_pipe->plane_state != pipe->plane_state)
+ mpc_pipe = NULL;
+ }
+ pipe = pipe->next_odm_pipe;
+ }
+ break;
}
}
@@ -566,8 +585,14 @@ static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, stru
unsigned int num_found = 0;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
- if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
- pipes[num_found++] = i;
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->stream_id == stream_id && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ while (pipe) {
+ pipes[num_found++] = pipe->pipe_idx;
+ pipe = pipe->next_odm_pipe;
+ }
+ break;
}
}
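Both helpers now start from a head pipe (no top_pipe, no prev_odm_pipe) and walk the pipe topology rather than collecting flat indices: find_pipes_assigned_to_stream follows next_odm_pipe, while find_pipes_assigned_to_plane additionally descends each ODM slice's bottom_pipe chain for as long as the plane matches. A stripped-down model of that two-level walk (the struct is a stand-in for pipe_ctx):

#include <stdio.h>

struct pipe {
	int idx;
	const void *plane;
	struct pipe *next_odm_pipe;	/* next horizontal ODM slice */
	struct pipe *bottom_pipe;	/* next MPC layer under this one */
};

/* Head pipe, its MPC chain, then each ODM sibling's chain. */
static unsigned int collect(struct pipe *head, const void *plane, int *out)
{
	unsigned int n = 0;

	for (struct pipe *odm = head; odm; odm = odm->next_odm_pipe)
		for (struct pipe *mpc = odm; mpc && mpc->plane == plane;
		     mpc = mpc->bottom_pipe)
			out[n++] = mpc->idx;
	return n;
}

int main(void)
{
	int plane;
	struct pipe b = { 1, &plane, NULL, NULL };
	struct pipe a = { 0, &plane, NULL, &b };	/* MPC: pipe 0 over pipe 1 */
	int out[4];
	unsigned int n = collect(&a, &plane, out);

	for (unsigned int i = 0; i < n; i++)
		printf("pipe %d\n", out[i]);
	return 0;
}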
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 1068b962d1c1..f15d1dbad6a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -234,7 +234,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state
if (state->streams[i]->stream_id == stream_id) {
for (j = 0; j < state->stream_status[i].plane_count; j++) {
if (state->stream_status[i].plane_states[j] == plane &&
- (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
+ (!is_plane_duplicate || (j == plane_index))) {
*plane_id = (i << 16) | j;
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0df6c55eb326..ac41f9c0a283 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -137,6 +137,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
kbps = apply_128b_132b_stream_overhead(timing, kbps);
+ if (link_encoding == DC_LINK_ENCODING_HDMI_FRL &&
+ timing->vic == 0 && timing->hdmi_vic == 0 &&
+ timing->frl_uncompressed_video_bandwidth_in_kbps != 0)
+ kbps = timing->frl_uncompressed_video_bandwidth_in_kbps;
+
return kbps;
}
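The dc_dsc.c hunk prefers the sink-reported uncompressed FRL bandwidth for HDMI FRL timings that are neither a CEA VIC nor an HDMI VIC mode (both zero), when that figure is present. A hedged standalone restatement — field names mirror the patch, the harness values are invented:

#include <stdint.h>
#include <stdio.h>

struct frl_timing {
	uint8_t vic, hdmi_vic;
	uint32_t frl_uncompressed_video_bandwidth_in_kbps;
};

static uint32_t bandwidth_kbps(const struct frl_timing *t,
			       uint32_t computed_kbps, int is_hdmi_frl)
{
	/* Non-CEA timing on FRL: prefer the sink-reported figure when present. */
	if (is_hdmi_frl && t->vic == 0 && t->hdmi_vic == 0 &&
	    t->frl_uncompressed_video_bandwidth_in_kbps != 0)
		return t->frl_uncompressed_video_bandwidth_in_kbps;
	return computed_kbps;
}

int main(void)
{
	struct frl_timing t = { 0, 0, 5000000 };	/* invented example values */

	printf("%u kbps\n", bandwidth_kbps(&t, 4800000, 1));	/* 5000000 */
	return 0;
}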
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 01493c49bd7a..9d5df4c0da59 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1291,6 +1291,46 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
+static void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ const struct dc_link *link = stream->link;
+ struct fixed31_32 link_bw_kbps;
+
+ dp_link_info->encoding = link->dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings);
+ dp_link_info->is_mst = (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+ dp_link_info->lane_count = pipe_ctx->link_config.dp_link_settings.lane_count;
+ dp_link_info->link_rate = pipe_ctx->link_config.dp_link_settings.link_rate;
+
+ link_bw_kbps = dc_fixpt_from_int(dc_link_bandwidth_kbps(link,
+ &pipe_ctx->link_config.dp_link_settings));
+
+ /* For audio stream calculations, the video stream should not include FEC or SSC
+ * in order to get the most pessimistic values.
+ */
+ if (dp_link_info->encoding == DP_8b_10b_ENCODING &&
+ link->dc->link_srv->dp_is_fec_supported(link)) {
+ link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+ dc_fixpt_from_fraction(100, DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100));
+ } else if (dp_link_info->encoding == DP_128b_132b_ENCODING) {
+ link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+ dc_fixpt_from_fraction(10000, 9975)); /* undo 0.25% SSC (99.75% efficiency) */
+ }
+
+ dp_link_info->link_bandwidth_kbps = dc_fixpt_floor(link_bw_kbps);
+
+ /* HW minimum for 128b/132b HBlank is 4 frame symbols.
+ * TODO: Plumb the actual programmed HBlank min symbol width to here.
+ */
+ if (dp_link_info->encoding == DP_128b_132b_ENCODING)
+ dp_link_info->hblank_min_symbol_width = 4;
+ else
+ dp_link_info->hblank_min_symbol_width = 0;
+}
+
static void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
@@ -1338,6 +1378,15 @@ static void build_audio_output(
audio_output->crtc_info.calculated_pixel_clock_100Hz =
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
+ audio_output->crtc_info.pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ audio_output->crtc_info.dsc_bits_per_pixel =
+ stream->timing.dsc_cfg.bits_per_pixel;
+
+ audio_output->crtc_info.dsc_num_slices =
+ stream->timing.dsc_cfg.num_slices_h;
+
/*for HDMI, audio ACR is with deep color ratio factor*/
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&
audio_output->crtc_info.requested_pixel_clock_100Hz ==
@@ -1371,6 +1420,10 @@ static void build_audio_output(
audio_output->pll_info.ss_percentage =
pipe_ctx->pll_settings.ss_percentage;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ populate_audio_dp_link_info(pipe_ctx, &audio_output->dp_link_info);
+ }
}
static void program_scaler(const struct dc *dc,
@@ -1507,7 +1560,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
&audio_output.crtc_info,
- &pipe_ctx->stream->audio_info);
+ &pipe_ctx->stream->audio_info,
+ &audio_output.dp_link_info);
}
/* make sure no pipes syncd to the pipe being enabled */
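The overhead arithmetic in populate_audio_dp_link_info is the interesting part: on the reading that dc_link_bandwidth_kbps already deducts FEC and SSC efficiency losses, the value is scaled back up so the audio calculation sees the rawest, most pessimistic link figure — by 100/eff for 8b/10b with FEC, and by 10000/9975 to undo the 0.25% SSC allowance on 128b/132b. The same arithmetic in plain integers; 97 is an assumed stand-in for DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example: effective link bandwidth after FEC/SSC deductions (kbps). */
	uint64_t link_bw_kbps = 25920000;

	/* 8b/10b with FEC: scale back up by the FEC efficiency factor. */
	uint64_t no_fec = link_bw_kbps * 100 / 97;

	/* 128b/132b: undo the 0.25% SSC allowance (99.75% efficiency). */
	uint64_t no_ssc = link_bw_kbps * 10000 / 9975;

	printf("8b/10b without FEC deduction:    %llu kbps\n",
	       (unsigned long long)no_fec);
	printf("128b/132b without SSC deduction: %llu kbps\n",
	       (unsigned long long)no_ssc);
	return 0;
}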
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 6dd479e8a348..314798400b16 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -283,33 +283,33 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
DTN_INFO("\n");
}
-void dcn10_log_hw_state(struct dc *dc,
- struct dc_log_buffer_ctx *log_ctx)
+static void dcn10_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
int i;
- DTN_INFO_BEGIN();
-
- dcn10_log_hubbub_state(dc, log_ctx);
-
- dcn10_log_hubp_states(dc, log_ctx);
-
- DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
- " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
- "C31 C32 C33 C34\n");
+ DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
for (i = 0; i < pool->pipe_count; i++) {
struct dpp *dpp = pool->dpps[i];
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
- "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
+ DTN_INFO("[%2d]: %11xh %11s %9s %9s"
+ " %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
dpp->inst,
s.igam_input_format,
(s.igam_lut_mode == 0) ? "BypassFixed" :
@@ -329,16 +329,42 @@ void dcn10_log_hw_state(struct dc *dc,
((s.rgam_lut_mode == 3) ? "RAM" :
((s.rgam_lut_mode == 4) ? "RAM" :
"Unknown")))),
- s.gamut_remap_mode,
- s.gamut_remap_c11_c12,
- s.gamut_remap_c13_c14,
- s.gamut_remap_c21_c22,
- s.gamut_remap_c23_c24,
- s.gamut_remap_c31_c32,
- s.gamut_remap_c33_c34);
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
DTN_INFO("\n");
}
DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d oscs:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
for (i = 0; i < pool->pipe_count; i++) {
@@ -352,6 +378,30 @@ void dcn10_log_hw_state(struct dc *dc,
s.idle);
}
DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
+void dcn10_log_hw_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO_BEGIN();
+
+ dcn10_log_hubbub_state(dc, log_ctx);
+
+ dcn10_log_hubp_states(dc, log_ctx);
+
+ if (dc->hwss.log_color_state)
+ dc->hwss.log_color_state(dc, log_ctx);
+ else
+ dcn10_log_color_state(dc, log_ctx);
DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
@@ -1840,6 +1890,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ if (!stream)
+ return false;
+
if (dpp == NULL)
return false;
@@ -1862,8 +1915,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
- if (stream != NULL && stream->ctx != NULL &&
- stream->out_transfer_func != NULL) {
+ if (stream->ctx &&
+ stream->out_transfer_func) {
log_tf(stream->ctx,
stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
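The dcn10_log_hw_state split above turns color logging into an optional hw_sequencer hook with the DCN10 formatter as fallback, so later generations (dcn20/dcn30 below) can override the table layout while dcn10_log_hw_state stays the single entry point. The shape of that dispatch, reduced to a standalone sketch:

#include <stdio.h>

struct hwss { void (*log_color_state)(void); };

static void dcn10_color_fallback(void) { puts("dcn10 color tables"); }
static void dcn30_color_override(void) { puts("dcn30 color tables"); }

static void log_hw_state(const struct hwss *h)
{
	if (h->log_color_state)
		h->log_color_state();	/* generation-specific override */
	else
		dcn10_color_fallback();	/* default DCN10 formatter */
}

int main(void)
{
	struct hwss dcn10 = { NULL };
	struct hwss dcn30 = { dcn30_color_override };

	log_hw_state(&dcn10);
	log_hw_state(&dcn30);
	return 0;
}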
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 931ac8ed7069..c55d5155ecb9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -71,6 +71,112 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+void dcn20_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth"
+ " 3DLUT size RGAM mode GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (!s.is_enabled)
+ continue;
+
+ DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ dpp->inst,
+ (s.dgam_lut_mode == 0) ? "Bypass" :
+ ((s.dgam_lut_mode == 1) ? "sRGB" :
+ ((s.dgam_lut_mode == 2) ? "Ycc" :
+ ((s.dgam_lut_mode == 3) ? "RAM" :
+ ((s.dgam_lut_mode == 4) ? "RAM" :
+ "Unknown")))),
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_lut_mode == 1) ? "RAM A" :
+ ((s.rgam_lut_mode == 2) ? "RAM B" : "Bypass"),
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d oscs:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
+
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
+ " OGAM mode\n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %9s\n",
+ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+ s.idle,
+ (s.rgam_mode == 1) ? "RAM A" :
+ ((s.rgam_mode == 2) ? "RAM B" :
+ "Bypass"));
+ }
+ DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
static int find_free_gsl_group(const struct dc *dc)
{
if (dc->res_pool->gsl_groups.gsl_0 == 0)
@@ -1633,6 +1739,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.clip_size_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
@@ -1645,6 +1752,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
hubp->funcs->mem_program_viewport(
@@ -1958,7 +2066,6 @@ void dcn20_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index d950b3e54ec2..5c874f7b0683 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -28,6 +28,8 @@
#include "hw_sequencer_private.h"
+void dcn20_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
bool dcn20_set_shaper_3dlut(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index c34c13e1e0a4..7e6b7f2a6dc9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -69,6 +69,155 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+void dcn30_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode"
+ " 3DLUT mode 3DLUT bit depth 3DLUT size RGAM mode"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (!s.is_enabled)
+ continue;
+
+ DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s"
+ " %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ dpp->inst,
+ s.pre_dgam_mode,
+ (s.pre_dgam_select == 0) ? "sRGB" :
+ ((s.pre_dgam_select == 1) ? "Gamma 2.2" :
+ ((s.pre_dgam_select == 2) ? "Gamma 2.4" :
+ ((s.pre_dgam_select == 3) ? "Gamma 2.6" :
+ ((s.pre_dgam_select == 4) ? "BT.709" :
+ ((s.pre_dgam_select == 5) ? "PQ" :
+ ((s.pre_dgam_select == 6) ? "HLG" :
+ "Unknown")))))),
+ (s.gamcor_mode == 0) ? "Bypass" :
+ ((s.gamcor_mode == 1) ? "RAM A" :
+ "RAM B"),
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_lut_mode == 0) ? "Bypass" :
+ ((s.rgam_lut_mode == 1) ? "RAM A" :
+ "RAM B"),
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d oscs:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
+
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
+ " SHAPER mode 3DLUT mode 3DLUT bit-depth 3DLUT size OGAM mode OGAM LUT"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ mpc3_get_gamut_remap(pool->mpc, i, &s.gamut_remap);
+
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %11s %11s %16s %11s %10s %9s"
+ " %-12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld\n",
+ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+ s.idle,
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_mode == 0) ? "Bypass" :
+ ((s.rgam_mode == 2) ? "RAM" :
+ "Unknown"),
+ (s.rgam_lut == 1) ? "B" : "A",
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+
+ }
+ DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
bool dcn30_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
@@ -1015,21 +1164,3 @@ void dcn30_prepare_bandwidth(struct dc *dc,
if (!dc->clk_mgr->clks.fw_based_mclk_switching)
dc_dmub_srv_p_state_delegate(dc, false, context);
}
-
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params)
-{
- unsigned int i;
- unsigned int triggers = 0;
-
- if (params->triggers.surface_update)
- triggers |= 0x100;
- if (params->triggers.cursor_update)
- triggers |= 0x8;
- if (params->triggers.force_trigger)
- triggers |= 0x1;
-
- for (i = 0; i < num_pipes; i++)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
- triggers, params->num_frames);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index e557e2b98618..638f018a3cb5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -52,6 +52,9 @@ bool dcn30_mmhubbub_warmup(
unsigned int num_dwb,
struct dc_writeback_info *wb_info);
+void dcn30_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
+
bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -90,7 +93,4 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params);
-
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 9894caedffed..ef913445a795 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 7423880fabb6..a760f0c6fe98 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -98,10 +98,8 @@ static void enable_memory_low_power(struct dc *dc)
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
if (dc->res_pool->stream_enc[i]->vpg)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
-#endif
}
}
@@ -617,3 +615,21 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
if (hws->ctx->dc->debug.hpo_optimization)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}
+
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x100;
+ if (params->triggers.cursor_update)
+ triggers |= 0x8;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
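dcn31_set_static_screen_control reintroduces the helper removed from DCN30 unchanged: it folds the requested trigger sources into an OTG event mask — surface update is bit 8 (0x100), cursor update bit 3 (0x8), force trigger bit 0 (0x1) — and programs it on every pipe's timing generator. The mask construction, restated standalone:

#include <stdbool.h>
#include <stdio.h>

struct static_screen_triggers {
	bool surface_update, cursor_update, force_trigger;
};

static unsigned int build_otg_trigger_mask(const struct static_screen_triggers *t)
{
	unsigned int mask = 0;

	if (t->surface_update)
		mask |= 0x100;	/* bit 8 */
	if (t->cursor_update)
		mask |= 0x8;	/* bit 3 */
	if (t->force_trigger)
		mask |= 0x1;	/* bit 0 */
	return mask;
}

int main(void)
{
	struct static_screen_triggers t = { true, true, false };

	printf("mask = 0x%x\n", build_otg_trigger_mask(&t));	/* 0x108 */
	return 0;
}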
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
index edfc01d6ad73..b8bc939da155 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
@@ -56,4 +56,8 @@ bool dcn31_is_abm_supported(struct dc *dc,
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 669f524bd064..c06cc2c5da92 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index ccb7e317e86a..542ce3b7f9e4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index e8ac94a005b8..2b073123d3ed 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 8b6c49622f3b..4b92df23ff0d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1342,8 +1342,8 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
{
int i = 0;
struct drr_params params = {0};
- // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
- unsigned int event_triggers = 0x800;
+ // DRR set trigger event mapped to OTG_TRIG_A
+ unsigned int event_triggers = 0x2; // Bit[1]: OTG_TRIG_A
// Note DRR trigger events are generated regardless of whether num frames met.
unsigned int num_frames = 2;
@@ -1377,3 +1377,20 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
}
}
}
+
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x200; /* bit 9: 10 0000 0000 */
+ if (params->triggers.cursor_update)
+ triggers |= 0x8; /* bit 3 */
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index fd66316e33de..c354efa6c1b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -90,4 +90,7 @@ uint32_t dcn35_get_idle_state(const struct dc *dc);
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust);
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a630aa77dcec..a93073055e7b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -70,7 +70,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 143d3fc0221c..ab17fa1c64e8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 64ca7c66509b..f89f205e42a1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -339,6 +339,8 @@ struct hw_sequencer_funcs {
/* HW State Logging Related */
void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx);
+ void (*log_color_state)(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
void (*get_hw_state)(struct dc *dc, char *pBuf,
unsigned int bufSize, unsigned int mask);
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index b3c62a82cb1c..554cfab5ab24 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -155,7 +155,6 @@ struct hwseq_private_funcs {
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
@@ -170,7 +169,6 @@ struct hwseq_private_funcs {
struct dc_state *context,
struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
-#endif
void (*reset_back_end_for_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
index 6ed1fb8c9300..b6203253111c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -43,7 +43,8 @@ struct audio_funcs {
void (*az_configure)(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info);
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info);
void (*wall_dto_setup)(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 6f4c97543c14..f4d4a68c91dc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -356,6 +356,7 @@ struct clk_mgr_internal {
long long wm_range_table_addr;
bool dpm_present;
+ bool pme_trigger_pending;
};
struct clk_mgr_internal_funcs {
@@ -393,6 +394,11 @@ static inline int khz_to_mhz_ceil(int khz)
return (khz + 999) / 1000;
}
+static inline int khz_to_mhz_floor(int khz)
+{
+ return khz / 1000;
+}
+
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
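khz_to_mhz_floor pairs with the existing ceiling helper; the rounding direction only matters off integral MHz boundaries, where ceil overshoots and floor undershoots by design. The helpers are copied verbatim from the hunk into a runnable harness:

#include <stdio.h>

static inline int khz_to_mhz_ceil(int khz)  { return (khz + 999) / 1000; }
static inline int khz_to_mhz_floor(int khz) { return khz / 1000; }

int main(void)
{
	printf("%d %d\n", khz_to_mhz_ceil(400500), khz_to_mhz_floor(400500)); /* 401 400 */
	printf("%d %d\n", khz_to_mhz_ceil(400000), khz_to_mhz_floor(400000)); /* 400 400 */
	return 0;
}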
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 901891316dfb..2ae7484d18af 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -26,6 +26,12 @@
#ifndef __DAL_DCHUBBUB_H__
#define __DAL_DCHUBBUB_H__
+/**
+ * DOC: overview
+ *
+ * There is only one common DCHUBBUB. It contains the common request and return
+ * blocks for the Data Fabric Interface that are not clock/power gated.
+ */
enum dcc_control {
dcc_control__256_256_xxx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index f4aa76e02518..0f24afbf4388 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -27,6 +27,31 @@
#ifndef __DAL_DPP_H__
#define __DAL_DPP_H__
+/**
+ * DOC: overview
+ *
+ * The DPP (Display Pipe and Plane) block is the unified display data
+ * processing engine in DCN, handling graphic or video data on a per-DPP
+ * rectangle basis. This rectangle can be part of an SLS (Single Large
+ * Surface), a layer to be blended with other DPPs, or a rectangle associated
+ * with a display tile.
+ *
+ * It provides various functions including:
+ * - graphic color keyer
+ * - graphic cursor compositing
+ * - graphic or video image source to destination scaling
+ * - image sharpening
+ * - video format conversion from 4:2:0 or 4:2:2 to 4:4:4
+ * - Color Space Conversion
+ * - Host LUT gamma adjustment
+ * - Color Gamut Remap
+ * - brightness and contrast adjustment.
+ *
+ * DPP pipe consists of Converter and Cursor (CNVC), Scaler (DSCL), Color
+ * Management (CM), Output Buffer (OBUF) and Digital Bypass (DPB) module
+ * connected in a video/graphics pipeline.
+ */
+
#include "transform.h"
#include "cursor_reg_cache.h"
@@ -141,6 +166,7 @@ struct dcn_dpp_state {
uint32_t igam_input_format;
uint32_t dgam_lut_mode;
uint32_t rgam_lut_mode;
+ // gamut_remap data for dcn10_get_cm_states()
uint32_t gamut_remap_mode;
uint32_t gamut_remap_c11_c12;
uint32_t gamut_remap_c13_c14;
@@ -148,6 +174,16 @@ struct dcn_dpp_state {
uint32_t gamut_remap_c23_c24;
uint32_t gamut_remap_c31_c32;
uint32_t gamut_remap_c33_c34;
+ // gamut_remap data for dcn*_log_color_state()
+ struct dpp_grph_csc_adjustment gamut_remap;
+ uint32_t shaper_lut_mode;
+ uint32_t lut3d_mode;
+ uint32_t lut3d_bit_depth;
+ uint32_t lut3d_size;
+ uint32_t blnd_lut_mode;
+ uint32_t pre_dgam_mode;
+ uint32_t pre_dgam_select;
+ uint32_t gamcor_mode;
};
struct CM_bias_params {
@@ -290,6 +326,9 @@ struct dpp_funcs {
void (*dpp_cnv_set_alpha_keyer)(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer);
+
+ void (*dpp_get_gamut_remap)(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
};
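The new dpp_get_gamut_remap hook is what the dcn*_log_color_state functions above call to fill dcn_dpp_state.gamut_remap. The log paths invoke it unconditionally, so every dpp_funcs table wired into those paths must provide it; a caller outside those paths would be wise to NULL-check. A hedged caller-side sketch with stub types (the NULL check here is a defensive assumption, not something the patch does):

#include <stdio.h>

struct dpp { int inst; };
struct dpp_grph_csc_adjustment { int gamut_adjust_type; };

struct dpp_funcs_sketch {
	void (*dpp_get_gamut_remap)(struct dpp *dpp_base,
				    struct dpp_grph_csc_adjustment *adjust);
};

static void get_remap_stub(struct dpp *dpp_base,
			   struct dpp_grph_csc_adjustment *adjust)
{
	(void)dpp_base;
	adjust->gamut_adjust_type = 2;	/* pretend: SW-programmed remap */
}

static void log_one_dpp(struct dpp *dpp, const struct dpp_funcs_sketch *funcs)
{
	struct dpp_grph_csc_adjustment adjust = { 0 };

	if (funcs->dpp_get_gamut_remap)	/* defensive NULL check (assumption) */
		funcs->dpp_get_gamut_remap(dpp, &adjust);
	printf("dpp%d gamut adjust type %d\n", dpp->inst, adjust.gamut_adjust_type);
}

int main(void)
{
	struct dpp d = { 0 };
	struct dpp_funcs_sketch f = { get_remap_stub };

	log_one_dpp(&d, &f);
	return 0;
}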
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 7f3f9b69e903..72610cd7eae0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -26,13 +26,24 @@
#ifndef __DAL_HUBP_H__
#define __DAL_HUBP_H__
+/**
+ * DOC: overview
+ *
+ * Display Controller Hub (DCHUB) is the gateway between the Scalable Data Port
+ * (SDP) and DCN. This component has multiple features, such as memory
+ * arbitration, rotation, and cursor manipulation.
+ *
+ * There is one HUBP allocated per pipe, which fetches data and converts
+ * different pixel formats (e.g. ARGB8888, NV12) into linear, interleaved
+ * and fixed-depth streams of pixel data.
+ */
+
#include "mem_input.h"
#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
-
enum cursor_pitch {
CURSOR_PITCH_64_PIXELS = 0,
CURSOR_PITCH_128_PIXELS,
@@ -146,9 +157,7 @@ struct hubp_funcs {
void (*set_blank)(struct hubp *hubp, bool blank);
void (*set_blank_regs)(struct hubp *hubp, bool blank);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_hubp_post_enable)(struct hubp *hubp);
-#endif
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
void (*set_cursor_attributes)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 61a2406dcc53..ba9b942ce09f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -23,13 +23,28 @@
*/
/**
- * DOC: mpc-overview
+ * DOC: overview
*
- * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline
+ * Multiple Pipe/Plane Combiner (MPC) is a component in the hardware pipeline
* that performs blending of multiple planes, using global and per-pixel alpha.
* It also performs post-blending color correction operations according to the
* hardware capabilities, such as color transformation matrix and gamma 1D and
* 3D LUT.
+ *
+ * MPC receives output from all DPP pipes and combines them to multiple outputs
+ * supporting "M MPC inputs -> N MPC outputs" flexible composition
+ * architecture. It features:
+ *
+ * - Programmable blending structure to allow software controlled blending and
+ * cascading;
+ * - Programmable window location of each DPP in active region of display;
+ * - Combining multiple DPP pipes in one active region when a single DPP pipe
+ * cannot process a very large surface;
+ * - Combining multiple DPP from different SLS with blending;
+ * - Stereo formats from a single DPP in top-bottom or side-by-side modes;
+ * - Stereo formats from 2 DPPs;
+ * - Alpha blending of multiple layers from different DPP pipes;
+ * - Programmable background color.
*/
#ifndef __DC_MPCC_H__
@@ -83,34 +98,66 @@ enum mpcc_alpha_blend_mode {
/**
* struct mpcc_blnd_cfg - MPCC blending configuration
- *
- * @black_color: background color
- * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE)
- * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the
- * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE)
- * @global_gain: used when blend mode considers both pixel alpha and plane
- * alpha value and assumes the global alpha value.
- * @global_alpha: plane alpha value
- * @overlap_only: whether overlapping of different planes is allowed
- * @bottom_gain_mode: blend mode for bottom gain setting
- * @background_color_bpc: background color for bpc
- * @top_gain: top gain setting
- * @bottom_inside_gain: blend mode for bottom inside
- * @bottom_outside_gain: blend mode for bottom outside
*/
struct mpcc_blnd_cfg {
- struct tg_color black_color; /* background color */
- enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */
- bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */
+ /**
+ * @black_color: background color.
+ */
+ struct tg_color black_color;
+
+ /**
+ * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE).
+ */
+ enum mpcc_alpha_blend_mode alpha_mode;
+
+ /**
+ * @pre_multiplied_alpha:
+ *
+ * Whether pixel color values were pre-multiplied by the alpha channel
+ * (MPCC_ALPHA_MULTIPLIED_MODE).
+ */
+ bool pre_multiplied_alpha;
+
+ /**
+ * @global_gain: Used when blend mode considers both pixel alpha and plane
+ * alpha value and assumes the global alpha value.
+ */
int global_gain;
+
+ /**
+ * @global_alpha: Plane alpha value.
+ */
int global_alpha;
+
+ /**
+ * @overlap_only: Whether overlapping of different planes is allowed.
+ */
bool overlap_only;
/* MPCC top/bottom gain settings */
+
+ /**
+ * @bottom_gain_mode: Blend mode for bottom gain setting.
+ */
int bottom_gain_mode;
+
+ /**
+ * @background_color_bpc: Background color for bpc.
+ */
int background_color_bpc;
+
+ /**
+ * @top_gain: Top gain setting.
+ */
int top_gain;
+
+ /**
+ * @bottom_inside_gain: Blend mode for bottom inside.
+ */
int bottom_inside_gain;
+
+ /**
+ * @bottom_outside_gain: Blend mode for bottom outside.
+ */
int bottom_outside_gain;
};
@@ -150,34 +197,58 @@ struct mpc_dwb_flow_control {
/**
* struct mpcc - MPCC connection and blending configuration for a single MPCC instance.
- * @mpcc_id: MPCC physical instance
- * @dpp_id: DPP input to this MPCC
- * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected.
- * @blnd_cfg: the blending configuration for this MPCC
- * @sm_cfg: stereo mix setting for this MPCC
- * @shared_bottom: if MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
*
* This struct is used as a node in an MPC tree.
*/
struct mpcc {
- int mpcc_id; /* MPCC physical instance */
- int dpp_id; /* DPP input to this MPCC */
- struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */
- struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */
- struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */
- bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
+ /**
+ * @mpcc_id: MPCC physical instance.
+ */
+ int mpcc_id;
+
+ /**
+ * @dpp_id: DPP input to this MPCC.
+ */
+ int dpp_id;
+
+ /**
+ * @mpcc_bot: Pointer to bottom layer MPCC. NULL when not connected.
+ */
+ struct mpcc *mpcc_bot;
+
+ /**
+ * @blnd_cfg: The blending configuration for this MPCC.
+ */
+ struct mpcc_blnd_cfg blnd_cfg;
+
+ /**
+ * @sm_cfg: Stereo mix setting for this MPCC.
+ */
+ struct mpcc_sm_cfg sm_cfg;
+
+ /**
+ * @shared_bottom:
+ *
+ * If MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
+ */
+ bool shared_bottom;
};
/**
* struct mpc_tree - MPC tree represents all MPCC connections for a pipe.
*
- * @opp_id: the OPP instance that owns this MPC tree
- * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
*
*/
struct mpc_tree {
- int opp_id; /* The OPP instance that owns this MPC tree */
- struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */
+ /**
+ * @opp_id: The OPP instance that owns this MPC tree.
+ */
+ int opp_id;
+
+ /**
+ * @opp_list: The top MPCC layer of the MPC tree that outputs to the OPP endpoint.
+ */
+ struct mpcc *opp_list;
};
struct mpc {
@@ -199,6 +270,13 @@ struct mpcc_state {
uint32_t overlap_only;
uint32_t idle;
uint32_t busy;
+ uint32_t shaper_lut_mode;
+ uint32_t lut3d_mode;
+ uint32_t lut3d_bit_depth;
+ uint32_t lut3d_size;
+ uint32_t rgam_mode;
+ uint32_t rgam_lut;
+ struct mpc_grph_gamut_adjustment gamut_remap;
};
/**
@@ -217,16 +295,20 @@ struct mpc_funcs {
* Only used for planes that are part of blending chain for OPP output
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be added to.
- * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
- * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
- * stereo mix must disable for the very bottom layer of the tree config.
- * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
- * [in] dpp_id - DPP instance for the plane to be added.
- * [in] mpcc_id - The MPCC physical instance to use for blending.
- *
- * Return: struct mpcc* - MPCC that was added.
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be added to.
+ * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must disable for the very bottom layer of the tree config.
+ * - [in] insert_above_mpcc - Insert new plane above this MPCC.
+ * If NULL, insert as bottom plane.
+ * - [in] dpp_id - DPP instance for the plane to be added.
+ * - [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return:
+ *
+ * struct mpcc* - MPCC that was added.
*/
struct mpcc* (*insert_plane)(
struct mpc *mpc,
@@ -243,11 +325,14 @@ struct mpc_funcs {
* Remove a specified MPCC from the MPC tree.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be removed from.
- * [in/out] mpcc - MPCC to be removed from tree.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be removed from.
+ * - [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return:
+ *
+ * void
*/
void (*remove_mpcc)(
struct mpc *mpc,
@@ -260,9 +345,12 @@ struct mpc_funcs {
* Reset the MPCC HW status by disconnecting all muxes.
*
* Parameters:
- * [in/out] mpc - MPC context.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ *
+ * Return:
+ *
+ * void
*/
void (*mpc_init)(struct mpc *mpc);
void (*mpc_init_single_inst)(
@@ -275,11 +363,14 @@ struct mpc_funcs {
* Update the blending configuration for a specified MPCC.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in] blnd_cfg - MPCC blending configuration.
- * [in] mpcc_id - The MPCC physical instance.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ * - [in] blnd_cfg - MPCC blending configuration.
+ * - [in] mpcc_id - The MPCC physical instance.
+ *
+ * Return:
+ *
+ * void
*/
void (*update_blending)(
struct mpc *mpc,
@@ -289,15 +380,18 @@ struct mpc_funcs {
/**
* @cursor_lock:
*
- * Lock cursor updates for the specified OPP.
- * OPP defines the set of MPCC that are locked together for cursor.
+ * Lock cursor updates for the specified OPP. OPP defines the set of
+ * MPCC that are locked together for cursor.
*
* Parameters:
- * [in] mpc - MPC context.
- * [in] opp_id - The OPP to lock cursor updates on
- * [in] lock - lock/unlock the OPP
*
- * Return: void
+ * - [in] mpc - MPC context.
+ * - [in] opp_id - The OPP to lock cursor updates on
+ * - [in] lock - lock/unlock the OPP
+ *
+ * Return:
+ *
+ * void
*/
void (*cursor_lock)(
struct mpc *mpc,
@@ -307,20 +401,25 @@ struct mpc_funcs {
/**
* @insert_plane_to_secondary:
*
- * Add DPP into secondary MPC tree based on specified blending position.
- * Only used for planes that are part of blending chain for DWB output
+ * Add a DPP into the secondary MPC tree based on the specified
+ * blending position. Only used for planes that are part of the
+ * blending chain for DWB output.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be added to.
- * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
- * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
- * stereo mix must disable for the very bottom layer of the tree config.
- * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
- * [in] dpp_id - DPP instance for the plane to be added.
- * [in] mpcc_id - The MPCC physical instance to use for blending.
- *
- * Return: struct mpcc* - MPCC that was added.
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be added to.
+ * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must be disabled for the very bottom layer of the tree config.
+ * - [in] insert_above_mpcc - Insert new plane above this MPCC. If
+ * NULL, insert as bottom plane.
+ * - [in] dpp_id - DPP instance for the plane to be added.
+ * - [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return:
+ *
+ * struct mpcc* - MPCC that was added.
*/
struct mpcc* (*insert_plane_to_secondary)(
struct mpc *mpc,
@@ -337,10 +436,14 @@ struct mpc_funcs {
* Remove a specified DPP from the 'secondary' MPC tree.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be removed from.
- * [in] mpcc - MPCC to be removed from tree.
- * Return: void
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be removed from.
+ * - [in] mpcc - MPCC to be removed from tree.
+ *
+ * Return:
+ *
+ * void
*/
void (*remove_mpcc_from_secondary)(
struct mpc *mpc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7617fabbd16e..aee5372e292c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -23,6 +23,22 @@
*
*/
+/**
+ * DOC: overview
+ *
+ * The Output Plane Processor (OPP) block groups functions that format pixel
+ * streams so that they are suitable for display at the display device. The
+ * key functions contained in the OPP are:
+ *
+ * - Adaptive Backlight Modulation (ABM)
+ * - Formatter (FMT), which provides pixel-by-pixel operations to format the
+ * incoming pixel stream.
+ * - Output Buffer, which provides pixel replication and overlapping.
+ * - Interface between MPC and OPTC.
+ * - Clock and reset generation.
+ * - CRC generation.
+ */
+
#ifndef __DAL_OPP_H__
#define __DAL_OPP_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 9a00a99317b2..d98d72f35be5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -182,9 +182,7 @@ struct timing_generator_funcs {
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
-#endif
void (*disable_phantom_crtc)(struct timing_generator *tg);
bool (*immediate_disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2d152b68a501..22b24749c9d2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -61,22 +61,6 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
}
}
-static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
-{
- return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_SQUARE_END);
-}
-
-static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
-{
- if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
- test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
- return true;
- else
- return false;
-}
-
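The IS_DP_PHY_SQUARE_PATTERN()/IS_DP_PHY_PATTERN() macros used in the hunks below replace the helpers removed here. Their definitions are not part of this hunk; presumably they mirror the removed logic along these lines (a sketch, not the authoritative definitions):

/* Assumed shape of the replacement macros, defined elsewhere. */
#define IS_DP_PHY_SQUARE_PATTERN(test_pattern)			\
	(DP_TEST_PATTERN_SQUARE_BEGIN <= (test_pattern) &&	\
	 (test_pattern) <= DP_TEST_PATTERN_SQUARE_END)

#define IS_DP_PHY_PATTERN(test_pattern)				\
	((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= (test_pattern) &&	\
	  (test_pattern) <= DP_TEST_PATTERN_PHY_PATTERN_END) ||	\
	 (test_pattern) == DP_TEST_PATTERN_VIDEO_MODE)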
static void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
@@ -361,7 +345,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- if (is_dp_phy_sqaure_pattern(test_pattern)) {
+ if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) {
test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
core_link_read_dpcd(
link,
@@ -623,6 +607,8 @@ bool dp_set_test_pattern(
if (pipe_ctx == NULL)
return false;
+ link->pending_test_pattern = test_pattern;
+
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
@@ -643,12 +629,13 @@ bool dp_set_test_pattern(
/* Reset Test Pattern state */
link->test_pattern_enabled = false;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
return true;
}
/* Check for PHY Test Patterns */
- if (is_dp_phy_pattern(test_pattern)) {
+ if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
@@ -681,6 +668,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
if (p_link_settings != NULL)
dpcd_set_link_settings(link,
p_link_settings);
@@ -756,7 +744,7 @@ bool dp_set_test_pattern(
return false;
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- if (is_dp_phy_sqaure_pattern(test_pattern))
+ if (IS_DP_PHY_SQUARE_PATTERN(test_pattern))
core_link_write_dpcd(link,
DP_LINK_SQUARE_PATTERN,
p_custom_pattern,
@@ -884,6 +872,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index f4633d3cf9b9..a1f72fe378ee 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -22,6 +22,16 @@
* Authors: AMD
*
*/
+
+/**
+ * DOC: overview
+ *
+ * Display Input Output (DIO) is the display input and output unit in DCN.
+ * It includes output encoders to support different display outputs, like
+ * DisplayPort, HDMI, and DVI interfaces. It also includes the control and
+ * status channels for these interfaces.
+ */
+
#ifndef __LINK_HWSS_DIO_H__
#define __LINK_HWSS_DIO_H__
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index b659baa23147..348ea4cb832d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -80,21 +80,23 @@ static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
if (tp_params == NULL)
return false;
- if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
- link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) {
+ if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
// Deprogram overrides from previous test pattern
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
- }
switch (tp_params->dp_phy_pattern) {
case DP_TEST_PATTERN_80BIT_CUSTOM:
if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,
pltpat_custom, tp_params->custom_pattern_size) != 0)
return false;
+ hw_tp_params.custom_pattern = tp_params->custom_pattern;
+ hw_tp_params.custom_pattern_size = tp_params->custom_pattern_size;
break;
case DP_TEST_PATTERN_D102:
break;
@@ -185,13 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
- if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
- return false;
-
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
- return false;
-
- return true;
+ return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index b621b97711b6..3e6c7be7e278 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,13 +74,16 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
+ uint8_t clk_src = 0x4C;
+ uint8_t pattern = 0x4F; /* SQ128 */
+
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
- const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0};
- const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, clk_src};
+ const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, clk_src};
const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21};
const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21};
- const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F};
- const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F};
+ const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, pattern};
+ const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, pattern};
const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20};
const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20};
const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20};
@@ -123,18 +126,20 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
if (tp_params == NULL)
return false;
- if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN ||
- tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) {
+ if (!IS_DP_PHY_SQUARE_PATTERN(tp_params->dp_phy_pattern)) {
// Deprogram overrides from previously set square wave override
if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
link->current_test_pattern == DP_TEST_PATTERN_D102)
link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_exit_manual_automation_0[0],
sizeof(vendor_lttpr_exit_manual_automation_0));
- else
+ else if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
return false;
@@ -148,8 +153,6 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params);
- dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]);
-
return true;
}
@@ -170,16 +173,18 @@ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
- link_res->hpo_dp_link_enc->funcs->set_ffe(
- link_res->hpo_dp_link_enc,
- link_settings,
- lane_settings[0].FFE_PRESET.raw);
-
- // FFE is programmed when retimer is programmed for SQ128, but explicit
- // programming needed here as well in case FFE-only update is requested
- if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
- link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END)
- dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+ // Don't update our HW FFE when outputting phy test patterns
+ if (IS_DP_PHY_PATTERN(link->pending_test_pattern)) {
+ // Directly program FIXED_VS retimer FFE for SQ128 override
+ if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern)) {
+ dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+ }
+ } else {
+ link_res->hpo_dp_link_enc->funcs->set_ffe(
+ link_res->hpo_dp_link_enc,
+ link_settings,
+ lane_settings[0].FFE_PRESET.raw);
+ }
}
static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
@@ -214,13 +219,7 @@ static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)
{
- if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
- return false;
-
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
- return false;
-
- return true;
+ return requires_fixed_vs_pe_retimer_dio_link_hwss(link);
}
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 24153b0df503..b8c4a04dd175 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -41,6 +41,7 @@
#include "protocols/link_dp_dpia.h"
#include "protocols/link_dp_phy.h"
#include "protocols/link_dp_training.h"
+#include "protocols/link_dp_dpia_bw.h"
#include "accessories/link_dp_trace.h"
#include "link_enc_cfg.h"
@@ -991,6 +992,23 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ /*
+ * If this is a DP over USB4 link then we need to:
+ * - Enable BW ALLOC support on DPtx if applicable
+ */
+ if (dc->config.usb4_bw_alloc_support) {
+ if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
+ /* update with non-reduced link cap if bw allocation mode is supported */
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate =
+ link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count =
+ link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
+ }
+ }
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 3cbfbf8d107e..a72de44a5747 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -2197,6 +2197,64 @@ static enum dc_status enable_link(
static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
{
+ struct dc_link *link = stream->sink->link;
+ int req_bw = bw;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dpia_bw_alloc_config.bw_alloc_enabled)
+ return false;
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ int sink_index = 0;
+ int i = 0;
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == NULL)
+ continue;
+
+ if (stream->sink->sink_id != link->remote_sinks[i]->sink_id)
+ req_bw += link->dpia_bw_alloc_config.remote_sink_req_bw[i];
+ else
+ sink_index = i;
+ }
+
+ link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
+ }
+
+ /* get dp overhead for dp tunneling */
+ link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ req_bw += link->dpia_bw_alloc_config.dp_overhead;
+
+ if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) {
+ if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) {
+ DC_LOG_DEBUG("%s, Success in allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+ __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.dp_overhead);
+ } else {
+ // Cannot get the required bandwidth.
+ DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+ __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.dp_overhead);
+ return false;
+ }
+ } else {
+ DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__);
+ return false;
+ }
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ int i = 0;
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == NULL)
+ continue;
+ DC_LOG_DEBUG("%s, remote_sink=%s, request_bw=%d\n", __func__,
+ (const char *)(&link->remote_sinks[i]->edid_caps.display_name[0]),
+ link->dpia_bw_alloc_config.remote_sink_req_bw[i]);
+ }
+ }
+
return true;
}
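To make the MST accounting in allocate_usb4_bandwidth_for_stream() concrete, a hypothetical walk-through with two remote sinks on one DPIA link (all numbers invented for illustration):

/*
 * Sink 0 previously requested 2000 units of bw; the current stream
 * (sink 1) now asks for 3000, and dp_overhead is 500:
 *
 *   req_bw  = 3000;                      // current stream
 *   req_bw += remote_sink_req_bw[0];     // += 2000 (other sink's share)
 *   remote_sink_req_bw[1] = 3000;        // record this sink's request
 *   req_bw += dp_overhead;               // += 500 -> req_bw = 5500
 *
 * The call succeeds only if the DPIA grants at least req_bw, i.e.
 * req_bw <= allocated_bw once the request completes.
 */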
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 5b0bc7f6a188..1aed55b0ab6a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -125,11 +125,9 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
struct dc_crtc_timing outputTiming = *timing;
-#if defined(CONFIG_DRM_AMD_DC_FP)
if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
-#endif
if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index 0050e0a06cbc..2fa4e64e2430 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -37,6 +37,7 @@
#include "clk_mgr.h"
#include "resource.h"
#include "link_enc_cfg.h"
+#include "atomfirmware.h"
#define DC_LOGGER \
link->ctx->logger
@@ -100,8 +101,11 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ // Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
- !is_immediate_downstream(link, offset))
+ !is_immediate_downstream(link, offset) &&
+ (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
+ link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
if (link_hwss->ext.set_dp_lane_settings)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 16a62e018712..e538c67d3ed9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1508,10 +1508,7 @@ enum link_training_result dp_perform_link_training(
* Non-LT AUX transactions inside training mode.
*/
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
- if (link->dc->config.use_old_fixed_vs_sequence)
- status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings);
- else
- status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+ status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
else if (encoding == DP_128b_132b_ENCODING)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 7087cdc9e977..b5cf75975fff 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -186,356 +186,6 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
return status;
}
-
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- const uint8_t offset = dp_parse_lttpr_repeater_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = 0;
- uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
- uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
- const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
- const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
- const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
- const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
- const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t lane = 0;
- union down_spread_ctrl downspread = {0};
- union lane_count_set lane_count_set = {0};
- uint8_t toggle_rate;
- uint8_t rate;
-
- /* Only 8b/10b is supported */
- ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING);
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
- status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
- return status;
- }
-
- if (offset != 0xFF) {
- if (offset == 2) {
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
-
- /* Certain display and cable configuration require extra delay */
- } else if (offset > 2) {
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
- }
- }
-
- /* Vendor specific: Reset lane settings */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- /* Vendor specific: Enable intercept */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
-
-
- /* 1. set link rate, lane count and spread. */
-
- downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
-
- lane_count_set.bits.LANE_COUNT_SET =
- lt_settings->link_settings.lane_count;
-
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
- if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- }
-
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
-
- core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
-
- rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
- if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
-
- link->vendor_specific_lttpr_link_rate_wa = rate;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
-
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_dpmf[0],
- sizeof(vendor_lttpr_write_data_dpmf));
-
- if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
- }
-
- /* 2. Perform link training */
-
- /* Perform Clock Recovery Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- const uint8_t max_vendor_dpcd_retries = 10;
- uint32_t retries_cr;
- uint32_t retry_count;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- uint8_t i = 0;
-
- retries_cr = 0;
- retry_count = 0;
-
- memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
- memset(&dpcd_lane_status_updated, '\0',
- sizeof(dpcd_lane_status_updated));
-
- while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
- /* 1. call HWSS to set lane settings */
- dp_set_hw_lane_settings(
- link,
- link_res,
- lt_settings,
- 0);
-
- /* 2. update DPCD of the receiver */
- if (!retry_count) {
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration.
- */
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- lt_settings->pattern_for_cr,
- 0);
- /* Vendor specific: Disable intercept */
- for (i = 0; i < max_vendor_dpcd_retries; i++) {
- if (pre_disable_intercept_delay_ms != 0)
- msleep(pre_disable_intercept_delay_ms);
- if (link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis)))
- break;
-
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
- }
- } else {
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- dpcd_set_lane_settings(
- link,
- lt_settings,
- 0);
- }
-
- /* 3. wait receiver to lock-on*/
- wait_time_microsec = lt_settings->cr_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested drive
- * settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 6. max VS reached*/
- if (dp_is_max_vs_reached(lt_settings))
- break;
-
- /* 7. same lane settings */
- /* Note: settings are the same for all lanes,
- * so comparing first lane is sufficient
- */
- if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
- dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
- retries_cr++;
- else
- retries_cr = 0;
-
- /* 8. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- retry_count++;
- }
-
- if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
- ASSERT(0);
- DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
- __func__,
- LINK_TRAINING_MAX_CR_RETRY);
-
- }
-
- status = dp_get_cr_failure(lane_count, dpcd_lane_status);
- }
-
- /* Perform Channel EQ Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- enum dc_dp_training_pattern tr_pattern;
- uint32_t retries_ch_eq;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Note: also check that TPS4 is a supported feature*/
- tr_pattern = lt_settings->pattern_for_eq;
-
- dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
-
- status = LINK_TRAINING_EQ_FAIL_EQ;
-
- for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
- retries_ch_eq++) {
-
- dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
-
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- /* 2. update DPCD*/
- if (!retries_ch_eq)
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration
- */
-
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- tr_pattern, 0);
- else
- dpcd_set_lane_settings(link, lt_settings, 0);
-
- /* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested
- * drive settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_EQ_FAIL_CR;
- break;
- }
-
- /* 6. check CHEQ done*/
- if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 7. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- }
- }
-
- return status;
-}
-
enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -620,18 +270,20 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
rate = get_dpcd_link_rate(&lt_settings->link_settings);
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) {
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
- if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
+ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
- link->vendor_specific_lttpr_link_rate_wa = rate;
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+ }
core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
index c0d6ea329504..e61970e27661 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
@@ -28,11 +28,6 @@
#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
#include "link_dp_training.h"
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings);
-
enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
struct dc_link *link,
const struct link_resource *link_res,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 046d3e205415..acfbbc638cc6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -287,7 +287,7 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
- // if < 1 nits or > 5000, it might be wrong readback
+ // if > 5000 nits, it might be a wrong readback. 0 nits is a valid default value for an OLED panel.
if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;
@@ -892,7 +892,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
- if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
+ if (replay != NULL && link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 37a64186f324..ecc477ef8e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2169,6 +2169,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
+ } else {
+ /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+ * If it just so happens that the memory bandwidth is low enough such that
+ * all the optimal DCFCLK for each UCLK is lower than the smallest DCFCLK STA
+ * target, we need to populate the optimal UCLK for each DCFCLK STA target to
+ * be the max UCLK.
+ */
+ if (j == num_uclk_states - 1) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ }
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 31035fc3d868..04d142f97474 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1941,8 +1941,6 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
- dc->config.use_old_fixed_vs_sequence = true;
-
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 74412e5f03fe..6f832bf278cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1760,6 +1760,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->config.dc_mode_clk_limit_support = true;
+ dc->config.enable_windowed_mpo_odm = false;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 5fdcda8f8602..5d52853cac96 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -701,7 +701,7 @@ static const struct dc_plane_cap plane_cap = {
// 4:1 downscaling ratio for ARGB8888 (1000/4 = 250); 6:1 (1000/6 = 166.666) for NV12/FP16
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -764,6 +764,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .minimum_z8_residency_time = 2100,
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
@@ -782,6 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.psp_disabled_wa = true,
.ips2_eval_delay_us = 2000,
.ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
.static_screen_wait_frames = 2,
};
@@ -1905,7 +1907,8 @@ static bool dcn35_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
-
+ /* HW default is to have all the FGCG enabled; SW does not need to program them */
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index c78c9224ab60..ff2a65e67bd4 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -78,6 +78,12 @@ struct dmub_srv_dcn31_regs;
struct dmcub_trace_buf_entry;
+/* enum dmub_window_memory_type - memory location type specification for windows */
+enum dmub_window_memory_type {
+ DMUB_WINDOW_MEMORY_TYPE_FB = 0,
+ DMUB_WINDOW_MEMORY_TYPE_GART
+};
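As an example of how a caller could use this enum, a hypothetical per-window mapping (the real per-ASIC assignment is made by the caller that fills dmub_srv_region_params, shown further below):

/* Hypothetical mapping of DMUB windows to memory types; entries left
 * out default to 0, i.e. DMUB_WINDOW_MEMORY_TYPE_FB.
 */
static const enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
	[DMUB_WINDOW_4_MAILBOX] = DMUB_WINDOW_MEMORY_TYPE_GART,
	[DMUB_WINDOW_SHARED_STATE] = DMUB_WINDOW_MEMORY_TYPE_FB,
};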
+
/* enum dmub_status - return code for dmcub functions */
enum dmub_status {
DMUB_STATUS_OK = 0,
@@ -119,6 +125,7 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
+ DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_TOTAL,
};
@@ -203,7 +210,7 @@ struct dmub_srv_region_params {
uint32_t vbios_size;
const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
- bool is_mailbox_in_inbox;
+ const enum dmub_window_memory_type *window_memory_type;
};
/**
@@ -223,7 +230,7 @@ struct dmub_srv_region_params {
*/
struct dmub_srv_region_info {
uint32_t fb_size;
- uint32_t inbox_size;
+ uint32_t gart_size;
uint8_t num_regions;
struct dmub_region regions[DMUB_WINDOW_TOTAL];
};
@@ -239,9 +246,10 @@ struct dmub_srv_region_info {
struct dmub_srv_memory_params {
const struct dmub_srv_region_info *region_info;
void *cpu_fb_addr;
- void *cpu_inbox_addr;
+ void *cpu_gart_addr;
uint64_t gpu_fb_addr;
- uint64_t gpu_inbox_addr;
+ uint64_t gpu_gart_addr;
+ const enum dmub_window_memory_type *window_memory_type;
};
/**
@@ -361,7 +369,8 @@ struct dmub_srv_hw_funcs {
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
@@ -443,7 +452,6 @@ struct dmub_srv_create_params {
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs *hw_funcs;
void *user_ctx;
- struct dc_context *dc_ctx;
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
@@ -455,6 +463,7 @@ struct dmub_srv_create_params {
* @user_ctx: user provided context for the dmub_srv
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
+ * @shared_state: dmub shared state between firmware and driver
* @fw_state: dmub firmware state pointer
*/
struct dmub_srv {
@@ -463,6 +472,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
+ volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index e699731ee68e..a529e369b2ac 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -26,15 +26,6 @@
#ifndef DMUB_CMD_H
#define DMUB_CMD_H
-#if defined(_TEST_HARNESS) || defined(FPGA_USB4)
-#include "dmub_fw_types.h"
-#include "include_legacy/atomfirmware.h"
-
-#if defined(_TEST_HARNESS)
-#include <string.h>
-#endif
-#else
-
#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -42,8 +33,6 @@
#include "atomfirmware.h"
-#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4)
-
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
@@ -403,15 +392,16 @@ union replay_debug_flags {
/**
* 0x400 (bit 10)
- * @force_disable_ips1: Force disable IPS1 state
+ * @enable_ips_visual_confirm: Enable IPS visual confirm when entering IPS.
+ * If we enter IPS2, the visual confirm bar will change to yellow.
*/
- uint32_t force_disable_ips1 : 1;
+ uint32_t enable_ips_visual_confirm : 1;
/**
* 0x800 (bit 11)
- * @force_disable_ips2: Force disable IPS2 state
+ * @enable_ips_residency_profiling: Enable IPS residency profiling
*/
- uint32_t force_disable_ips2 : 1;
+ uint32_t enable_ips_residency_profiling : 1;
uint32_t reserved : 20;
} bitfields;
@@ -518,6 +508,8 @@ struct dmub_visual_confirm_color {
* @trace_buffer_size: size of the tracebuffer region
* @fw_version: the firmware version information
* @dal_fw: 1 if the firmware is DAL
+ * @shared_state_size: size of the shared state region in bytes
+ * @shared_state_features: number of shared state features
*/
struct dmub_fw_meta_info {
uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */
@@ -526,6 +518,9 @@ struct dmub_fw_meta_info {
uint32_t fw_version; /**< the firmware version information */
uint8_t dal_fw; /**< 1 if the firmware is DAL */
uint8_t reserved[3]; /**< padding bits */
+ uint32_t shared_state_size; /**< size of the shared state region in bytes */
+ uint16_t shared_state_features; /**< number of shared state features */
+ uint16_t reserved2; /**< padding bytes */
};
/**
@@ -670,6 +665,116 @@ enum dmub_fw_boot_options_bit {
};
//==============================================================================
+//< DMUB_SHARED_STATE>==========================================================
+//==============================================================================
+
+/**
+ * Shared firmware state between driver and firmware for lockless communication
+ * in situations where the inbox/outbox may be unavailable.
+ *
+ * Each structure *must* be at most 256 bytes in size. The layout allocation is
+ * described below:
+ *
+ * [Header (256 Bytes)][Feature 1 (256 Bytes)][Feature 2 (256 Bytes)]...
+ */
+
+/**
+ * enum dmub_shared_state_feature_id - List of shared state features.
+ */
+enum dmub_shared_state_feature_id {
+ DMUB_SHARED_SHARE_FEATURE__INVALID = 0,
+ DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
+ DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
+ DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
+};
+
+/**
+ * union dmub_shared_state_ips_fw_signals - Firmware signals for IPS.
+ */
+union dmub_shared_state_ips_fw_signals {
+ struct {
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
+ uint32_t reserved_bits : 30; /**< Reserved */
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * union dmub_shared_state_ips_driver_signals - Driver signals for IPS.
+ */
+union dmub_shared_state_ips_driver_signals {
+ struct {
+ uint32_t allow_pg : 1; /**< 1 if PG is allowed */
+ uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
+ uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
+ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
+ uint32_t reserved_bits : 28; /**< Reserved bits */
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * IPS FW Version
+ */
+#define DMUB_SHARED_STATE__IPS_FW_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_fw - Firmware state for IPS.
+ */
+struct dmub_shared_state_ips_fw {
+ union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
+ uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * IPS Driver Version
+ */
+#define DMUB_SHARED_STATE__IPS_DRIVER_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_driver - Driver state for IPS.
+ */
+struct dmub_shared_state_ips_driver {
+ union dmub_shared_state_ips_driver_signals signals; /**< 4 bytes, IPS signal bits */
+ uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_common - Generic payload.
+ */
+struct dmub_shared_state_feature_common {
+ uint32_t padding[62];
+}; /* 248-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_header - Feature description.
+ */
+struct dmub_shared_state_feature_header {
+ uint16_t id; /**< Feature ID */
+ uint16_t version; /**< Feature version */
+ uint32_t reserved; /**< Reserved bytes. */
+}; /* 8 bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_block - Feature block.
+ */
+struct dmub_shared_state_feature_block {
+ struct dmub_shared_state_feature_header header; /**< Shared state header. */
+ union dmub_shared_feature_state_union {
+ struct dmub_shared_state_feature_common common; /**< Generic data */
+ struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
+ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
+ } data; /**< Shared state data. */
+}; /* 256-bytes, fixed */
+
+/**
+ * Shared state size in bytes.
+ */
+#define DMUB_FW_HEADER_SHARED_STATE_SIZE \
+ ((DMUB_SHARED_STATE_FEATURE__LAST + 1) * sizeof(struct dmub_shared_state_feature_block))
+
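Given the fixed 256-byte stride described above, driver code can reach a feature block by indexing the shared state base with its feature id; a sketch under the assumption that block 0 serves as the region header:

/*
 * Illustrative access pattern, not part of this patch:
 *
 *   volatile struct dmub_shared_state_ips_fw *ips_fw =
 *       &dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
 *
 * A defensive lookup would also verify header.id and header.version
 * against the expected feature before trusting the payload.
 */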
+//==============================================================================
//</DMUB_STATUS>================================================================
//==============================================================================
//< DMUB_VBIOS>=================================================================
@@ -1270,11 +1375,11 @@ struct dmub_cmd_PLAT_54186_wa {
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; /**< reg value */
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; /**< reg value */
struct {
- uint8_t hubp_inst : 4; /**< HUBP instance */
- uint8_t tmz_surface : 1; /**< TMZ enable or disable */
- uint8_t immediate :1; /**< Immediate flip */
- uint8_t vmid : 4; /**< VMID */
- uint8_t grph_stereo : 1; /**< 1 if stereo */
+ uint32_t hubp_inst : 4; /**< HUBP instance */
+ uint32_t tmz_surface : 1; /**< TMZ enable or disable */
+ uint32_t immediate :1; /**< Immediate flip */
+ uint32_t vmid : 4; /**< VMID */
+ uint32_t grph_stereo : 1; /**< 1 if stereo */
uint32_t reserved : 21; /**< Reserved */
} flip_params; /**< Pageflip parameters */
uint32_t reserved[9]; /**< Reserved bits */
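The uint8_t to uint32_t change above matters because C leaves the packing of adjacent bit-fields with different declared types up to the implementation/ABI; a standalone illustration (not from this patch):

#include <stdint.h>

/* With mixed base types, whether all 32 bits share one storage unit is
 * implementation-defined -- unacceptable for a structure shared with
 * firmware. A single base type makes the layout unambiguous.
 */
struct flip_params_mixed {		/* layout is ABI-dependent */
	uint8_t hubp_inst : 4;
	uint8_t tmz_surface : 1;
	uint8_t immediate : 1;
	uint8_t vmid : 4;
	uint8_t grph_stereo : 1;
	uint32_t reserved : 21;
};

struct flip_params_uniform {		/* one 32-bit unit on common ABIs */
	uint32_t hubp_inst : 4;
	uint32_t tmz_surface : 1;
	uint32_t immediate : 1;
	uint32_t vmid : 4;
	uint32_t grph_stereo : 1;
	uint32_t reserved : 21;
};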
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 98dad0d47e72..cae96fba6349 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -191,7 +191,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
uint64_t fb_base, fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 1df128e57ed3..de287b101848 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -197,7 +197,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index 81dae75e9ff8..a4abe951c838 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -124,7 +124,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
index 9a3afffd9b0f..066f35a50094 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
@@ -43,7 +43,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
#endif /* _DMUB_DCN30_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 094e9f864557..2bcf5fb87dd9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -187,7 +187,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 4d520a893c7b..eccdab4986ce 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -199,7 +199,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 2daa1e0c8061..0d521eeda050 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -32,8 +32,6 @@
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
-#define DCN_BASE__INST0_SEG2 0x000034C0
-
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define CTX dmub
#define REGS dmub->regs_dcn32
@@ -218,7 +216,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index b0cd8d29402f..29c1132951af 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -206,7 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 6d1fbea0f6ba..60223efc6fc8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -229,7 +229,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
@@ -275,6 +276,15 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
DMCUB_REGION3_CW6_ENABLE, 1);
+
+ offset = region6->offset;
+
+ REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part);
+ REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0,
+ DMCUB_REGION6_TOP_ADDRESS,
+ region6->region.top - region6->region.base - 1,
+ DMCUB_REGION6_ENABLE, 1);
}
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 129a7031d2ae..686e97c00ccc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@@ -154,6 +157,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@@ -214,7 +219,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 9ad738805320..fb66832dc996 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -78,6 +78,7 @@
#define DMUB_CW6_BASE (0x66000000)
#define DMUB_REGION5_BASE (0xA0000000)
+#define DMUB_REGION6_BASE (0xC0000000)
static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs;
static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs;
@@ -417,58 +418,44 @@ void dmub_srv_destroy(struct dmub_srv *dmub)
dmub_memset(dmub, 0, sizeof(*dmub));
}
+static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out,
+ const uint32_t *window_sizes,
+ enum dmub_window_memory_type memory_type)
+{
+ uint32_t i, top = 0;
+
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (params->window_memory_type[i] == memory_type) {
+ struct dmub_region *region = &out->regions[i];
+
+ region->base = dmub_align(top, 256);
+ region->top = region->base + dmub_align(window_sizes[i], 64);
+ top = region->top;
+ }
+ }
+
+ return dmub_align(top, 4096);
+}
+
enum dmub_status
-dmub_srv_calc_region_info(struct dmub_srv *dmub,
- const struct dmub_srv_region_params *params,
- struct dmub_srv_region_info *out)
+ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out)
{
- struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
- struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
- struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
- struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
- struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
- struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
- struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
- struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
- uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
- uint32_t previous_top = 0;
+ uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
+
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
memset(out, 0, sizeof(*out));
+ memset(window_sizes, 0, sizeof(window_sizes));
out->num_regions = DMUB_NUM_WINDOWS;
- inst->base = 0x0;
- inst->top = inst->base + params->inst_const_size;
-
- data->base = dmub_align(inst->top, 256);
- data->top = data->base + params->bss_data_size;
-
- /*
- * All cache windows below should be aligned to the size
- * of the DMCUB cache line, 64 bytes.
- */
-
- stack->base = dmub_align(data->top, 256);
- stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
-
- bios->base = dmub_align(stack->top, 256);
- bios->top = bios->base + params->vbios_size;
-
- if (params->is_mailbox_in_inbox) {
- mail->base = 0;
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = bios->top;
- } else {
- mail->base = dmub_align(bios->top, 256);
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = mail->top;
- }
-
fw_info = dmub_get_fw_meta_info(params);
if (fw_info) {
@@ -486,19 +473,21 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
dmub->fw_version = fw_info->fw_version;
}
- trace_buff->base = dmub_align(previous_top, 256);
- trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
-
- fw_state->base = dmub_align(trace_buff->top, 256);
- fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
-
- scratch_mem->base = dmub_align(fw_state->top, 256);
- scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+ window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size;
+ window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+ window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size;
+ window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size;
+ window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
+ window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
+ window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
- out->fb_size = dmub_align(scratch_mem->top, 4096);
+ out->fb_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
- if (params->is_mailbox_in_inbox)
- out->inbox_size = dmub_align(mail->top, 4096);
+ out->gart_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART);
return DMUB_STATUS_OK;
}
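The refactor above replaces the hand-unrolled layout with a table of window sizes packed per memory type: every window of the requested type gets a 256-byte-aligned base, its size is padded to the 64-byte DMCUB cache line, and each backing allocation's total is rounded up to a 4 KiB page. A worked sketch with two hypothetical windows of 100 and 4000 bytes in the same memory type:

	/* Sketch: the alignment arithmetic of
	 * dmub_srv_calc_regions_for_memory_type; sizes are hypothetical. */
	uint32_t top   = 0;
	uint32_t base0 = (top + 255) & ~255u;    /* dmub_align(0, 256)    -> 0x0    */
	top  = base0 + ((100 + 63) & ~63u);      /* dmub_align(100, 64)   -> 0x80   */
	uint32_t base1 = (top + 255) & ~255u;    /* dmub_align(0x80, 256) -> 0x100  */
	top  = base1 + ((4000 + 63) & ~63u);     /* dmub_align(4000, 64)  -> 0x10C0 */
	uint32_t total = (top + 4095) & ~4095u;  /* dmub_align(top, 4096) -> 0x2000 */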
@@ -507,8 +496,6 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out)
{
- uint8_t *cpu_base;
- uint64_t gpu_base;
uint32_t i;
if (!dmub->sw_init)
@@ -519,19 +506,16 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
return DMUB_STATUS_INVALID;
- cpu_base = (uint8_t *)params->cpu_fb_addr;
- gpu_base = params->gpu_fb_addr;
-
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
const struct dmub_region *reg =
&params->region_info->regions[i];
- out->fb[i].cpu_addr = cpu_base + reg->base;
- out->fb[i].gpu_addr = gpu_base + reg->base;
-
- if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
- out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
- out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
+ if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base;
+ } else {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base;
}
out->fb[i].size = reg->top - reg->base;
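With the per-window memory type in place, each window's final CPU/GPU address is simply the matching backing allocation's base plus the window's region offset. A hypothetical caller-side setup (the local variable names are illustrative, not from this patch):

	/* Sketch: feeding dmub_srv_calc_mem_info with split FB/GART backing. */
	struct dmub_srv_memory_params mem_params = { 0 };

	mem_params.region_info   = &region_info;   /* from dmub_srv_calc_region_info() */
	mem_params.cpu_fb_addr   = fb_cpu_ptr;     /* VRAM allocation, fb_size bytes   */
	mem_params.gpu_fb_addr   = fb_gpu_addr;
	mem_params.cpu_gart_addr = gart_cpu_ptr;   /* GART allocation, gart_size bytes */
	mem_params.gpu_gart_addr = gart_gpu_addr;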
@@ -583,9 +567,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
- struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+ struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
if (!dmub->sw_init)
@@ -670,10 +655,16 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ region6.offset.quad_part = shared_state_fb->gpu_addr;
+ region6.region.base = DMUB_CW6_BASE;
+ region6.region.top = region6.region.base + shared_state_fb->size;
+
+ dmub->shared_state = shared_state_fb->cpu_addr;
+
dmub->scratch_mem_fb = *scratch_mem_fb;
if (dmub->hw_funcs.setup_windows)
- dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
if (dmub->hw_funcs.setup_outbox0)
dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
@@ -809,11 +800,20 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub)
{
+ union dmub_fw_boot_status status;
+
if (!dmub->hw_funcs.is_hw_powered_up)
return true;
- return dmub->hw_funcs.is_hw_powered_up(dmub) &&
- dmub->hw_funcs.is_hw_init(dmub);
+ if (!dmub->hw_funcs.is_hw_powered_up(dmub))
+ return false;
+
+ if (!dmub->hw_funcs.is_hw_init(dmub))
+ return false;
+
+ status = dmub->hw_funcs.get_fw_status(dmub);
+
+ return status.bits.dal_fw && status.bits.mailbox_rdy;
}
enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub,
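dmub_srv_is_hw_pwr_up() is now stricter: besides the hardware power and init checks, the firmware's scratch-reported boot status must show both dal_fw and mailbox_rdy before callers may touch the mailboxes. A hedged sketch of the kind of wait loop built on top of it (the 1 us poll granularity is an assumption):

	/* Sketch: poll until DMUB reports powered, initialized and booted. */
	uint32_t i;

	for (i = 0; i <= timeout_us; ++i) {
		if (dmub_srv_is_hw_pwr_up(dmub))
			return DMUB_STATUS_OK;
		udelay(1);
	}
	return DMUB_STATUS_TIMEOUT;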
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
index 915a031a43cb..e4a26143f14c 100644
--- a/drivers/gpu/drm/amd/display/include/audio_types.h
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -27,11 +27,21 @@
#define __AUDIO_TYPES_H__
#include "signal_types.h"
+#include "fixed31_32.h"
+#include "dc_dp_types.h"
#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
#define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
#define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
+struct audio_dp_link_info {
+ uint32_t link_bandwidth_kbps;
+ uint32_t hblank_min_symbol_width;
+ enum dp_link_encoding encoding;
+ enum dc_link_rate link_rate;
+ enum dc_lane_count lane_count;
+ bool is_mst;
+};
struct audio_crtc_info {
uint32_t h_total;
@@ -42,7 +52,10 @@ struct audio_crtc_info {
uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */
uint32_t refresh_rate;
enum dc_color_depth color_depth;
+ enum dc_pixel_encoding pixel_encoding;
bool interlaced;
+ uint32_t dsc_bits_per_pixel;
+ uint32_t dsc_num_slices;
};
struct azalia_clock_info {
uint32_t pixel_clock_in_10khz;
@@ -95,6 +108,8 @@ struct audio_output {
enum signal_type signal;
/* video timing */
struct audio_crtc_info crtc_info;
+ /* DP link info */
+ struct audio_dp_link_info dp_link_info;
/* PLL for audio */
struct audio_pll_info pll_info;
};
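audio_dp_link_info hands the audio programming enough DP link detail (encoding, rate, lane count, MST, minimum hblank symbol width) to check audio packet bandwidth against the link. A hypothetical initializer; the numeric values are invented and the enum constants are assumed from dc_dp_types.h:

	/* Sketch: illustrative values only. */
	struct audio_dp_link_info link_info = {
		.link_bandwidth_kbps     = 25920000,  /* e.g. HBR3 x4, 8b/10b payload */
		.hblank_min_symbol_width = 32,
		.encoding                = DP_8b_10b_ENCODING,
		.link_rate               = LINK_RATE_HIGH3,
		.lane_count              = LANE_COUNT_FOUR,
		.is_mst                  = false,
	};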
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1b8ab20f1715..92dbff22a7c6 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -169,6 +169,15 @@ enum dp_test_pattern {
DP_TEST_PATTERN_UNSUPPORTED
};
+#define IS_DP_PHY_SQUARE_PATTERN(test_pattern)\
+ (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_SQUARE_END)
+
+#define IS_DP_PHY_PATTERN(test_pattern)\
+ ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||\
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+
enum dp_test_pattern_color_space {
DP_TEST_PATTERN_COLOR_SPACE_RGB,
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,
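The two macros centralize the range checks that DPCD PHY-test handling otherwise repeats inline. A hypothetical dispatch site:

	/* Sketch: classifying a requested PHY test pattern. */
	enum dp_test_pattern pattern = DP_TEST_PATTERN_SQUARE_BEGIN; /* hypothetical */

	if (IS_DP_PHY_SQUARE_PATTERN(pattern))
		; /* square-pattern specific programming */
	else if (IS_DP_PHY_PATTERN(pattern))
		; /* generic PHY pattern, or restore video mode */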
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index df2c7ffe190f..b0a6256e89f4 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -174,6 +174,7 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_ATHUB (1 << 16)
#define AMD_PG_SUPPORT_JPEG (1 << 17)
#define AMD_PG_SUPPORT_IH_SRAM_PG (1 << 18)
+#define AMD_PG_SUPPORT_JPEG_DPG (1 << 19)
/**
* enum PP_FEATURE_MASK - Used to mask power play features.
@@ -244,6 +245,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
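Both new bits follow the existing mask conventions: AMD_PG_SUPPORT_JPEG_DPG is tested against the device's pg_flags, while DC_REPLAY_MASK is an opt-in bit in the amdgpu_dc_feature_mask module parameter. A hedged sketch of the expected consumers:

	/* Sketch: gating on the new bits; surrounding context assumed. */
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG)
		; /* take the JPEG dynamic power-gating path */

	if (amdgpu_dc_feature_mask & DC_REPLAY_MASK)
		; /* allow Panel Replay on dcn < 3.1.4 as well */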
diff --git a/drivers/gpu/drm/amd/include/arct_ip_offset.h b/drivers/gpu/drm/amd/include/arct_ip_offset.h
index af1c46991429..7dd876f7df74 100644
--- a/drivers/gpu/drm/amd/include/arct_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/arct_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
} __maybe_unused;
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h
new file mode 100644
index 000000000000..84483366ab6a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _athub_4_1_0_OFFSET_HEADER
+#define _athub_4_1_0_OFFSET_HEADER
+
+
+
+// addressBlock: athub_xpbdec
+// base address: 0x3000
+#define regXPB_RTR_SRC_APRTR0 0x0000
+#define regXPB_RTR_SRC_APRTR0_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR1 0x0001
+#define regXPB_RTR_SRC_APRTR1_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR2 0x0002
+#define regXPB_RTR_SRC_APRTR2_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR3 0x0003
+#define regXPB_RTR_SRC_APRTR3_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR4 0x0004
+#define regXPB_RTR_SRC_APRTR4_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR5 0x0005
+#define regXPB_RTR_SRC_APRTR5_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR6 0x0006
+#define regXPB_RTR_SRC_APRTR6_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR7 0x0007
+#define regXPB_RTR_SRC_APRTR7_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR8 0x0008
+#define regXPB_RTR_SRC_APRTR8_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR9 0x0009
+#define regXPB_RTR_SRC_APRTR9_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR10 0x000a
+#define regXPB_RTR_SRC_APRTR10_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR11 0x000b
+#define regXPB_RTR_SRC_APRTR11_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR12 0x000c
+#define regXPB_RTR_SRC_APRTR12_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR13 0x000d
+#define regXPB_RTR_SRC_APRTR13_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP0 0x000e
+#define regXPB_RTR_DEST_MAP0_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP1 0x000f
+#define regXPB_RTR_DEST_MAP1_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP2 0x0010
+#define regXPB_RTR_DEST_MAP2_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP3 0x0011
+#define regXPB_RTR_DEST_MAP3_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP4 0x0012
+#define regXPB_RTR_DEST_MAP4_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP5 0x0013
+#define regXPB_RTR_DEST_MAP5_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP6 0x0014
+#define regXPB_RTR_DEST_MAP6_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP7 0x0015
+#define regXPB_RTR_DEST_MAP7_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP8 0x0016
+#define regXPB_RTR_DEST_MAP8_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP9 0x0017
+#define regXPB_RTR_DEST_MAP9_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP10 0x0018
+#define regXPB_RTR_DEST_MAP10_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP11 0x0019
+#define regXPB_RTR_DEST_MAP11_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP12 0x001a
+#define regXPB_RTR_DEST_MAP12_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP13 0x001b
+#define regXPB_RTR_DEST_MAP13_BASE_IDX 0
+#define regXPB_CLG_CFG0 0x001c
+#define regXPB_CLG_CFG0_BASE_IDX 0
+#define regXPB_CLG_CFG1 0x001d
+#define regXPB_CLG_CFG1_BASE_IDX 0
+#define regXPB_CLG_CFG2 0x001e
+#define regXPB_CLG_CFG2_BASE_IDX 0
+#define regXPB_CLG_CFG3 0x001f
+#define regXPB_CLG_CFG3_BASE_IDX 0
+#define regXPB_CLG_CFG4 0x0020
+#define regXPB_CLG_CFG4_BASE_IDX 0
+#define regXPB_CLG_CFG5 0x0021
+#define regXPB_CLG_CFG5_BASE_IDX 0
+#define regXPB_CLG_CFG6 0x0022
+#define regXPB_CLG_CFG6_BASE_IDX 0
+#define regXPB_CLG_CFG7 0x0023
+#define regXPB_CLG_CFG7_BASE_IDX 0
+#define regXPB_CLG_EXTRA0 0x0024
+#define regXPB_CLG_EXTRA0_BASE_IDX 0
+#define regXPB_CLG_EXTRA1 0x0025
+#define regXPB_CLG_EXTRA1_BASE_IDX 0
+#define regXPB_CLG_EXTRA_MSK 0x0026
+#define regXPB_CLG_EXTRA_MSK_BASE_IDX 0
+#define regXPB_LB_ADDR 0x0027
+#define regXPB_LB_ADDR_BASE_IDX 0
+#define regXPB_HST_CFG 0x0028
+#define regXPB_HST_CFG_BASE_IDX 0
+#define regXPB_P2P_BAR_CFG 0x0029
+#define regXPB_P2P_BAR_CFG_BASE_IDX 0
+#define regXPB_P2P_BAR0 0x002a
+#define regXPB_P2P_BAR0_BASE_IDX 0
+#define regXPB_P2P_BAR1 0x002b
+#define regXPB_P2P_BAR1_BASE_IDX 0
+#define regXPB_P2P_BAR2 0x002c
+#define regXPB_P2P_BAR2_BASE_IDX 0
+#define regXPB_P2P_BAR3 0x002d
+#define regXPB_P2P_BAR3_BASE_IDX 0
+#define regXPB_P2P_BAR4 0x002e
+#define regXPB_P2P_BAR4_BASE_IDX 0
+#define regXPB_P2P_BAR5 0x002f
+#define regXPB_P2P_BAR5_BASE_IDX 0
+#define regXPB_P2P_BAR6 0x0030
+#define regXPB_P2P_BAR6_BASE_IDX 0
+#define regXPB_P2P_BAR7 0x0031
+#define regXPB_P2P_BAR7_BASE_IDX 0
+#define regXPB_P2P_BAR_SETUP 0x0032
+#define regXPB_P2P_BAR_SETUP_BASE_IDX 0
+#define regXPB_P2P_BAR_DELTA_ABOVE 0x0034
+#define regXPB_P2P_BAR_DELTA_ABOVE_BASE_IDX 0
+#define regXPB_P2P_BAR_DELTA_BELOW 0x0035
+#define regXPB_P2P_BAR_DELTA_BELOW_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR0 0x0036
+#define regXPB_PEER_SYS_BAR0_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR1 0x0037
+#define regXPB_PEER_SYS_BAR1_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR2 0x0038
+#define regXPB_PEER_SYS_BAR2_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR3 0x0039
+#define regXPB_PEER_SYS_BAR3_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR4 0x003a
+#define regXPB_PEER_SYS_BAR4_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR5 0x003b
+#define regXPB_PEER_SYS_BAR5_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR6 0x003c
+#define regXPB_PEER_SYS_BAR6_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR7 0x003d
+#define regXPB_PEER_SYS_BAR7_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR8 0x003e
+#define regXPB_PEER_SYS_BAR8_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR9 0x003f
+#define regXPB_PEER_SYS_BAR9_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR10 0x0040
+#define regXPB_PEER_SYS_BAR10_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR11 0x0041
+#define regXPB_PEER_SYS_BAR11_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR12 0x0042
+#define regXPB_PEER_SYS_BAR12_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR13 0x0043
+#define regXPB_PEER_SYS_BAR13_BASE_IDX 0
+#define regXPB_CLK_GAT 0x0044
+#define regXPB_CLK_GAT_BASE_IDX 0
+#define regXPB_INTF_CFG 0x0045
+#define regXPB_INTF_CFG_BASE_IDX 0
+#define regXPB_INTF_STS 0x0046
+#define regXPB_INTF_STS_BASE_IDX 0
+#define regXPB_PIPE_STS 0x0047
+#define regXPB_PIPE_STS_BASE_IDX 0
+#define regXPB_WCB_STS 0x0048
+#define regXPB_WCB_STS_BASE_IDX 0
+#define regXPB_MAP_INVERT_FLUSH_NUM_LSB 0x0049
+#define regXPB_MAP_INVERT_FLUSH_NUM_LSB_BASE_IDX 0
+#define regXPB_STICKY 0x004a
+#define regXPB_STICKY_BASE_IDX 0
+#define regXPB_STICKY_W1C 0x004b
+#define regXPB_STICKY_W1C_BASE_IDX 0
+#define regXPB_SUB_CTRL 0x004c
+#define regXPB_SUB_CTRL_BASE_IDX 0
+#define regXPB_PERF_KNOBS 0x004d
+#define regXPB_PERF_KNOBS_BASE_IDX 0
+#define regXPB_MISC_CFG 0x004e
+#define regXPB_MISC_CFG_BASE_IDX 0
+#define regXPB_INTF_CFG2 0x004f
+#define regXPB_INTF_CFG2_BASE_IDX 0
+#define regXPB_CLG_EXTRA_RD 0x0050
+#define regXPB_CLG_EXTRA_RD_BASE_IDX 0
+#define regXPB_CLG_EXTRA_MSK_RD 0x0051
+#define regXPB_CLG_EXTRA_MSK_RD_BASE_IDX 0
+#define regXPB_CLG_GFX_MATCH 0x0052
+#define regXPB_CLG_GFX_MATCH_BASE_IDX 0
+#define regXPB_CLG_GFX_MATCH_VLD 0x0053
+#define regXPB_CLG_GFX_MATCH_VLD_BASE_IDX 0
+#define regXPB_CLG_GFX_MATCH_MSK 0x0054
+#define regXPB_CLG_GFX_MATCH_MSK_BASE_IDX 0
+#define regXPB_CLG_MM_MATCH 0x0055
+#define regXPB_CLG_MM_MATCH_BASE_IDX 0
+#define regXPB_CLG_MM_MATCH_VLD 0x0056
+#define regXPB_CLG_MM_MATCH_VLD_BASE_IDX 0
+#define regXPB_CLG_MM_MATCH_MSK 0x0057
+#define regXPB_CLG_MM_MATCH_MSK_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING0 0x005a
+#define regXPB_CLG_GFX_UNITID_MAPPING0_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING1 0x005b
+#define regXPB_CLG_GFX_UNITID_MAPPING1_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING2 0x005c
+#define regXPB_CLG_GFX_UNITID_MAPPING2_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING3 0x005d
+#define regXPB_CLG_GFX_UNITID_MAPPING3_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING4 0x005e
+#define regXPB_CLG_GFX_UNITID_MAPPING4_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING5 0x005f
+#define regXPB_CLG_GFX_UNITID_MAPPING5_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING6 0x0060
+#define regXPB_CLG_GFX_UNITID_MAPPING6_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING7 0x0061
+#define regXPB_CLG_GFX_UNITID_MAPPING7_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING0 0x0062
+#define regXPB_CLG_MM_UNITID_MAPPING0_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING1 0x0063
+#define regXPB_CLG_MM_UNITID_MAPPING1_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING2 0x0064
+#define regXPB_CLG_MM_UNITID_MAPPING2_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING3 0x0065
+#define regXPB_CLG_MM_UNITID_MAPPING3_BASE_IDX 0
+
+
+// addressBlock: athub_rpbdec
+// base address: 0x31d0
+#define regATHUB_SHARED_VIRT_RESET_REQ 0x0074
+#define regATHUB_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regATHUB_MEM_POWER_LS 0x007f
+#define regATHUB_MEM_POWER_LS_BASE_IDX 0
+#define regATHUB_MISC_CNTL 0x0080
+#define regATHUB_MISC_CNTL_BASE_IDX 0
+#define regRPB_PASSPW_CONF 0x0081
+#define regRPB_PASSPW_CONF_BASE_IDX 0
+#define regRPB_BLOCKLEVEL_CONF 0x0082
+#define regRPB_BLOCKLEVEL_CONF_BASE_IDX 0
+#define regRPB_TAG_CONF 0x0083
+#define regRPB_TAG_CONF_BASE_IDX 0
+#define regRPB_ARB_CNTL 0x0085
+#define regRPB_ARB_CNTL_BASE_IDX 0
+#define regRPB_ARB_CNTL2 0x0086
+#define regRPB_ARB_CNTL2_BASE_IDX 0
+#define regRPB_BIF_CNTL 0x0087
+#define regRPB_BIF_CNTL_BASE_IDX 0
+#define regRPB_BIF_CNTL2 0x0088
+#define regRPB_BIF_CNTL2_BASE_IDX 0
+#define regRPB_SDPPORT_CNTL 0x0089
+#define regRPB_SDPPORT_CNTL_BASE_IDX 0
+#define regRPB_NBIF_SDPPORT_CNTL 0x008a
+#define regRPB_NBIF_SDPPORT_CNTL_BASE_IDX 0
+#define regRPB_DEINTRLV_COMBINE_CNTL 0x008c
+#define regRPB_DEINTRLV_COMBINE_CNTL_BASE_IDX 0
+#define regRPB_VC_SWITCH_RDWR 0x008d
+#define regRPB_VC_SWITCH_RDWR_BASE_IDX 0
+#define regRPB_ATS_CNTL3 0x008e
+#define regRPB_ATS_CNTL3_BASE_IDX 0
+#define regRPB_DF_SDPPORT_CNTL 0x008f
+#define regRPB_DF_SDPPORT_CNTL_BASE_IDX 0
+#define regRPB_ATS_CNTL 0x0090
+#define regRPB_ATS_CNTL_BASE_IDX 0
+#define regRPB_ATS_CNTL2 0x0091
+#define regRPB_ATS_CNTL2_BASE_IDX 0
+#define regRPB_PERFCOUNTER0_CFG 0x0092
+#define regRPB_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER1_CFG 0x0093
+#define regRPB_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER2_CFG 0x0094
+#define regRPB_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER3_CFG 0x0095
+#define regRPB_PERFCOUNTER3_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER_RSLT_CNTL 0x0096
+#define regRPB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regRPB_PERF_COUNTER_CNTL 0x0097
+#define regRPB_PERF_COUNTER_CNTL_BASE_IDX 0
+#define regRPB_PERFCOUNTER_HI 0x0098
+#define regRPB_PERFCOUNTER_HI_BASE_IDX 0
+#define regRPB_PERFCOUNTER_LO 0x0099
+#define regRPB_PERFCOUNTER_LO_BASE_IDX 0
+#define regRPB_PERF_COUNTER_STATUS 0x009a
+#define regRPB_PERF_COUNTER_STATUS_BASE_IDX 0
+
+#endif
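These generated offset/BASE_IDX pairs are consumed through the SOC15 register helpers together with the sh_mask header added next. A minimal sketch, assuming the usual RREG32_SOC15/REG_GET_FIELD plumbing and that the 4.1.0 sh_mask file defines the ATHUB_MEM_POWER_LS_EN field as earlier ATHUB versions do:

	/* Sketch: reading one ATHUB 4.1.0 register via the SOC15 helpers. */
	#include "athub/athub_4_1_0_offset.h"
	#include "athub/athub_4_1_0_sh_mask.h"

	static uint32_t athub_v4_1_get_mem_ls(struct amdgpu_device *adev)
	{
		uint32_t data = RREG32_SOC15(ATHUB, 0, regATHUB_MEM_POWER_LS);

		return REG_GET_FIELD(data, ATHUB_MEM_POWER_LS,
				     ATHUB_MEM_POWER_LS_EN);
	}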
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h
new file mode 100644
index 000000000000..56499fd62239
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h
@@ -0,0 +1,1348 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _athub_4_1_0_SH_MASK_HEADER
+#define _athub_4_1_0_SH_MASK_HEADER
+
+
+// addressBlock: athub_xpbdec
+//XPB_RTR_SRC_APRTR0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR1
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR2
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR3
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR4
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR5
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR6
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR7
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR8
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR9
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR10
+#define XPB_RTR_SRC_APRTR10__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR10__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR11
+#define XPB_RTR_SRC_APRTR11__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR11__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR12
+#define XPB_RTR_SRC_APRTR12__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR12__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR13
+#define XPB_RTR_SRC_APRTR13__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR13__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_DEST_MAP0
+#define XPB_RTR_DEST_MAP0__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP1
+#define XPB_RTR_DEST_MAP1__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP2
+#define XPB_RTR_DEST_MAP2__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP3
+#define XPB_RTR_DEST_MAP3__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP4
+#define XPB_RTR_DEST_MAP4__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP4__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP4__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP5
+#define XPB_RTR_DEST_MAP5__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP5__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP5__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP6
+#define XPB_RTR_DEST_MAP6__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP6__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP6__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP7
+#define XPB_RTR_DEST_MAP7__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP7__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP7__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP8
+#define XPB_RTR_DEST_MAP8__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP8__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP8__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP9
+#define XPB_RTR_DEST_MAP9__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP9__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP9__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP10
+#define XPB_RTR_DEST_MAP10__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP10__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP10__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP10__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP10__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP10__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP10__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP10__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP10__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP10__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP10__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP10__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP11
+#define XPB_RTR_DEST_MAP11__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP11__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP11__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP11__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP11__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP11__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP11__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP11__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP11__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP11__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP11__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP11__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP12
+#define XPB_RTR_DEST_MAP12__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP12__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP12__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP12__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP12__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP12__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP12__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP12__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP12__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP12__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP12__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP12__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP13
+#define XPB_RTR_DEST_MAP13__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP13__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP13__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP13__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP13__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP13__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP13__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP13__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP13__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP13__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP13__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP13__APRTR_SIZE_MASK 0x7C000000L
+//XPB_CLG_CFG0
+#define XPB_CLG_CFG0__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG0__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG0__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG0__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG0__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG0__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG1
+#define XPB_CLG_CFG1__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG1__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG1__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG1__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG1__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG1__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG2
+#define XPB_CLG_CFG2__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG2__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG2__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG2__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG2__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG2__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG3
+#define XPB_CLG_CFG3__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG3__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG3__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG3__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG3__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG3__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG4
+#define XPB_CLG_CFG4__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG4__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG4__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG4__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG4__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG4__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG5
+#define XPB_CLG_CFG5__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG5__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG5__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG5__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG5__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG5__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG6
+#define XPB_CLG_CFG6__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG6__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG6__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG6__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG6__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG6__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG7
+#define XPB_CLG_CFG7__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG7__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG7__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG7__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG7__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG7__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_EXTRA0
+#define XPB_CLG_EXTRA0__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA0__CMP0_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA0__VLD0__SHIFT 0xd
+#define XPB_CLG_EXTRA0__CLG0_NUM__SHIFT 0xe
+#define XPB_CLG_EXTRA0__CMP0_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA0__CMP0_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA0__VLD0_MASK 0x00002000L
+#define XPB_CLG_EXTRA0__CLG0_NUM_MASK 0x0001C000L
+//XPB_CLG_EXTRA1
+#define XPB_CLG_EXTRA1__CMP1_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA1__CMP1_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA1__VLD1__SHIFT 0xd
+#define XPB_CLG_EXTRA1__CLG1_NUM__SHIFT 0xe
+#define XPB_CLG_EXTRA1__CMP1_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA1__CMP1_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA1__VLD1_MASK 0x00002000L
+#define XPB_CLG_EXTRA1__CLG1_NUM_MASK 0x0001C000L
+//XPB_CLG_EXTRA_MSK
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH__SHIFT 0xd
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH_MASK 0x001FE000L
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW_MASK 0x03E00000L
+//XPB_LB_ADDR
+#define XPB_LB_ADDR__CMP0__SHIFT 0x0
+#define XPB_LB_ADDR__MASK0__SHIFT 0xa
+#define XPB_LB_ADDR__CMP1__SHIFT 0x14
+#define XPB_LB_ADDR__MASK1__SHIFT 0x1a
+#define XPB_LB_ADDR__CMP0_MASK 0x000003FFL
+#define XPB_LB_ADDR__MASK0_MASK 0x000FFC00L
+#define XPB_LB_ADDR__CMP1_MASK 0x03F00000L
+#define XPB_LB_ADDR__MASK1_MASK 0xFC000000L
+//XPB_HST_CFG
+#define XPB_HST_CFG__BAR_UP_WR_CMD__SHIFT 0x0
+#define XPB_HST_CFG__BAR_UP_WR_CMD_MASK 0x00000001L
+//XPB_P2P_BAR_CFG
+#define XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x0
+#define XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x4
+#define XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x6
+#define XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x7
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x8
+#define XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x9
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0xa
+#define XPB_P2P_BAR_CFG__RD_EN__SHIFT 0xb
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0xc
+#define XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000FL
+#define XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L
+#define XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L
+#define XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L
+#define XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L
+#define XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L
+//XPB_P2P_BAR0
+#define XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR0__VALID__SHIFT 0xc
+#define XPB_P2P_BAR0__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR0__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR0__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR0__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR0__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR0__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR1
+#define XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR1__VALID__SHIFT 0xc
+#define XPB_P2P_BAR1__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR1__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR1__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR1__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR1__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR1__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR2
+#define XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR2__VALID__SHIFT 0xc
+#define XPB_P2P_BAR2__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR2__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR2__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR2__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR2__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR2__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR3
+#define XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR3__VALID__SHIFT 0xc
+#define XPB_P2P_BAR3__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR3__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR3__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR3__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR3__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR3__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR4
+#define XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR4__VALID__SHIFT 0xc
+#define XPB_P2P_BAR4__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR4__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR4__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR4__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR4__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR4__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR5
+#define XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR5__VALID__SHIFT 0xc
+#define XPB_P2P_BAR5__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR5__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR5__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR5__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR5__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR5__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR6
+#define XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR6__VALID__SHIFT 0xc
+#define XPB_P2P_BAR6__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR6__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR6__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR6__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR6__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR6__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR7
+#define XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR7__VALID__SHIFT 0xc
+#define XPB_P2P_BAR7__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR7__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR7__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR7__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR7__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR7__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_SETUP
+#define XPB_P2P_BAR_SETUP__SEL__SHIFT 0x0
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR_SETUP__VALID__SHIFT 0xc
+#define XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR_SETUP__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR_SETUP__SEL_MASK 0x000000FFL
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR_SETUP__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_DELTA_ABOVE
+#define XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0FFFFF00L
+//XPB_P2P_BAR_DELTA_BELOW
+#define XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0FFFFF00L
+//XPB_PEER_SYS_BAR0
+#define XPB_PEER_SYS_BAR0__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR1
+#define XPB_PEER_SYS_BAR1__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR2
+#define XPB_PEER_SYS_BAR2__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR3
+#define XPB_PEER_SYS_BAR3__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR4
+#define XPB_PEER_SYS_BAR4__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR4__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR5
+#define XPB_PEER_SYS_BAR5__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR5__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR6
+#define XPB_PEER_SYS_BAR6__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR6__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR7
+#define XPB_PEER_SYS_BAR7__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR7__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR8
+#define XPB_PEER_SYS_BAR8__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR8__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR9
+#define XPB_PEER_SYS_BAR9__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR9__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR10
+#define XPB_PEER_SYS_BAR10__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR10__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR10__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR10__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR11
+#define XPB_PEER_SYS_BAR11__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR11__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR11__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR11__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR12
+#define XPB_PEER_SYS_BAR12__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR12__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR12__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR12__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR13
+#define XPB_PEER_SYS_BAR13__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR13__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR13__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR13__ADDR_MASK 0xFFFFFFFEL
+//XPB_CLK_GAT
+#define XPB_CLK_GAT__ONDLY__SHIFT 0x0
+#define XPB_CLK_GAT__OFFDLY__SHIFT 0x6
+#define XPB_CLK_GAT__RDYDLY__SHIFT 0xc
+#define XPB_CLK_GAT__ENABLE__SHIFT 0x12
+#define XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x13
+#define XPB_CLK_GAT__ONDLY_MASK 0x0000003FL
+#define XPB_CLK_GAT__OFFDLY_MASK 0x00000FC0L
+#define XPB_CLK_GAT__RDYDLY_MASK 0x0003F000L
+#define XPB_CLK_GAT__ENABLE_MASK 0x00040000L
+#define XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L
+//XPB_INTF_CFG
+#define XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x8
+#define XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x10
+#define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK__SHIFT 0x17
+#define XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x1b
+#define XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x1d
+#define XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x1e
+#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA__SHIFT 0x1f
+#define XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000FF00L
+#define XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007F0000L
+#define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK_MASK 0x00800000L
+#define XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L
+#define XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L
+#define XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L
+#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA_MASK 0x80000000L
+//XPB_INTF_STS
+#define XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x8
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0xf
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x10
+#define XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x11
+#define XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x12
+#define XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x13
+#define XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007F00L
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L
+#define XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L
+#define XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L
+#define XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07F80000L
+//XPB_PIPE_STS
+#define XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x0
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x1
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x8
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0xf
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x10
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x11
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x12
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x13
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x14
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x15
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x16
+#define XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x17
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x18
+#define XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000FEL
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007F00L
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L
+#define XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xFF000000L
+//XPB_WCB_STS
+#define XPB_WCB_STS__PBUF_VLD__SHIFT 0x0
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x10
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x17
+#define XPB_WCB_STS__PBUF_VLD_MASK 0x0000FFFFL
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007F0000L
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3F800000L
+//XPB_MAP_INVERT_FLUSH_NUM_LSB
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x0
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000FFFFL
+//XPB_STICKY
+#define XPB_STICKY__BITS__SHIFT 0x0
+#define XPB_STICKY__BITS_MASK 0xFFFFFFFFL
+//XPB_STICKY_W1C
+#define XPB_STICKY_W1C__BITS__SHIFT 0x0
+#define XPB_STICKY_W1C__BITS_MASK 0xFFFFFFFFL
+//XPB_SUB_CTRL
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x0
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x1
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x2
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x3
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x4
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x5
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x6
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x7
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x8
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x9
+#define XPB_SUB_CTRL__RESET_CNS__SHIFT 0xa
+#define XPB_SUB_CTRL__RESET_RTR__SHIFT 0xb
+#define XPB_SUB_CTRL__RESET_RET__SHIFT 0xc
+#define XPB_SUB_CTRL__RESET_MAP__SHIFT 0xd
+#define XPB_SUB_CTRL__RESET_WCB__SHIFT 0xe
+#define XPB_SUB_CTRL__RESET_HST__SHIFT 0xf
+#define XPB_SUB_CTRL__RESET_HOP__SHIFT 0x10
+#define XPB_SUB_CTRL__RESET_SID__SHIFT 0x11
+#define XPB_SUB_CTRL__RESET_SRB__SHIFT 0x12
+#define XPB_SUB_CTRL__RESET_CGR__SHIFT 0x13
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L
+#define XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L
+#define XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L
+#define XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L
+#define XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L
+#define XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L
+#define XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L
+#define XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L
+#define XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L
+#define XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L
+#define XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L
+//XPB_PERF_KNOBS
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x0
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x6
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0xc
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003FL
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000FC0L
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003F000L
+//XPB_MISC_CFG
+#define XPB_MISC_CFG__FIELDNAME0__SHIFT 0x0
+#define XPB_MISC_CFG__FIELDNAME1__SHIFT 0x8
+#define XPB_MISC_CFG__FIELDNAME2__SHIFT 0x10
+#define XPB_MISC_CFG__FIELDNAME3__SHIFT 0x18
+#define XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x1f
+#define XPB_MISC_CFG__FIELDNAME0_MASK 0x000000FFL
+#define XPB_MISC_CFG__FIELDNAME1_MASK 0x0000FF00L
+#define XPB_MISC_CFG__FIELDNAME2_MASK 0x00FF0000L
+#define XPB_MISC_CFG__FIELDNAME3_MASK 0x7F000000L
+#define XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L
+//XPB_INTF_CFG2
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000FFL
+//XPB_CLG_EXTRA_RD
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_RD__CMP0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_RD__VLD0__SHIFT 0xb
+#define XPB_CLG_EXTRA_RD__CLG0_NUM__SHIFT 0xc
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH__SHIFT 0xf
+#define XPB_CLG_EXTRA_RD__CMP1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x1a
+#define XPB_CLG_EXTRA_RD__CLG1_NUM__SHIFT 0x1b
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_RD__CMP0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_RD__VLD0_MASK 0x00000800L
+#define XPB_CLG_EXTRA_RD__CLG0_NUM_MASK 0x00007000L
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH_MASK 0x001F8000L
+#define XPB_CLG_EXTRA_RD__CMP1_LOW_MASK 0x03E00000L
+#define XPB_CLG_EXTRA_RD__VLD1_MASK 0x04000000L
+#define XPB_CLG_EXTRA_RD__CLG1_NUM_MASK 0x38000000L
+//XPB_CLG_EXTRA_MSK_RD
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH__SHIFT 0xb
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW__SHIFT 0x11
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH_MASK 0x0001F800L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW_MASK 0x003E0000L
+//XPB_CLG_GFX_MATCH
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID__SHIFT 0x8
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID__SHIFT 0x10
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID__SHIFT 0x18
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID_MASK 0x000000FFL
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID_MASK 0x0000FF00L
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID_MASK 0x00FF0000L
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID_MASK 0xFF000000L
+//XPB_CLG_GFX_MATCH_VLD
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L
+//XPB_CLG_GFX_MATCH_MSK
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L
+//XPB_CLG_MM_MATCH
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID__SHIFT 0x8
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID__SHIFT 0x10
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID__SHIFT 0x18
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID_MASK 0x000000FFL
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID_MASK 0x0000FF00L
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID_MASK 0x00FF0000L
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID_MASK 0xFF000000L
+//XPB_CLG_MM_MATCH_VLD
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L
+//XPB_CLG_MM_MATCH_MSK
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L
+//XPB_CLG_GFX_UNITID_MAPPING0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING1
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING2
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING3
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING4
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING5
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING7
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING1
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING2
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING3
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+
+
+// addressBlock: athub_rpbdec
+//ATHUB_SHARED_VIRT_RESET_REQ
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF_MASK 0x7FFFFFFFL
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//ATHUB_MEM_POWER_LS
+#define ATHUB_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATHUB_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATHUB_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATHUB_MEM_POWER_LS__LS_HOLD_MASK 0x0007FFC0L
+//ATHUB_MISC_CNTL
+#define ATHUB_MISC_CNTL__CG_OFFDLY__SHIFT 0x0
+#define ATHUB_MISC_CNTL__CG_ENABLE__SHIFT 0x6
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE__SHIFT 0x7
+#define ATHUB_MISC_CNTL__PG_ENABLE__SHIFT 0x8
+#define ATHUB_MISC_CNTL__PG_OFFDLY__SHIFT 0x9
+#define ATHUB_MISC_CNTL__ALWAYS_BUSY__SHIFT 0xf
+#define ATHUB_MISC_CNTL__CG_STATUS__SHIFT 0x10
+#define ATHUB_MISC_CNTL__PG_STATUS__SHIFT 0x11
+#define ATHUB_MISC_CNTL__RPB_BUSY__SHIFT 0x12
+#define ATHUB_MISC_CNTL__XPB_BUSY__SHIFT 0x13
+#define ATHUB_MISC_CNTL__ATS_BUSY__SHIFT 0x14
+#define ATHUB_MISC_CNTL__SDPNCS_BUSY__SHIFT 0x15
+#define ATHUB_MISC_CNTL__DFPORT_BUSY__SHIFT 0x16
+#define ATHUB_MISC_CNTL__SWITCH_CNTL__SHIFT 0x17
+#define ATHUB_MISC_CNTL__LS_DELAY_ENABLE__SHIFT 0x18
+#define ATHUB_MISC_CNTL__LS_DELAY_TIME__SHIFT 0x19
+#define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE__SHIFT 0x1e
+#define ATHUB_MISC_CNTL__CG_OFFDLY_MASK 0x0000003FL
+#define ATHUB_MISC_CNTL__CG_ENABLE_MASK 0x00000040L
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK 0x00000080L
+#define ATHUB_MISC_CNTL__PG_ENABLE_MASK 0x00000100L
+#define ATHUB_MISC_CNTL__PG_OFFDLY_MASK 0x00007E00L
+#define ATHUB_MISC_CNTL__ALWAYS_BUSY_MASK 0x00008000L
+#define ATHUB_MISC_CNTL__CG_STATUS_MASK 0x00010000L
+#define ATHUB_MISC_CNTL__PG_STATUS_MASK 0x00020000L
+#define ATHUB_MISC_CNTL__RPB_BUSY_MASK 0x00040000L
+#define ATHUB_MISC_CNTL__XPB_BUSY_MASK 0x00080000L
+#define ATHUB_MISC_CNTL__ATS_BUSY_MASK 0x00100000L
+#define ATHUB_MISC_CNTL__SDPNCS_BUSY_MASK 0x00200000L
+#define ATHUB_MISC_CNTL__DFPORT_BUSY_MASK 0x00400000L
+#define ATHUB_MISC_CNTL__SWITCH_CNTL_MASK 0x00800000L
+#define ATHUB_MISC_CNTL__LS_DELAY_ENABLE_MASK 0x01000000L
+#define ATHUB_MISC_CNTL__LS_DELAY_TIME_MASK 0x3E000000L
+#define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE_MASK 0x40000000L
+//RPB_PASSPW_CONF
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE__SHIFT 0x0
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE__SHIFT 0x1
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE__SHIFT 0x2
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN__SHIFT 0x3
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE__SHIFT 0x4
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN__SHIFT 0x5
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE__SHIFT 0x6
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN__SHIFT 0x7
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE__SHIFT 0x8
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN__SHIFT 0x9
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE__SHIFT 0xa
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN__SHIFT 0xb
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE__SHIFT 0xc
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN__SHIFT 0xd
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE__SHIFT 0xe
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE__SHIFT 0xf
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE__SHIFT 0x10
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE__SHIFT 0x11
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE__SHIFT 0x12
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE__SHIFT 0x13
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE__SHIFT 0x14
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN__SHIFT 0x15
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE__SHIFT 0x16
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN__SHIFT 0x17
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE_MASK 0x00000001L
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE_MASK 0x00000002L
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_MASK 0x00000004L
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN_MASK 0x00000008L
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_MASK 0x00000010L
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN_MASK 0x00000020L
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_MASK 0x00000040L
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN_MASK 0x00000080L
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_MASK 0x00000100L
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN_MASK 0x00000200L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_MASK 0x00000400L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN_MASK 0x00000800L
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_MASK 0x00001000L
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN_MASK 0x00002000L
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE_MASK 0x00004000L
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE_MASK 0x00008000L
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE_MASK 0x00010000L
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE_MASK 0x00020000L
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE_MASK 0x00040000L
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE_MASK 0x00080000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_MASK 0x00100000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN_MASK 0x00200000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_MASK 0x00400000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN_MASK 0x00800000L
+//RPB_BLOCKLEVEL_CONF
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE__SHIFT 0x0
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x2
+#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL__SHIFT 0x3
+#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL__SHIFT 0x5
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL__SHIFT 0x7
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL__SHIFT 0x9
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE__SHIFT 0xb
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xd
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE__SHIFT 0xe
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x10
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE__SHIFT 0x11
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x13
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_MASK 0x00000003L
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00000004L
+#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL_MASK 0x00000018L
+#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL_MASK 0x00000060L
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL_MASK 0x00000180L
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL_MASK 0x00000600L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_MASK 0x00001800L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00002000L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_MASK 0x0000C000L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00010000L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_MASK 0x00060000L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00080000L
+//RPB_TAG_CONF
+#define RPB_TAG_CONF__RPB_IO_RD__SHIFT 0x0
+#define RPB_TAG_CONF__RPB_IO_WR__SHIFT 0xa
+#define RPB_TAG_CONF__RPB_IO_MAX_LIMIT__SHIFT 0x14
+#define RPB_TAG_CONF__RPB_IO_RD_MASK 0x000003FFL
+#define RPB_TAG_CONF__RPB_IO_WR_MASK 0x000FFC00L
+#define RPB_TAG_CONF__RPB_IO_MAX_LIMIT_MASK 0x7FF00000L
+//RPB_ARB_CNTL
+#define RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL__ARB_MODE__SHIFT 0x18
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE__SHIFT 0x19
+#define RPB_ARB_CNTL__RPB_VC0_CRD__SHIFT 0x1a
+#define RPB_ARB_CNTL__DISABLE_FED__SHIFT 0x1f
+#define RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_ARB_CNTL__ARB_MODE_MASK 0x01000000L
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE_MASK 0x02000000L
+#define RPB_ARB_CNTL__RPB_VC0_CRD_MASK 0x7C000000L
+#define RPB_ARB_CNTL__DISABLE_FED_MASK 0x80000000L
+//RPB_ARB_CNTL2
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL2__RPB_VC1_CRD__SHIFT 0x18
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_ARB_CNTL2__RPB_VC1_CRD_MASK 0x1F000000L
+//RPB_BIF_CNTL
+#define RPB_BIF_CNTL__VC0_SWITCH_NUM__SHIFT 0x0
+#define RPB_BIF_CNTL__VC1_SWITCH_NUM__SHIFT 0x8
+#define RPB_BIF_CNTL__VC2_SWITCH_NUM__SHIFT 0x10
+#define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN__SHIFT 0x18
+#define RPB_BIF_CNTL__TR_QOS_VC__SHIFT 0x19
+#define RPB_BIF_CNTL__RESERVE__SHIFT 0x1c
+#define RPB_BIF_CNTL__VC0_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_BIF_CNTL__VC1_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_BIF_CNTL__VC2_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN_MASK 0x01000000L
+#define RPB_BIF_CNTL__TR_QOS_VC_MASK 0x0E000000L
+#define RPB_BIF_CNTL__RESERVE_MASK 0xF0000000L
+//RPB_BIF_CNTL2
+#define RPB_BIF_CNTL2__ARB_MODE__SHIFT 0x0
+#define RPB_BIF_CNTL2__DRAIN_VC_NUM__SHIFT 0x1
+#define RPB_BIF_CNTL2__SWITCH_ENABLE__SHIFT 0x3
+#define RPB_BIF_CNTL2__SWITCH_THRESHOLD__SHIFT 0x4
+#define RPB_BIF_CNTL2__PAGE_PRI_EN__SHIFT 0xc
+#define RPB_BIF_CNTL2__VC5_TR_PRI_EN__SHIFT 0xd
+#define RPB_BIF_CNTL2__VC0_TR_PRI_EN__SHIFT 0xe
+#define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE__SHIFT 0xf
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE__SHIFT 0x10
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_EN__SHIFT 0x11
+#define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN__SHIFT 0x12
+#define RPB_BIF_CNTL2__ATHUB_NBIF_UNITID__SHIFT 0x13
+#define RPB_BIF_CNTL2__RESERVE__SHIFT 0x1e
+#define RPB_BIF_CNTL2__ARB_MODE_MASK 0x00000001L
+#define RPB_BIF_CNTL2__DRAIN_VC_NUM_MASK 0x00000006L
+#define RPB_BIF_CNTL2__SWITCH_ENABLE_MASK 0x00000008L
+#define RPB_BIF_CNTL2__SWITCH_THRESHOLD_MASK 0x00000FF0L
+#define RPB_BIF_CNTL2__PAGE_PRI_EN_MASK 0x00001000L
+#define RPB_BIF_CNTL2__VC5_TR_PRI_EN_MASK 0x00002000L
+#define RPB_BIF_CNTL2__VC0_TR_PRI_EN_MASK 0x00004000L
+#define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE_MASK 0x00008000L
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_MASK 0x00010000L
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_EN_MASK 0x00020000L
+#define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN_MASK 0x00040000L
+#define RPB_BIF_CNTL2__ATHUB_NBIF_UNITID_MASK 0x3FF80000L
+#define RPB_BIF_CNTL2__RESERVE_MASK 0xC0000000L
+//RPB_SDPPORT_CNTL
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE__SHIFT 0x0
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE__SHIFT 0x1
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT__SHIFT 0x3
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER__SHIFT 0x4
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS__SHIFT 0x5
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD__SHIFT 0x6
+#define RPB_SDPPORT_CNTL__RESERVE1__SHIFT 0xa
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN__SHIFT 0x16
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN__SHIFT 0x1a
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b
+#define RPB_SDPPORT_CNTL__CG_BUSY_PORT__SHIFT 0x1c
+#define RPB_SDPPORT_CNTL__RESERVE__SHIFT 0x1d
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE_MASK 0x00000001L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE_MASK 0x00000006L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT_MASK 0x00000008L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER_MASK 0x00000010L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS_MASK 0x00000020L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD_MASK 0x000003C0L
+#define RPB_SDPPORT_CNTL__RESERVE1_MASK 0x003FFC00L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN_MASK 0x00400000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN_MASK 0x04000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L
+#define RPB_SDPPORT_CNTL__CG_BUSY_PORT_MASK 0x10000000L
+#define RPB_SDPPORT_CNTL__RESERVE_MASK 0xE0000000L
+//RPB_NBIF_SDPPORT_CNTL
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD__SHIFT 0x0
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD__SHIFT 0x8
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD__SHIFT 0x10
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD__SHIFT 0x18
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD_MASK 0x000000FFL
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD_MASK 0x0000FF00L
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD_MASK 0x00FF0000L
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD_MASK 0xFF000000L
+//RPB_DEINTRLV_COMBINE_CNTL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER__SHIFT 0x0
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN__SHIFT 0x4
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE__SHIFT 0x5
+#define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD__SHIFT 0x6
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN__SHIFT 0xe
+#define RPB_DEINTRLV_COMBINE_CNTL__RESERVE__SHIFT 0xf
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER_MASK 0x0000000FL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN_MASK 0x00000010L
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE_MASK 0x00000020L
+#define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD_MASK 0x00003FC0L
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN_MASK 0x00004000L
+#define RPB_DEINTRLV_COMBINE_CNTL__RESERVE_MASK 0xFFFF8000L
+//RPB_VC_SWITCH_RDWR
+#define RPB_VC_SWITCH_RDWR__MODE__SHIFT 0x0
+#define RPB_VC_SWITCH_RDWR__NUM_RD__SHIFT 0x2
+#define RPB_VC_SWITCH_RDWR__NUM_WR__SHIFT 0xa
+#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD__SHIFT 0x12
+#define RPB_VC_SWITCH_RDWR__CENTER_MARGIN__SHIFT 0x1a
+#define RPB_VC_SWITCH_RDWR__MODE_MASK 0x00000003L
+#define RPB_VC_SWITCH_RDWR__NUM_RD_MASK 0x000003FCL
+#define RPB_VC_SWITCH_RDWR__NUM_WR_MASK 0x0003FC00L
+#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD_MASK 0x03FC0000L
+#define RPB_VC_SWITCH_RDWR__CENTER_MARGIN_MASK 0xFC000000L
+//RPB_ATS_CNTL3
+#define RPB_ATS_CNTL3__RPB_ATS_VC5_TR__SHIFT 0x0
+#define RPB_ATS_CNTL3__RPB_ATS_VC0_TR__SHIFT 0x9
+#define RPB_ATS_CNTL3__RPB_ATS_PR__SHIFT 0x12
+#define RPB_ATS_CNTL3__RPB_ATS_VC5_TR_MASK 0x000001FFL
+#define RPB_ATS_CNTL3__RPB_ATS_VC0_TR_MASK 0x0003FE00L
+#define RPB_ATS_CNTL3__RPB_ATS_PR_MASK 0x07FC0000L
+//RPB_DF_SDPPORT_CNTL
+#define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD__SHIFT 0x0
+#define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD__SHIFT 0x6
+#define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD__SHIFT 0xe
+#define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE__SHIFT 0x12
+#define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER__SHIFT 0x13
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_EA_CHECK_ENABLE__SHIFT 0x1b
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_CHECK_ENABLE__SHIFT 0x1c
+#define RPB_DF_SDPPORT_CNTL__DF_RAAT_CHECK_ENABLE__SHIFT 0x1d
+#define RPB_DF_SDPPORT_CNTL__DF_ATAR_CHECK_ENABLE__SHIFT 0x1e
+#define RPB_DF_SDPPORT_CNTL__DF_VC3_READ_CHECK__SHIFT 0x1f
+#define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD_MASK 0x0000003FL
+#define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD_MASK 0x00003FC0L
+#define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD_MASK 0x0003C000L
+#define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE_MASK 0x00040000L
+#define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER_MASK 0x07F80000L
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_EA_CHECK_ENABLE_MASK 0x08000000L
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_CHECK_ENABLE_MASK 0x10000000L
+#define RPB_DF_SDPPORT_CNTL__DF_RAAT_CHECK_ENABLE_MASK 0x20000000L
+#define RPB_DF_SDPPORT_CNTL__DF_ATAR_CHECK_ENABLE_MASK 0x40000000L
+#define RPB_DF_SDPPORT_CNTL__DF_VC3_READ_CHECK_MASK 0x80000000L
+//RPB_ATS_CNTL
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE__SHIFT 0x0
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE__SHIFT 0x1
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD__SHIFT 0x2
+#define RPB_ATS_CNTL__TIME_SLICE__SHIFT 0x7
+#define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM__SHIFT 0xf
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM__SHIFT 0x13
+#define RPB_ATS_CNTL__WR_AT__SHIFT 0x17
+#define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE__SHIFT 0x19
+#define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE__SHIFT 0x1a
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE_MASK 0x00000001L
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE_MASK 0x00000002L
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD_MASK 0x0000007CL
+#define RPB_ATS_CNTL__TIME_SLICE_MASK 0x00007F80L
+#define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM_MASK 0x00078000L
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM_MASK 0x00780000L
+#define RPB_ATS_CNTL__WR_AT_MASK 0x01800000L
+#define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE_MASK 0x02000000L
+#define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE_MASK 0x04000000L
+//RPB_ATS_CNTL2
+#define RPB_ATS_CNTL2__INVAL_COM_CMD__SHIFT 0x0
+#define RPB_ATS_CNTL2__TRANS_CMD__SHIFT 0x6
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD__SHIFT 0xc
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE__SHIFT 0x12
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE__SHIFT 0x15
+#define RPB_ATS_CNTL2__VENDOR_ID__SHIFT 0x18
+#define RPB_ATS_CNTL2__RPB_VC5_CRD__SHIFT 0x1a
+#define RPB_ATS_CNTL2__INVAL_COM_CMD_MASK 0x0000003FL
+#define RPB_ATS_CNTL2__TRANS_CMD_MASK 0x00000FC0L
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD_MASK 0x0003F000L
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE_MASK 0x001C0000L
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE_MASK 0x00E00000L
+#define RPB_ATS_CNTL2__VENDOR_ID_MASK 0x03000000L
+#define RPB_ATS_CNTL2__RPB_VC5_CRD_MASK 0x7C000000L
+//RPB_PERFCOUNTER0_CFG
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER1_CFG
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER2_CFG
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER3_CFG
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER_RSLT_CNTL
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//RPB_PERF_COUNTER_CNTL
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER__SHIFT 0x2
+#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS__SHIFT 0x3
+#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION__SHIFT 0x4
+#define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS__SHIFT 0x5
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0__SHIFT 0x9
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1__SHIFT 0xe
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2__SHIFT 0x13
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3__SHIFT 0x18
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT_MASK 0x00000003L
+#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER_MASK 0x00000004L
+#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS_MASK 0x00000008L
+#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION_MASK 0x00000010L
+#define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS_MASK 0x000001E0L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0_MASK 0x00003E00L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1_MASK 0x0007C000L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2_MASK 0x00F80000L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3_MASK 0x1F000000L
+//RPB_PERFCOUNTER_HI
+#define RPB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define RPB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//RPB_PERFCOUNTER_LO
+#define RPB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define RPB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//RPB_PERF_COUNTER_STATUS
+#define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE__SHIFT 0x0
+#define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE_MASK 0xFFFFFFFFL
+
+#endif
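
The _SHIFT/_MASK pairs in the header above follow the usual amdgpu field-access convention: a field is read by masking and shifting, and written by clearing the mask and OR-ing in the shifted value. Below is a minimal, self-contained C sketch of that idiom using one of the RPB_ARB_CNTL fields defined above; the helper function names are illustrative only (amdgpu itself wraps this pattern in its REG_GET_FIELD()/REG_SET_FIELD() macros, built on the same token-pasted __SHIFT/_MASK names), not part of these headers.

#include <stdint.h>
#include <stdio.h>

#define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8
#define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK   0x0000FF00L

/* Illustrative helpers demonstrating the mask-and-shift idiom. */
static uint32_t get_wr_switch_num(uint32_t reg)
{
	return (reg & RPB_ARB_CNTL__WR_SWITCH_NUM_MASK) >>
	       RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT;
}

static uint32_t set_wr_switch_num(uint32_t reg, uint32_t val)
{
	reg &= ~(uint32_t)RPB_ARB_CNTL__WR_SWITCH_NUM_MASK;
	reg |= (val << RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT) &
	       RPB_ARB_CNTL__WR_SWITCH_NUM_MASK;
	return reg;
}

int main(void)
{
	uint32_t reg = set_wr_switch_num(0, 0x20);
	printf("WR_SWITCH_NUM = 0x%x\n", get_wr_switch_num(reg)); /* prints 0x20 */
	return 0;
}
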
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h
index 222fa8d13269..a05bf8e4f58d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h
@@ -626,6 +626,8 @@
#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
#define regDTBCLK_DTO3_MODULO 0x0022
#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
#define regPHYASYMCLK_CLOCK_CNTL 0x0052
#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
@@ -638,6 +640,8 @@
#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYFSYMCLK_CLOCK_CNTL 0x0057
#define regPHYFSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
#define regDCCG_GATE_DISABLE_CNTL3 0x005a
#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h
index 8ddb03a1dc39..df84941bbe5b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h
@@ -1933,6 +1933,11 @@
//DTBCLK_DTO3_MODULO
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
//PHYASYMCLK_CLOCK_CNTL
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4
@@ -1967,6 +1972,11 @@
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x10
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000003L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00010000L
//DCCG_GATE_DISABLE_CNTL3
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h
index 7cf0a625277b..33b5d9be06b1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h
@@ -4802,6 +4802,10 @@
#define regCM0_CM_DEALPHA_BASE_IDX 2
#define regCM0_CM_COEF_FORMAT 0x0d8c
#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5210,6 +5214,10 @@
#define regCM1_CM_DEALPHA_BASE_IDX 2
#define regCM1_CM_COEF_FORMAT 0x0ef7
#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5618,6 +5626,10 @@
#define regCM2_CM_DEALPHA_BASE_IDX 2
#define regCM2_CM_COEF_FORMAT 0x1062
#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -6026,6 +6038,10 @@
#define regCM3_CM_DEALPHA_BASE_IDX 2
#define regCM3_CM_COEF_FORMAT 0x11cd
#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -10568,6 +10584,8 @@
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -10697,6 +10715,8 @@
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -10827,6 +10847,8 @@
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -10957,6 +10979,8 @@
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
index fca72e2ec929..ff77b71167eb 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
@@ -16556,6 +16556,13 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+
#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
@@ -27176,6 +27183,23 @@
#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+
+//DIG0_DIG_BE_CLK_CNTL
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET__SHIFT 0x6
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET_MASK 0x00000040L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+
#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
@@ -36716,6 +36740,17 @@
#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
@@ -38488,6 +38523,18 @@
#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX_MASK 0x000001FFL
#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA__SHIFT 0x0
#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//DWB_OGAM_LUT_CONTROL
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG__SHIFT 0x8
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE__SHIFT 0x10
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000030L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG_MASK 0x00000100L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL_MASK 0x00001000L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE_MASK 0x00010000L
+
#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4
#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc
@@ -52008,6 +52055,14 @@
#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT 0x10
#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS__SHIFT 0x11
#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS__SHIFT 0x18
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS__SHIFT 0x19
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS__SHIFT 0x1a
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS__SHIFT 0x1b
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS__SHIFT 0x1c
#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL_MASK 0x0000007FL
#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS_MASK 0x00000200L
#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS_MASK 0x00000400L
@@ -52019,6 +52074,16 @@
#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK 0x00010000L
#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS_MASK 0x00020000L
#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS_MASK 0x00100000L
+
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS_MASK 0x00800000L
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS_MASK 0x01000000L
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS_MASK 0x02000000L
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS_MASK 0x04000000L
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS_MASK 0x08000000L
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS_MASK 0x10000000L
+
#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT 0x0
#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK 0x00000001L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h
new file mode 100644
index 000000000000..9c16611af06b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_7_0_0_OFFSET_HEADER
+#define _hdp_7_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: hdp_hdpdec
+// base address: 0x3c80
+#define regHDP_MMHUB_TLVL 0x0008
+#define regHDP_MMHUB_TLVL_BASE_IDX 0
+#define regHDP_MMHUB_UNITID 0x0009
+#define regHDP_MMHUB_UNITID_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE 0x0040
+#define regHDP_NONSURFACE_BASE_BASE_IDX 0
+#define regHDP_NONSURFACE_INFO 0x0041
+#define regHDP_NONSURFACE_INFO_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE_HI 0x0042
+#define regHDP_NONSURFACE_BASE_HI_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS 0x00c4
+#define regHDP_SURFACE_WRITE_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS 0x00c5
+#define regHDP_SURFACE_READ_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS_CLR 0x00c6
+#define regHDP_SURFACE_WRITE_FLAGS_CLR_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS_CLR 0x00c7
+#define regHDP_SURFACE_READ_FLAGS_CLR_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS 0x00c8
+#define regHDP_NONSURF_FLAGS_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS_CLR 0x00c9
+#define regHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
+#define regHDP_SW_SEMAPHORE 0x00cd
+#define regHDP_SW_SEMAPHORE_BASE_IDX 0
+#define regHDP_DEBUG0 0x00ce
+#define regHDP_DEBUG0_BASE_IDX 0
+#define regHDP_LAST_SURFACE_HIT 0x00d0
+#define regHDP_LAST_SURFACE_HIT_BASE_IDX 0
+#define regHDP_OUTSTANDING_REQ 0x00d1
+#define regHDP_OUTSTANDING_REQ_BASE_IDX 0
+#define regHDP_HOST_PATH_CNTL 0x00d2
+#define regHDP_HOST_PATH_CNTL_BASE_IDX 0
+#define regHDP_MISC_CNTL 0x00d3
+#define regHDP_MISC_CNTL_BASE_IDX 0
+#define regHDP_MEM_POWER_CTRL 0x00d4
+#define regHDP_MEM_POWER_CTRL_BASE_IDX 0
+#define regHDP_CLK_CNTL 0x00d5
+#define regHDP_CLK_CNTL_BASE_IDX 0
+#define regHDP_MMHUB_CNTL 0x00d6
+#define regHDP_MMHUB_CNTL_BASE_IDX 0
+#define regHDP_XDP_BUSY_STS 0x00d7
+#define regHDP_XDP_BUSY_STS_BASE_IDX 0
+#define regHDP_XDP_MMHUB_ERROR 0x00d8
+#define regHDP_XDP_MMHUB_ERROR_BASE_IDX 0
+#define regHDP_XDP_MMHUB_ERROR_CLR 0x00da
+#define regHDP_XDP_MMHUB_ERROR_CLR_BASE_IDX 0
+#define regHDP_VERSION 0x00db
+#define regHDP_VERSION_BASE_IDX 0
+#define regHDP_MEMIO_CNTL 0x00f6
+#define regHDP_MEMIO_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_ADDR 0x00f7
+#define regHDP_MEMIO_ADDR_BASE_IDX 0
+#define regHDP_MEMIO_STATUS 0x00f8
+#define regHDP_MEMIO_STATUS_BASE_IDX 0
+#define regHDP_MEMIO_WR_DATA 0x00f9
+#define regHDP_MEMIO_WR_DATA_BASE_IDX 0
+#define regHDP_MEMIO_RD_DATA 0x00fa
+#define regHDP_MEMIO_RD_DATA_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_FIRST 0x0100
+#define regHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
+#define regHDP_XDP_D2H_FLUSH 0x0101
+#define regHDP_XDP_D2H_FLUSH_BASE_IDX 0
+#define regHDP_XDP_D2H_BAR_UPDATE 0x0102
+#define regHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_3 0x0103
+#define regHDP_XDP_D2H_RSVD_3_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_4 0x0104
+#define regHDP_XDP_D2H_RSVD_4_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_5 0x0105
+#define regHDP_XDP_D2H_RSVD_5_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_6 0x0106
+#define regHDP_XDP_D2H_RSVD_6_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_7 0x0107
+#define regHDP_XDP_D2H_RSVD_7_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_8 0x0108
+#define regHDP_XDP_D2H_RSVD_8_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_9 0x0109
+#define regHDP_XDP_D2H_RSVD_9_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_10 0x010a
+#define regHDP_XDP_D2H_RSVD_10_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_11 0x010b
+#define regHDP_XDP_D2H_RSVD_11_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_12 0x010c
+#define regHDP_XDP_D2H_RSVD_12_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_13 0x010d
+#define regHDP_XDP_D2H_RSVD_13_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_14 0x010e
+#define regHDP_XDP_D2H_RSVD_14_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_15 0x010f
+#define regHDP_XDP_D2H_RSVD_15_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_16 0x0110
+#define regHDP_XDP_D2H_RSVD_16_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_17 0x0111
+#define regHDP_XDP_D2H_RSVD_17_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_18 0x0112
+#define regHDP_XDP_D2H_RSVD_18_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_19 0x0113
+#define regHDP_XDP_D2H_RSVD_19_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_20 0x0114
+#define regHDP_XDP_D2H_RSVD_20_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_21 0x0115
+#define regHDP_XDP_D2H_RSVD_21_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_22 0x0116
+#define regHDP_XDP_D2H_RSVD_22_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_23 0x0117
+#define regHDP_XDP_D2H_RSVD_23_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_24 0x0118
+#define regHDP_XDP_D2H_RSVD_24_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_25 0x0119
+#define regHDP_XDP_D2H_RSVD_25_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_26 0x011a
+#define regHDP_XDP_D2H_RSVD_26_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_27 0x011b
+#define regHDP_XDP_D2H_RSVD_27_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_28 0x011c
+#define regHDP_XDP_D2H_RSVD_28_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_29 0x011d
+#define regHDP_XDP_D2H_RSVD_29_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_30 0x011e
+#define regHDP_XDP_D2H_RSVD_30_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_31 0x011f
+#define regHDP_XDP_D2H_RSVD_31_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_32 0x0120
+#define regHDP_XDP_D2H_RSVD_32_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_33 0x0121
+#define regHDP_XDP_D2H_RSVD_33_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_34 0x0122
+#define regHDP_XDP_D2H_RSVD_34_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_LAST 0x0123
+#define regHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR_CFG 0x0124
+#define regHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_OFFSET 0x0125
+#define regHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR0 0x0126
+#define regHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR1 0x0127
+#define regHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR2 0x0128
+#define regHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR3 0x0129
+#define regHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR4 0x012a
+#define regHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR5 0x012b
+#define regHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR6 0x012c
+#define regHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
+#define regHDP_XDP_HDP_MBX_MC_CFG 0x012d
+#define regHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_MC_CFG 0x012e
+#define regHDP_XDP_HDP_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HST_CFG 0x012f
+#define regHDP_XDP_HST_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_IPH_CFG 0x0131
+#define regHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR0 0x0134
+#define regHDP_XDP_P2P_BAR0_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR1 0x0135
+#define regHDP_XDP_P2P_BAR1_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR2 0x0136
+#define regHDP_XDP_P2P_BAR2_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR3 0x0137
+#define regHDP_XDP_P2P_BAR3_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR4 0x0138
+#define regHDP_XDP_P2P_BAR4_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR5 0x0139
+#define regHDP_XDP_P2P_BAR5_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR6 0x013a
+#define regHDP_XDP_P2P_BAR6_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR7 0x013b
+#define regHDP_XDP_P2P_BAR7_BASE_IDX 0
+#define regHDP_XDP_FLUSH_ARMED_STS 0x013c
+#define regHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
+#define regHDP_XDP_FLUSH_CNTR0_STS 0x013d
+#define regHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
+#define regHDP_XDP_STICKY 0x013f
+#define regHDP_XDP_STICKY_BASE_IDX 0
+#define regHDP_XDP_CHKN 0x0140
+#define regHDP_XDP_CHKN_BASE_IDX 0
+#define regHDP_XDP_BARS_ADDR_39_36 0x0144
+#define regHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2 0x0149
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h
new file mode 100644
index 000000000000..afb73c5a4018
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h
@@ -0,0 +1,735 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_7_0_0_SH_MASK_HEADER
+#define _hdp_7_0_0_SH_MASK_HEADER
+
+
+// addressBlock: hdp_hdpdec
+//HDP_MMHUB_TLVL
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x0000000FL
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x000000F0L
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000F00L
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x0000F000L
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x000F0000L
+//HDP_MMHUB_UNITID
+#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
+#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
+#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
+#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
+//HDP_NONSURFACE_BASE
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
+//HDP_NONSURFACE_INFO
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
+#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
+#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
+//HDP_NONSURFACE_BASE_HI
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
+//HDP_SURFACE_WRITE_FLAGS
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG_MASK 0x00000002L
+//HDP_SURFACE_WRITE_FLAGS_CLR
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS_CLR
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_NONSURF_FLAGS
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+//HDP_NONSURF_FLAGS_CLR
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_SW_SEMAPHORE
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
+//HDP_DEBUG0
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
+#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
+//HDP_LAST_SURFACE_HIT
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
+//HDP_OUTSTANDING_REQ
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
+//HDP_HOST_PATH_CNTL
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+//HDP_MISC_CNTL
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE__SHIFT 0x8
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE__SHIFT 0x9
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK__SHIFT 0xe
+#define HDP_MISC_CNTL__NACK_ENABLE__SHIFT 0x13
+#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE__SHIFT 0x14
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE__SHIFT 0x16
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0x17
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE_MASK 0x00000100L
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE_MASK 0x00000200L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK_MASK 0x0000C000L
+#define HDP_MISC_CNTL__NACK_ENABLE_MASK 0x00080000L
+#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE_MASK 0x00100000L
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE_MASK 0x00400000L
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00800000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
+//HDP_MEM_POWER_CTRL
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN__SHIFT 0x1
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN__SHIFT 0x2
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN__SHIFT 0x3
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN__SHIFT 0x11
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN__SHIFT 0x12
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN__SHIFT 0x13
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK 0x00000004L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK 0x00000008L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN_MASK 0x00040000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN_MASK 0x00080000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L
+//HDP_CLK_CNTL
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
+#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
+#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//HDP_MMHUB_CNTL
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE__SHIFT 0x4
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE__SHIFT 0x5
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE__SHIFT 0x6
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE_MASK 0x00000010L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE_MASK 0x00000020L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE_MASK 0x00000040L
+//HDP_XDP_BUSY_STS
+#define HDP_XDP_BUSY_STS__BUSY_BITS_0__SHIFT 0x0
+#define HDP_XDP_BUSY_STS__BUSY_BITS_1__SHIFT 0x1
+#define HDP_XDP_BUSY_STS__BUSY_BITS_2__SHIFT 0x2
+#define HDP_XDP_BUSY_STS__BUSY_BITS_3__SHIFT 0x3
+#define HDP_XDP_BUSY_STS__BUSY_BITS_4__SHIFT 0x4
+#define HDP_XDP_BUSY_STS__BUSY_BITS_5__SHIFT 0x5
+#define HDP_XDP_BUSY_STS__BUSY_BITS_6__SHIFT 0x6
+#define HDP_XDP_BUSY_STS__BUSY_BITS_7__SHIFT 0x7
+#define HDP_XDP_BUSY_STS__BUSY_BITS_8__SHIFT 0x8
+#define HDP_XDP_BUSY_STS__BUSY_BITS_9__SHIFT 0x9
+#define HDP_XDP_BUSY_STS__BUSY_BITS_10__SHIFT 0xa
+#define HDP_XDP_BUSY_STS__BUSY_BITS_11__SHIFT 0xb
+#define HDP_XDP_BUSY_STS__BUSY_BITS_12__SHIFT 0xc
+#define HDP_XDP_BUSY_STS__BUSY_BITS_13__SHIFT 0xd
+#define HDP_XDP_BUSY_STS__BUSY_BITS_14__SHIFT 0xe
+#define HDP_XDP_BUSY_STS__BUSY_BITS_15__SHIFT 0xf
+#define HDP_XDP_BUSY_STS__BUSY_BITS_16__SHIFT 0x10
+#define HDP_XDP_BUSY_STS__BUSY_BITS_17__SHIFT 0x11
+#define HDP_XDP_BUSY_STS__BUSY_BITS_18__SHIFT 0x12
+#define HDP_XDP_BUSY_STS__BUSY_BITS_19__SHIFT 0x13
+#define HDP_XDP_BUSY_STS__BUSY_BITS_20__SHIFT 0x14
+#define HDP_XDP_BUSY_STS__BUSY_BITS_21__SHIFT 0x15
+#define HDP_XDP_BUSY_STS__BUSY_BITS_22__SHIFT 0x16
+#define HDP_XDP_BUSY_STS__BUSY_BITS_23__SHIFT 0x17
+#define HDP_XDP_BUSY_STS__Z_FENCE_BIT__SHIFT 0x18
+#define HDP_XDP_BUSY_STS__BUSY_BITS_0_MASK 0x00000001L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_1_MASK 0x00000002L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_2_MASK 0x00000004L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_3_MASK 0x00000008L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_4_MASK 0x00000010L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_5_MASK 0x00000020L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_6_MASK 0x00000040L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_7_MASK 0x00000080L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_8_MASK 0x00000100L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_9_MASK 0x00000200L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_10_MASK 0x00000400L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_11_MASK 0x00000800L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_12_MASK 0x00001000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_13_MASK 0x00002000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_14_MASK 0x00004000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_15_MASK 0x00008000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_16_MASK 0x00010000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_17_MASK 0x00020000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_18_MASK 0x00040000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_19_MASK 0x00080000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_20_MASK 0x00100000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_21_MASK 0x00200000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_22_MASK 0x00400000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_23_MASK 0x00800000L
+#define HDP_XDP_BUSY_STS__Z_FENCE_BIT_MASK 0x01000000L
+//HDP_XDP_MMHUB_ERROR
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
+//HDP_XDP_MMHUB_ERROR_CLR
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR__SHIFT 0x4
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR__SHIFT 0xc
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR__SHIFT 0x10
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR_MASK 0x00000010L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR_MASK 0x00001000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR_MASK 0x00010000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR_MASK 0x00800000L
+//HDP_VERSION
+#define HDP_VERSION__MINVER__SHIFT 0x0
+#define HDP_VERSION__MAJVER__SHIFT 0x8
+#define HDP_VERSION__REV__SHIFT 0x10
+#define HDP_VERSION__MINVER_MASK 0x000000FFL
+#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
+#define HDP_VERSION__REV_MASK 0x00FF0000L
+//HDP_MEMIO_CNTL
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
+#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
+#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
+#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
+//HDP_MEMIO_ADDR
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
+//HDP_MEMIO_STATUS
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+//HDP_MEMIO_WR_DATA
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
+//HDP_MEMIO_RD_DATA
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_FIRST
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_FLUSH
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+//HDP_XDP_D2H_BAR_UPDATE
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+//HDP_XDP_D2H_RSVD_3
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_4
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_5
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_6
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_7
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_8
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_9
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_10
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_11
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_12
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_13
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_14
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_15
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_16
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_17
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_18
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_19
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_20
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_21
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_22
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_23
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_24
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_25
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_26
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_27
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_28
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_29
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_30
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_31
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_32
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_33
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_34
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_LAST
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_P2P_BAR_CFG
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+//HDP_XDP_P2P_MBX_OFFSET
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
+//HDP_XDP_P2P_MBX_ADDR0
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR1
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR2
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR3
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR4
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR5
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR6
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_HDP_MBX_MC_CFG
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
+//HDP_XDP_HDP_MC_CFG
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE__SHIFT 0x1
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE__SHIFT 0x2
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE_MASK 0x00000001L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE_MASK 0x00000002L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE_MASK 0x00000004L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
+//HDP_XDP_HST_CFG
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
+//HDP_XDP_HDP_IPH_CFG
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+//HDP_XDP_P2P_BAR0
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR1
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR2
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR3
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR4
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR5
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR6
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR7
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+//HDP_XDP_FLUSH_ARMED_STS
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
+//HDP_XDP_FLUSH_CNTR0_STS
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
+//HDP_XDP_STICKY
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
+//HDP_XDP_CHKN
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
+//HDP_XDP_BARS_ADDR_39_36
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
+//HDP_XDP_MC_VM_FB_LOCATION_BASE
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
+//HDP_XDP_GPU_IOV_VIOLATION_LOG
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x01F00000L
+//HDP_XDP_GPU_IOV_VIOLATION_LOG2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h
new file mode 100644
index 000000000000..c783b8ea4698
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_0_0_OFFSET_HEADER
+#define _lsdma_7_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: lsdma0_lsdma0dec
+// base address: 0x45000
+#define regLSDMA_UCODE_ADDR 0x0000
+#define regLSDMA_UCODE_ADDR_BASE_IDX 0
+#define regLSDMA_UCODE_DATA 0x0001
+#define regLSDMA_UCODE_DATA_BASE_IDX 0
+#define regLSDMA_ERROR_INJECT_CNTL 0x0004
+#define regLSDMA_ERROR_INJECT_CNTL_BASE_IDX 0
+#define regLSDMA_ERROR_INJECT_SELECT 0x0005
+#define regLSDMA_ERROR_INJECT_SELECT_BASE_IDX 0
+#define regLSDMA_CONTEXT_GROUP_BOUNDARY 0x001f
+#define regLSDMA_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define regLSDMA_RB_RPTR_FETCH_HI 0x0020
+#define regLSDMA_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regLSDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define regLSDMA_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regLSDMA_RB_RPTR_FETCH 0x0022
+#define regLSDMA_RB_RPTR_FETCH_BASE_IDX 0
+#define regLSDMA_IB_OFFSET_FETCH 0x0023
+#define regLSDMA_IB_OFFSET_FETCH_BASE_IDX 0
+#define regLSDMA_PROGRAM 0x0024
+#define regLSDMA_PROGRAM_BASE_IDX 0
+#define regLSDMA_STATUS_REG 0x0025
+#define regLSDMA_STATUS_REG_BASE_IDX 0
+#define regLSDMA_STATUS1_REG 0x0026
+#define regLSDMA_STATUS1_REG_BASE_IDX 0
+#define regLSDMA_RD_BURST_CNTL 0x0027
+#define regLSDMA_RD_BURST_CNTL_BASE_IDX 0
+#define regLSDMA_HBM_PAGE_CONFIG 0x0028
+#define regLSDMA_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regLSDMA_UCODE_CHECKSUM 0x0029
+#define regLSDMA_UCODE_CHECKSUM_BASE_IDX 0
+#define regLSDMA_FREEZE 0x002b
+#define regLSDMA_FREEZE_BASE_IDX 0
+#define regLSDMA_DCC_CNTL 0x002d
+#define regLSDMA_DCC_CNTL_BASE_IDX 0
+#define regLSDMA_POWER_GATING 0x002e
+#define regLSDMA_POWER_GATING_BASE_IDX 0
+#define regLSDMA_PGFSM_CONFIG 0x002f
+#define regLSDMA_PGFSM_CONFIG_BASE_IDX 0
+#define regLSDMA_PGFSM_WRITE 0x0030
+#define regLSDMA_PGFSM_WRITE_BASE_IDX 0
+#define regLSDMA_PGFSM_READ 0x0031
+#define regLSDMA_PGFSM_READ_BASE_IDX 0
+#define regLSDMA_BA_THRESHOLD 0x0033
+#define regLSDMA_BA_THRESHOLD_BASE_IDX 0
+#define regLSDMA_ID 0x0034
+#define regLSDMA_ID_BASE_IDX 0
+#define regLSDMA_VERSION 0x0035
+#define regLSDMA_VERSION_BASE_IDX 0
+#define regLSDMA_EDC_COUNTER 0x0036
+#define regLSDMA_EDC_COUNTER_BASE_IDX 0
+#define regLSDMA_EDC_COUNTER2 0x0037
+#define regLSDMA_EDC_COUNTER2_BASE_IDX 0
+#define regLSDMA_STATUS2_REG 0x0038
+#define regLSDMA_STATUS2_REG_BASE_IDX 0
+#define regLSDMA_ATOMIC_CNTL 0x0039
+#define regLSDMA_ATOMIC_CNTL_BASE_IDX 0
+#define regLSDMA_ATOMIC_PREOP_LO 0x003a
+#define regLSDMA_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regLSDMA_ATOMIC_PREOP_HI 0x003b
+#define regLSDMA_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regLSDMA_UTCL1_CNTL 0x003c
+#define regLSDMA_UTCL1_CNTL_BASE_IDX 0
+#define regLSDMA_UTCL1_WATERMK 0x003d
+#define regLSDMA_UTCL1_WATERMK_BASE_IDX 0
+#define regLSDMA_UTCL1_RD_STATUS 0x003e
+#define regLSDMA_UTCL1_RD_STATUS_BASE_IDX 0
+#define regLSDMA_UTCL1_WR_STATUS 0x003f
+#define regLSDMA_UTCL1_WR_STATUS_BASE_IDX 0
+#define regLSDMA_UTCL1_INV0 0x0040
+#define regLSDMA_UTCL1_INV0_BASE_IDX 0
+#define regLSDMA_UTCL1_INV1 0x0041
+#define regLSDMA_UTCL1_INV1_BASE_IDX 0
+#define regLSDMA_UTCL1_INV2 0x0042
+#define regLSDMA_UTCL1_INV2_BASE_IDX 0
+#define regLSDMA_UTCL1_RD_XNACK0 0x0043
+#define regLSDMA_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regLSDMA_UTCL1_RD_XNACK1 0x0044
+#define regLSDMA_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regLSDMA_UTCL1_WR_XNACK0 0x0045
+#define regLSDMA_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regLSDMA_UTCL1_WR_XNACK1 0x0046
+#define regLSDMA_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regLSDMA_UTCL1_TIMEOUT 0x0047
+#define regLSDMA_UTCL1_TIMEOUT_BASE_IDX 0
+#define regLSDMA_UTCL1_PAGE 0x0048
+#define regLSDMA_UTCL1_PAGE_BASE_IDX 0
+#define regLSDMA_RELAX_ORDERING_LUT 0x004a
+#define regLSDMA_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regLSDMA_CHICKEN_BITS_2 0x004b
+#define regLSDMA_CHICKEN_BITS_2_BASE_IDX 0
+#define regLSDMA_STATUS3_REG 0x004c
+#define regLSDMA_STATUS3_REG_BASE_IDX 0
+#define regLSDMA_PHYSICAL_ADDR_LO 0x004d
+#define regLSDMA_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PHYSICAL_ADDR_HI 0x004e
+#define regLSDMA_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regLSDMA_ECC_CNTL 0x004f
+#define regLSDMA_ECC_CNTL_BASE_IDX 0
+#define regLSDMA_ERROR_LOG 0x0050
+#define regLSDMA_ERROR_LOG_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY0 0x0051
+#define regLSDMA_PUB_DUMMY0_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY1 0x0052
+#define regLSDMA_PUB_DUMMY1_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY2 0x0053
+#define regLSDMA_PUB_DUMMY2_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY3 0x0054
+#define regLSDMA_PUB_DUMMY3_BASE_IDX 0
+#define regLSDMA_F32_COUNTER 0x0055
+#define regLSDMA_F32_COUNTER_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER0_CFG 0x0057
+#define regLSDMA_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER1_CFG 0x0058
+#define regLSDMA_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x0059
+#define regLSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regLSDMA_PERFCNT_MISC_CNTL 0x005a
+#define regLSDMA_PERFCNT_MISC_CNTL_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER_LO 0x005b
+#define regLSDMA_PERFCNT_PERFCOUNTER_LO_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER_HI 0x005c
+#define regLSDMA_PERFCNT_PERFCOUNTER_HI_BASE_IDX 0
+#define regLSDMA_CRD_CNTL 0x005d
+#define regLSDMA_CRD_CNTL_BASE_IDX 0
+#define regLSDMA_GPU_IOV_VIOLATION_LOG 0x005e
+#define regLSDMA_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regLSDMA_ULV_CNTL 0x005f
+#define regLSDMA_ULV_CNTL_BASE_IDX 0
+#define regLSDMA_EA_DBIT_ADDR_DATA 0x0060
+#define regLSDMA_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regLSDMA_EA_DBIT_ADDR_INDEX 0x0061
+#define regLSDMA_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regLSDMA_STATUS4_REG 0x0063
+#define regLSDMA_STATUS4_REG_BASE_IDX 0
+#define regLSDMA_CE_CTRL 0x0066
+#define regLSDMA_CE_CTRL_BASE_IDX 0
+#define regLSDMA_EXCEPTION_STATUS 0x0067
+#define regLSDMA_EXCEPTION_STATUS_BASE_IDX 0
+#define regLSDMA_INT_CNTL 0x0069
+#define regLSDMA_INT_CNTL_BASE_IDX 0
+#define regLSDMA_MEM_POWER_CTRL 0x006a
+#define regLSDMA_MEM_POWER_CTRL_BASE_IDX 0
+#define regLSDMA_CLK_CTRL 0x006b
+#define regLSDMA_CLK_CTRL_BASE_IDX 0
+#define regLSDMA_CNTL 0x006c
+#define regLSDMA_CNTL_BASE_IDX 0
+#define regLSDMA_CHICKEN_BITS 0x006d
+#define regLSDMA_CHICKEN_BITS_BASE_IDX 0
+#define regLSDMA_PIO_SRC_ADDR_LO 0x0070
+#define regLSDMA_PIO_SRC_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_SRC_ADDR_HI 0x0071
+#define regLSDMA_PIO_SRC_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_LO 0x0072
+#define regLSDMA_PIO_DST_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_HI 0x0073
+#define regLSDMA_PIO_DST_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_COMMAND 0x0074
+#define regLSDMA_PIO_COMMAND_BASE_IDX 0
+#define regLSDMA_PIO_CONSTFILL_DATA 0x0075
+#define regLSDMA_PIO_CONSTFILL_DATA_BASE_IDX 0
+#define regLSDMA_PIO_CONTROL 0x0076
+#define regLSDMA_PIO_CONTROL_BASE_IDX 0
+#define regLSDMA_PIO_STATUS 0x007a
+#define regLSDMA_PIO_STATUS_BASE_IDX 0
+#define regLSDMA_PF_PIO_STATUS 0x007b
+#define regLSDMA_PF_PIO_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_CNTL 0x0080
+#define regLSDMA_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_BASE 0x0081
+#define regLSDMA_QUEUE0_RB_BASE_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_BASE_HI 0x0082
+#define regLSDMA_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR 0x0083
+#define regLSDMA_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR_HI 0x0084
+#define regLSDMA_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR 0x0085
+#define regLSDMA_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_HI 0x0086
+#define regLSDMA_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_CNTL 0x0087
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x0088
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x0089
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_HI 0x008a
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_LO 0x008b
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_CNTL 0x008c
+#define regLSDMA_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_RPTR 0x008d
+#define regLSDMA_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_OFFSET 0x008e
+#define regLSDMA_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_BASE_LO 0x008f
+#define regLSDMA_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_BASE_HI 0x0090
+#define regLSDMA_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_SIZE 0x0091
+#define regLSDMA_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regLSDMA_QUEUE0_SKIP_CNTL 0x0092
+#define regLSDMA_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_CSA_ADDR_LO 0x0093
+#define regLSDMA_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_CSA_ADDR_HI 0x0094
+#define regLSDMA_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_AQL_CNTL 0x0095
+#define regLSDMA_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_MINOR_PTR_UPDATE 0x0096
+#define regLSDMA_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regLSDMA_QUEUE0_CNTL 0x0097
+#define regLSDMA_QUEUE0_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_PREEMPT 0x0098
+#define regLSDMA_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_SUB_REMAIN 0x0099
+#define regLSDMA_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regLSDMA_QUEUE0_PREEMPT 0x009a
+#define regLSDMA_QUEUE0_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE0_CONTEXT_STATUS 0x009b
+#define regLSDMA_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE0_STATUS 0x009c
+#define regLSDMA_QUEUE0_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE0_DOORBELL 0x009d
+#define regLSDMA_QUEUE0_DOORBELL_BASE_IDX 0
+#define regLSDMA_QUEUE0_DOORBELL_OFFSET 0x009e
+#define regLSDMA_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE0_DOORBELL_LOG 0x009f
+#define regLSDMA_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regLSDMA_QUEUE0_WATERMARK 0x00a0
+#define regLSDMA_QUEUE0_WATERMARK_BASE_IDX 0
+#define regLSDMA_QUEUE0_DUMMY0 0x00a1
+#define regLSDMA_QUEUE0_DUMMY0_BASE_IDX 0
+#define regLSDMA_QUEUE0_DUMMY1 0x00a2
+#define regLSDMA_QUEUE0_DUMMY1_BASE_IDX 0
+#define regLSDMA_QUEUE0_DUMMY2 0x00a3
+#define regLSDMA_QUEUE0_DUMMY2_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA0 0x00c0
+#define regLSDMA_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA1 0x00c1
+#define regLSDMA_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA2 0x00c2
+#define regLSDMA_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA3 0x00c3
+#define regLSDMA_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA4 0x00c4
+#define regLSDMA_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA5 0x00c5
+#define regLSDMA_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA6 0x00c6
+#define regLSDMA_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA7 0x00c7
+#define regLSDMA_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA8 0x00c8
+#define regLSDMA_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA9 0x00c9
+#define regLSDMA_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA10 0x00ca
+#define regLSDMA_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_CNTL 0x00cb
+#define regLSDMA_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_CNTL 0x00d8
+#define regLSDMA_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_BASE 0x00d9
+#define regLSDMA_QUEUE1_RB_BASE_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_BASE_HI 0x00da
+#define regLSDMA_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR 0x00db
+#define regLSDMA_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR_HI 0x00dc
+#define regLSDMA_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR 0x00dd
+#define regLSDMA_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_HI 0x00de
+#define regLSDMA_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_CNTL 0x00df
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x00e0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x00e1
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_HI 0x00e2
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_LO 0x00e3
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_CNTL 0x00e4
+#define regLSDMA_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_RPTR 0x00e5
+#define regLSDMA_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_OFFSET 0x00e6
+#define regLSDMA_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_BASE_LO 0x00e7
+#define regLSDMA_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_BASE_HI 0x00e8
+#define regLSDMA_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_SIZE 0x00e9
+#define regLSDMA_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regLSDMA_QUEUE1_SKIP_CNTL 0x00ea
+#define regLSDMA_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_CSA_ADDR_LO 0x00eb
+#define regLSDMA_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_CSA_ADDR_HI 0x00ec
+#define regLSDMA_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_AQL_CNTL 0x00ed
+#define regLSDMA_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_MINOR_PTR_UPDATE 0x00ee
+#define regLSDMA_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regLSDMA_QUEUE1_CNTL 0x00ef
+#define regLSDMA_QUEUE1_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_PREEMPT 0x00f0
+#define regLSDMA_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_SUB_REMAIN 0x00f1
+#define regLSDMA_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regLSDMA_QUEUE1_PREEMPT 0x00f2
+#define regLSDMA_QUEUE1_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE1_CONTEXT_STATUS 0x00f3
+#define regLSDMA_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE1_STATUS 0x00f4
+#define regLSDMA_QUEUE1_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE1_DOORBELL 0x00f5
+#define regLSDMA_QUEUE1_DOORBELL_BASE_IDX 0
+#define regLSDMA_QUEUE1_DOORBELL_OFFSET 0x00f6
+#define regLSDMA_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE1_DOORBELL_LOG 0x00f7
+#define regLSDMA_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regLSDMA_QUEUE1_WATERMARK 0x00f8
+#define regLSDMA_QUEUE1_WATERMARK_BASE_IDX 0
+#define regLSDMA_QUEUE1_DUMMY0 0x00f9
+#define regLSDMA_QUEUE1_DUMMY0_BASE_IDX 0
+#define regLSDMA_QUEUE1_DUMMY1 0x00fa
+#define regLSDMA_QUEUE1_DUMMY1_BASE_IDX 0
+#define regLSDMA_QUEUE1_DUMMY2 0x00fb
+#define regLSDMA_QUEUE1_DUMMY2_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA0 0x0118
+#define regLSDMA_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA1 0x0119
+#define regLSDMA_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA2 0x011a
+#define regLSDMA_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA3 0x011b
+#define regLSDMA_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA4 0x011c
+#define regLSDMA_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA5 0x011d
+#define regLSDMA_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA6 0x011e
+#define regLSDMA_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA7 0x011f
+#define regLSDMA_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA8 0x0120
+#define regLSDMA_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA9 0x0121
+#define regLSDMA_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA10 0x0122
+#define regLSDMA_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_CNTL 0x0123
+#define regLSDMA_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
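
[Editorial note, not part of the patch: headers like these are machine-generated register descriptions. Each register gets an offset plus BASE_IDX define in the _offset.h file, and each bit field gets a __SHIFT/_MASK pair in the _sh_mask.h file added below. A minimal, self-contained C sketch of how such a pair decodes a field follows, using the LSDMA_VERSION defines from the sh_mask header below; the get_field() helper and the sample register value are illustrative assumptions only — the amdgpu driver uses its own field-access macros rather than this helper.]

/* Sketch: decoding a register field with a __SHIFT/_MASK pair.
 * The macro values are copied from lsdma_7_0_0_sh_mask.h below;
 * get_field() and the sample value 0x205 are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define LSDMA_VERSION__MINVER__SHIFT 0x0
#define LSDMA_VERSION__MAJVER__SHIFT 0x8
#define LSDMA_VERSION__MINVER_MASK   0x0000007FL
#define LSDMA_VERSION__MAJVER_MASK   0x00007F00L

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* isolate the field bits, then right-justify them */
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t ver = 0x00000205; /* pretend LSDMA_VERSION read back as 2.5 */

	printf("LSDMA v%u.%u\n",
	       (unsigned)get_field(ver, LSDMA_VERSION__MAJVER_MASK,
				   LSDMA_VERSION__MAJVER__SHIFT),
	       (unsigned)get_field(ver, LSDMA_VERSION__MINVER_MASK,
				   LSDMA_VERSION__MINVER__SHIFT));
	return 0;
}
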
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h
new file mode 100644
index 000000000000..644a5d066ab2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h
@@ -0,0 +1,1411 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_0_0_SH_MASK_HEADER
+#define _lsdma_7_0_0_SH_MASK_HEADER
+
+
+// addressBlock: lsdma0_lsdma0dec
+//LSDMA_UCODE_ADDR
+#define LSDMA_UCODE_ADDR__VALUE__SHIFT 0x0
+#define LSDMA_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//LSDMA_UCODE_DATA
+#define LSDMA_UCODE_DATA__VALUE__SHIFT 0x0
+#define LSDMA_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_ERROR_INJECT_CNTL
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_IRRITATION__SHIFT 0x0
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x1
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x2
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_MEMHUB_READ_POISON_INJECT__SHIFT 0x8
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_MEMHUB_ATOMIC_POISON_INJECT__SHIFT 0x9
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_IRRITATION_MASK 0x00000001L
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000002L
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_ERROR_INJECT_MASK 0x0000000CL
+//LSDMA_ERROR_INJECT_SELECT
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF0__SHIFT 0x0
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF1__SHIFT 0x1
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF2__SHIFT 0x2
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF3__SHIFT 0x3
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF4__SHIFT 0x4
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF5__SHIFT 0x5
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF6__SHIFT 0x6
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF7__SHIFT 0x7
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF8__SHIFT 0x8
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF9__SHIFT 0x9
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF10__SHIFT 0xa
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF11__SHIFT 0xb
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF12__SHIFT 0xc
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF13__SHIFT 0xd
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF14__SHIFT 0xe
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF15__SHIFT 0xf
+#define LSDMA_ERROR_INJECT_SELECT__UCODE_BUF__SHIFT 0x10
+#define LSDMA_ERROR_INJECT_SELECT__RB_CMD_BUF__SHIFT 0x11
+#define LSDMA_ERROR_INJECT_SELECT__IB_CMD_BUF__SHIFT 0x12
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RD_FIFO__SHIFT 0x13
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RDBST_FIFO__SHIFT 0x14
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_WR_FIFO__SHIFT 0x15
+#define LSDMA_ERROR_INJECT_SELECT__DATA_LUT_FIFO__SHIFT 0x16
+#define LSDMA_ERROR_INJECT_SELECT__SPLIT_DATA_FIFO__SHIFT 0x17
+#define LSDMA_ERROR_INJECT_SELECT__MC_WR_ADDR_FIFO__SHIFT 0x18
+#define LSDMA_ERROR_INJECT_SELECT__MC_RDRET_BUF__SHIFT 0x19
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF0_MASK 0x00000001L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF1_MASK 0x00000002L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF2_MASK 0x00000004L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF3_MASK 0x00000008L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF4_MASK 0x00000010L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF5_MASK 0x00000020L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF6_MASK 0x00000040L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF7_MASK 0x00000080L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF8_MASK 0x00000100L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF9_MASK 0x00000200L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF10_MASK 0x00000400L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF11_MASK 0x00000800L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF12_MASK 0x00001000L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF13_MASK 0x00002000L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF14_MASK 0x00004000L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF15_MASK 0x00008000L
+#define LSDMA_ERROR_INJECT_SELECT__UCODE_BUF_MASK 0x00010000L
+#define LSDMA_ERROR_INJECT_SELECT__RB_CMD_BUF_MASK 0x00020000L
+#define LSDMA_ERROR_INJECT_SELECT__IB_CMD_BUF_MASK 0x00040000L
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RD_FIFO_MASK 0x00080000L
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RDBST_FIFO_MASK 0x00100000L
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_WR_FIFO_MASK 0x00200000L
+#define LSDMA_ERROR_INJECT_SELECT__DATA_LUT_FIFO_MASK 0x00400000L
+#define LSDMA_ERROR_INJECT_SELECT__SPLIT_DATA_FIFO_MASK 0x00800000L
+#define LSDMA_ERROR_INJECT_SELECT__MC_WR_ADDR_FIFO_MASK 0x01000000L
+#define LSDMA_ERROR_INJECT_SELECT__MC_RDRET_BUF_MASK 0x02000000L
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_ADDR__SHIFT 0x0
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_DATA__SHIFT 0x1
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_ADDR_MASK 0x00000001L
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_DATA_MASK 0x00000002L
+#define LSDMA_PUB_REG_TYPE3__LSDMA_CLK_CTRL__SHIFT 0xb
+#define LSDMA_PUB_REG_TYPE3__LSDMA_CLK_CTRL_MASK 0x00000800L
+//LSDMA_CONTEXT_GROUP_BOUNDARY
+#define LSDMA_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define LSDMA_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//LSDMA_RB_RPTR_FETCH_HI
+#define LSDMA_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define LSDMA_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_SEM_WAIT_FAIL_TIMER_CNTL
+#define LSDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define LSDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//LSDMA_RB_RPTR_FETCH
+#define LSDMA_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define LSDMA_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//LSDMA_IB_OFFSET_FETCH
+#define LSDMA_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define LSDMA_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//LSDMA_PROGRAM
+#define LSDMA_PROGRAM__STREAM__SHIFT 0x0
+#define LSDMA_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//LSDMA_STATUS_REG
+#define LSDMA_STATUS_REG__IDLE__SHIFT 0x0
+#define LSDMA_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define LSDMA_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define LSDMA_STATUS_REG__RB_FULL__SHIFT 0x3
+#define LSDMA_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define LSDMA_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define LSDMA_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define LSDMA_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define LSDMA_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define LSDMA_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define LSDMA_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define LSDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define LSDMA_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define LSDMA_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define LSDMA_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define LSDMA_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define LSDMA_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define LSDMA_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define LSDMA_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define LSDMA_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define LSDMA_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define LSDMA_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define LSDMA_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define LSDMA_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define LSDMA_STATUS_REG__Reserved__SHIFT 0x18
+#define LSDMA_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define LSDMA_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define LSDMA_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define LSDMA_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define LSDMA_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define LSDMA_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define LSDMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define LSDMA_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define LSDMA_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define LSDMA_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define LSDMA_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define LSDMA_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define LSDMA_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define LSDMA_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define LSDMA_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define LSDMA_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define LSDMA_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define LSDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define LSDMA_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define LSDMA_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define LSDMA_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define LSDMA_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define LSDMA_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define LSDMA_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define LSDMA_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define LSDMA_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define LSDMA_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define LSDMA_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define LSDMA_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define LSDMA_STATUS_REG__Reserved_MASK 0x01000000L
+#define LSDMA_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define LSDMA_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define LSDMA_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define LSDMA_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define LSDMA_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define LSDMA_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//LSDMA_STATUS1_REG
+#define LSDMA_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define LSDMA_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define LSDMA_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define LSDMA_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define LSDMA_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define LSDMA_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define LSDMA_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define LSDMA_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define LSDMA_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define LSDMA_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define LSDMA_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define LSDMA_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define LSDMA_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define LSDMA_STATUS1_REG__EX_START__SHIFT 0xd
+#define LSDMA_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define LSDMA_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define LSDMA_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define LSDMA_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define LSDMA_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define LSDMA_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define LSDMA_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define LSDMA_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define LSDMA_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define LSDMA_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define LSDMA_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define LSDMA_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define LSDMA_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define LSDMA_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define LSDMA_STATUS1_REG__EX_START_MASK 0x00002000L
+#define LSDMA_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define LSDMA_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+//LSDMA_RD_BURST_CNTL
+#define LSDMA_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define LSDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define LSDMA_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define LSDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//LSDMA_HBM_PAGE_CONFIG
+#define LSDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define LSDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//LSDMA_UCODE_CHECKSUM
+#define LSDMA_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define LSDMA_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//LSDMA_FREEZE
+#define LSDMA_FREEZE__PREEMPT__SHIFT 0x0
+#define LSDMA_FREEZE__FREEZE__SHIFT 0x4
+#define LSDMA_FREEZE__FROZEN__SHIFT 0x5
+#define LSDMA_FREEZE__F32_FREEZE__SHIFT 0x6
+#define LSDMA_FREEZE__PREEMPT_MASK 0x00000001L
+#define LSDMA_FREEZE__FREEZE_MASK 0x00000010L
+#define LSDMA_FREEZE__FROZEN_MASK 0x00000020L
+#define LSDMA_FREEZE__F32_FREEZE_MASK 0x00000040L
+//LSDMA_DCC_CNTL
+#define LSDMA_DCC_CNTL__DCC_FORCE_BYPASS__SHIFT 0x0
+#define LSDMA_DCC_CNTL__DCC_FORCE_BYPASS_MASK 0x00000001L
+//LSDMA_POWER_GATING
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_CONDITION__SHIFT 0x0
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_CONDITION__SHIFT 0x1
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_REQ__SHIFT 0x2
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_REQ__SHIFT 0x3
+#define LSDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_CONDITION_MASK 0x00000001L
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_CONDITION_MASK 0x00000002L
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_REQ_MASK 0x00000004L
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_REQ_MASK 0x00000008L
+#define LSDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
+//LSDMA_PGFSM_CONFIG
+#define LSDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
+#define LSDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
+#define LSDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
+#define LSDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
+#define LSDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
+#define LSDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
+#define LSDMA_PGFSM_CONFIG__READ__SHIFT 0xd
+#define LSDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
+#define LSDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
+#define LSDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
+#define LSDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define LSDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define LSDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define LSDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define LSDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define LSDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define LSDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define LSDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
+//LSDMA_PGFSM_WRITE
+#define LSDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
+#define LSDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_PGFSM_READ
+#define LSDMA_PGFSM_READ__VALUE__SHIFT 0x0
+#define LSDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
+//LSDMA_BA_THRESHOLD
+#define LSDMA_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define LSDMA_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define LSDMA_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define LSDMA_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//LSDMA_ID
+#define LSDMA_ID__DEVICE_ID__SHIFT 0x0
+#define LSDMA_ID__DEVICE_ID_MASK 0x000000FFL
+//LSDMA_VERSION
+#define LSDMA_VERSION__MINVER__SHIFT 0x0
+#define LSDMA_VERSION__MAJVER__SHIFT 0x8
+#define LSDMA_VERSION__REV__SHIFT 0x10
+#define LSDMA_VERSION__MINVER_MASK 0x0000007FL
+#define LSDMA_VERSION__MAJVER_MASK 0x00007F00L
+#define LSDMA_VERSION__REV_MASK 0x003F0000L
+//LSDMA_EDC_COUNTER
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF0_SED__SHIFT 0x0
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF1_SED__SHIFT 0x2
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF2_SED__SHIFT 0x4
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF3_SED__SHIFT 0x6
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF4_SED__SHIFT 0x8
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF5_SED__SHIFT 0xa
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF6_SED__SHIFT 0xc
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF8_SED__SHIFT 0x10
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF9_SED__SHIFT 0x12
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF10_SED__SHIFT 0x14
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF11_SED__SHIFT 0x16
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF12_SED__SHIFT 0x18
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF13_SED__SHIFT 0x1a
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF14_SED__SHIFT 0x1c
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF15_SED__SHIFT 0x1e
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF0_SED_MASK 0x00000003L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF1_SED_MASK 0x0000000CL
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF2_SED_MASK 0x00000030L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF3_SED_MASK 0x000000C0L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF4_SED_MASK 0x00000300L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF5_SED_MASK 0x00000C00L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF6_SED_MASK 0x00003000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF7_SED_MASK 0x0000C000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF8_SED_MASK 0x00030000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF9_SED_MASK 0x000C0000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF10_SED_MASK 0x00300000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF11_SED_MASK 0x00C00000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF12_SED_MASK 0x03000000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF13_SED_MASK 0x0C000000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF14_SED_MASK 0x30000000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF15_SED_MASK 0xC0000000L
+//LSDMA_EDC_COUNTER2
+#define LSDMA_EDC_COUNTER2__LSDMA_UCODE_BUF_SED__SHIFT 0x0
+#define LSDMA_EDC_COUNTER2__LSDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define LSDMA_EDC_COUNTER2__LSDMA_IB_CMD_BUF_SED__SHIFT 0x4
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RD_FIFO_SED__SHIFT 0x6
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x8
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_WR_FIFO_SED__SHIFT 0xa
+#define LSDMA_EDC_COUNTER2__LSDMA_DATA_LUT_FIFO_SED__SHIFT 0xc
+#define LSDMA_EDC_COUNTER2__LSDMA_SPLIT_DATA_BUF_SED__SHIFT 0xe
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_RDRET_BUF_SED__SHIFT 0x12
+#define LSDMA_EDC_COUNTER2__LSDMA_UCODE_BUF_SED_MASK 0x00000003L
+#define LSDMA_EDC_COUNTER2__LSDMA_RB_CMD_BUF_SED_MASK 0x0000000CL
+#define LSDMA_EDC_COUNTER2__LSDMA_IB_CMD_BUF_SED_MASK 0x00000030L
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RD_FIFO_SED_MASK 0x000000C0L
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000300L
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_WR_FIFO_SED_MASK 0x00000C00L
+#define LSDMA_EDC_COUNTER2__LSDMA_DATA_LUT_FIFO_SED_MASK 0x00003000L
+#define LSDMA_EDC_COUNTER2__LSDMA_SPLIT_DATA_BUF_SED_MASK 0x0000C000L
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00030000L
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_RDRET_BUF_SED_MASK 0x000C0000L
+//LSDMA_STATUS2_REG
+#define LSDMA_STATUS2_REG__ID__SHIFT 0x0
+#define LSDMA_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define LSDMA_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define LSDMA_STATUS2_REG__ID_MASK 0x00000007L
+#define LSDMA_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define LSDMA_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//LSDMA_ATOMIC_CNTL
+#define LSDMA_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define LSDMA_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+//LSDMA_ATOMIC_PREOP_LO
+#define LSDMA_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define LSDMA_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//LSDMA_ATOMIC_PREOP_HI
+#define LSDMA_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define LSDMA_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_CNTL
+#define LSDMA_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define LSDMA_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define LSDMA_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define LSDMA_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define LSDMA_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define LSDMA_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define LSDMA_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define LSDMA_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define LSDMA_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define LSDMA_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define LSDMA_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define LSDMA_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//LSDMA_UTCL1_WATERMK
+#define LSDMA_UTCL1_WATERMK__REQ_WATERMK__SHIFT 0x0
+#define LSDMA_UTCL1_WATERMK__REQ_DEPTH__SHIFT 0x3
+#define LSDMA_UTCL1_WATERMK__PAGE_WATERMK__SHIFT 0x5
+#define LSDMA_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x8
+#define LSDMA_UTCL1_WATERMK__RESERVED__SHIFT 0x10
+#define LSDMA_UTCL1_WATERMK__REQ_WATERMK_MASK 0x00000007L
+#define LSDMA_UTCL1_WATERMK__REQ_DEPTH_MASK 0x00000018L
+#define LSDMA_UTCL1_WATERMK__PAGE_WATERMK_MASK 0x000000E0L
+#define LSDMA_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x0000FF00L
+#define LSDMA_UTCL1_WATERMK__RESERVED_MASK 0xFFFF0000L
+//LSDMA_UTCL1_RD_STATUS
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define LSDMA_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define LSDMA_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define LSDMA_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define LSDMA_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define LSDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define LSDMA_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define LSDMA_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define LSDMA_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define LSDMA_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define LSDMA_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define LSDMA_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define LSDMA_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define LSDMA_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define LSDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define LSDMA_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define LSDMA_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define LSDMA_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define LSDMA_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//LSDMA_UTCL1_WR_STATUS
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define LSDMA_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define LSDMA_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define LSDMA_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define LSDMA_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define LSDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define LSDMA_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define LSDMA_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define LSDMA_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define LSDMA_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define LSDMA_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define LSDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define LSDMA_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//LSDMA_UTCL1_INV0
+#define LSDMA_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define LSDMA_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define LSDMA_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define LSDMA_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define LSDMA_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define LSDMA_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define LSDMA_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define LSDMA_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define LSDMA_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define LSDMA_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define LSDMA_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define LSDMA_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define LSDMA_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define LSDMA_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define LSDMA_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define LSDMA_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define LSDMA_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define LSDMA_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define LSDMA_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define LSDMA_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define LSDMA_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define LSDMA_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define LSDMA_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define LSDMA_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define LSDMA_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define LSDMA_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define LSDMA_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define LSDMA_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//LSDMA_UTCL1_INV1
+#define LSDMA_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define LSDMA_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_INV2
+#define LSDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define LSDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_RD_XNACK0
+#define LSDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define LSDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_RD_XNACK1
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define LSDMA_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define LSDMA_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//LSDMA_UTCL1_WR_XNACK0
+#define LSDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define LSDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_WR_XNACK1
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define LSDMA_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define LSDMA_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//LSDMA_UTCL1_TIMEOUT
+#define LSDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define LSDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define LSDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define LSDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//LSDMA_UTCL1_PAGE
+#define LSDMA_UTCL1_PAGE__INVALID_ADDR__SHIFT 0x0
+#define LSDMA_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define LSDMA_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define LSDMA_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define LSDMA_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define LSDMA_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define LSDMA_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define LSDMA_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define LSDMA_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//LSDMA_RELAX_ORDERING_LUT
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define LSDMA_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define LSDMA_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define LSDMA_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define LSDMA_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define LSDMA_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define LSDMA_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define LSDMA_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define LSDMA_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define LSDMA_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define LSDMA_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define LSDMA_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define LSDMA_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define LSDMA_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define LSDMA_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define LSDMA_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define LSDMA_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define LSDMA_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define LSDMA_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define LSDMA_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define LSDMA_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define LSDMA_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define LSDMA_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define LSDMA_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define LSDMA_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define LSDMA_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define LSDMA_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define LSDMA_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define LSDMA_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//LSDMA_CHICKEN_BITS_2
+#define LSDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define LSDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define LSDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define LSDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+//LSDMA_STATUS3_REG
+#define LSDMA_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define LSDMA_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define LSDMA_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define LSDMA_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define LSDMA_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define LSDMA_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define LSDMA_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define LSDMA_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define LSDMA_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define LSDMA_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//LSDMA_PHYSICAL_ADDR_LO
+#define LSDMA_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define LSDMA_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define LSDMA_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define LSDMA_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define LSDMA_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define LSDMA_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define LSDMA_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define LSDMA_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//LSDMA_PHYSICAL_ADDR_HI
+#define LSDMA_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//LSDMA_ECC_CNTL
+#define LSDMA_ECC_CNTL__ECC_DISABLE__SHIFT 0x0
+#define LSDMA_ECC_CNTL__ECC_DISABLE_MASK 0x00000001L
+//LSDMA_ERROR_LOG
+#define LSDMA_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define LSDMA_ERROR_LOG__STATUS__SHIFT 0x10
+#define LSDMA_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define LSDMA_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//LSDMA_PUB_DUMMY0
+#define LSDMA_PUB_DUMMY0__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY0__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_PUB_DUMMY1
+#define LSDMA_PUB_DUMMY1__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY1__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_PUB_DUMMY2
+#define LSDMA_PUB_DUMMY2__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY2__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_PUB_DUMMY3
+#define LSDMA_PUB_DUMMY3__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY3__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_F32_COUNTER
+#define LSDMA_F32_COUNTER__VALUE__SHIFT 0x0
+#define LSDMA_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_PERFCNT_PERFCOUNTER0_CFG
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//LSDMA_PERFCNT_PERFCOUNTER1_CFG
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//LSDMA_PERFCNT_MISC_CNTL
+#define LSDMA_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define LSDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT__SHIFT 0x10
+#define LSDMA_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+#define LSDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT_MASK 0x00010000L
+//LSDMA_PERFCNT_PERFCOUNTER_LO
+#define LSDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//LSDMA_PERFCNT_PERFCOUNTER_HI
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//LSDMA_CRD_CNTL
+#define LSDMA_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define LSDMA_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define LSDMA_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define LSDMA_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define LSDMA_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//LSDMA_ULV_CNTL
+#define LSDMA_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define LSDMA_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define LSDMA_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define LSDMA_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//LSDMA_EA_DBIT_ADDR_DATA
+#define LSDMA_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define LSDMA_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_EA_DBIT_ADDR_INDEX
+#define LSDMA_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define LSDMA_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//LSDMA_STATUS4_REG
+#define LSDMA_STATUS4_REG__IDLE__SHIFT 0x0
+#define LSDMA_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define LSDMA_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define LSDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING__SHIFT 0x4
+#define LSDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING__SHIFT 0x5
+#define LSDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x6
+#define LSDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x7
+#define LSDMA_STATUS4_REG__REG_POLLING__SHIFT 0x8
+#define LSDMA_STATUS4_REG__MEM_POLLING__SHIFT 0x9
+#define LSDMA_STATUS4_REG__UTCL2_RD_XNACK__SHIFT 0xa
+#define LSDMA_STATUS4_REG__UTCL2_WR_XNACK__SHIFT 0xc
+#define LSDMA_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0xe
+#define LSDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x12
+#define LSDMA_STATUS4_REG__SRIOV_LSDMA_EXECUTING_CMD__SHIFT 0x13
+#define LSDMA_STATUS4_REG__IDLE_MASK 0x00000001L
+#define LSDMA_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define LSDMA_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define LSDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING_MASK 0x00000010L
+#define LSDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING_MASK 0x00000020L
+#define LSDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000040L
+#define LSDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000080L
+#define LSDMA_STATUS4_REG__REG_POLLING_MASK 0x00000100L
+#define LSDMA_STATUS4_REG__MEM_POLLING_MASK 0x00000200L
+#define LSDMA_STATUS4_REG__UTCL2_RD_XNACK_MASK 0x00000C00L
+#define LSDMA_STATUS4_REG__UTCL2_WR_XNACK_MASK 0x00003000L
+#define LSDMA_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x0003C000L
+#define LSDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00040000L
+#define LSDMA_STATUS4_REG__SRIOV_LSDMA_EXECUTING_CMD_MASK 0x00080000L
+//LSDMA_CE_CTRL
+#define LSDMA_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define LSDMA_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define LSDMA_CE_CTRL__RESERVED_7_5__SHIFT 0x5
+#define LSDMA_CE_CTRL__RESERVED__SHIFT 0x8
+#define LSDMA_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define LSDMA_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define LSDMA_CE_CTRL__RESERVED_7_5_MASK 0x000000E0L
+#define LSDMA_CE_CTRL__RESERVED_MASK 0xFFFFFF00L
+//LSDMA_EXCEPTION_STATUS
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_ECC__SHIFT 0x2
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_ECC__SHIFT 0x3
+#define LSDMA_EXCEPTION_STATUS__SRAM_ECC__SHIFT 0x6
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_GEN_ERR__SHIFT 0x8
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_GEN_ERR__SHIFT 0x9
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_GEN_ERR__SHIFT 0xa
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_GEN_ERR__SHIFT 0xb
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_GEN_ERR__SHIFT 0xd
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_PRT__SHIFT 0x10
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_PRT__SHIFT 0x11
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_PRT__SHIFT 0x12
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_PRT__SHIFT 0x13
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_PRT__SHIFT 0x15
+#define LSDMA_EXCEPTION_STATUS__INVALID_ADDR__SHIFT 0x18
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_ECC_MASK 0x00000004L
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_ECC_MASK 0x00000008L
+#define LSDMA_EXCEPTION_STATUS__SRAM_ECC_MASK 0x00000040L
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_GEN_ERR_MASK 0x00000100L
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_GEN_ERR_MASK 0x00000200L
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_GEN_ERR_MASK 0x00000400L
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_GEN_ERR_MASK 0x00000800L
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_GEN_ERR_MASK 0x00002000L
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_PRT_MASK 0x00010000L
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_PRT_MASK 0x00020000L
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_PRT_MASK 0x00040000L
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_PRT_MASK 0x00080000L
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_PRT_MASK 0x00200000L
+//LSDMA_INT_CNTL
+#define LSDMA_INT_CNTL__ATOMIC_RTN_DONE_INT_ENABLE__SHIFT 0x0
+#define LSDMA_INT_CNTL__TRAP_INT_ENABLE__SHIFT 0x1
+#define LSDMA_INT_CNTL__SRBM_WRITE_INT_ENABLE__SHIFT 0x2
+#define LSDMA_INT_CNTL__CTX_EMPTY_INT_ENABLE__SHIFT 0x3
+#define LSDMA_INT_CNTL__FROZEN_INT_ENABLE__SHIFT 0x4
+#define LSDMA_INT_CNTL__PREEMPT_INT_ENABLE__SHIFT 0x5
+#define LSDMA_INT_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x6
+#define LSDMA_INT_CNTL__ATOMIC_TIMEOUT_INT_ENABLE__SHIFT 0x7
+#define LSDMA_INT_CNTL__POLL_TIMEOUT_INT_ENABLE__SHIFT 0x8
+#define LSDMA_INT_CNTL__INVALID_ADDR_INT_ENABLE__SHIFT 0x9
+#define LSDMA_INT_CNTL__NACK_GEN_ERR_INT_ENABLE__SHIFT 0xa
+#define LSDMA_INT_CNTL__NACK_PRT_INT_ENABLE__SHIFT 0xb
+#define LSDMA_INT_CNTL__ECC_INT_ENABLE__SHIFT 0xc
+#define LSDMA_INT_CNTL__ATOMIC_RTN_DONE_INT_ENABLE_MASK 0x00000001L
+#define LSDMA_INT_CNTL__TRAP_INT_ENABLE_MASK 0x00000002L
+#define LSDMA_INT_CNTL__SRBM_WRITE_INT_ENABLE_MASK 0x00000004L
+#define LSDMA_INT_CNTL__CTX_EMPTY_INT_ENABLE_MASK 0x00000008L
+#define LSDMA_INT_CNTL__FROZEN_INT_ENABLE_MASK 0x00000010L
+#define LSDMA_INT_CNTL__PREEMPT_INT_ENABLE_MASK 0x00000020L
+#define LSDMA_INT_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x00000040L
+#define LSDMA_INT_CNTL__ATOMIC_TIMEOUT_INT_ENABLE_MASK 0x00000080L
+#define LSDMA_INT_CNTL__POLL_TIMEOUT_INT_ENABLE_MASK 0x00000100L
+#define LSDMA_INT_CNTL__NACK_GEN_ERR_INT_ENABLE_MASK 0x00000400L
+#define LSDMA_INT_CNTL__NACK_PRT_INT_ENABLE_MASK 0x00000800L
+#define LSDMA_INT_CNTL__ECC_INT_ENABLE_MASK 0x00001000L
+//LSDMA_MEM_POWER_CTRL
+#define LSDMA_MEM_POWER_CTRL__MEM_POWER_CTRL_EN__SHIFT 0x0
+#define LSDMA_MEM_POWER_CTRL__MEM_POWER_CTRL_EN_MASK 0x00000001L
+//LSDMA_CLK_CTRL
+#define LSDMA_CLK_CTRL__RESERVED__SHIFT 0x1
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define LSDMA_CLK_CTRL__RESERVED_MASK 0x00FFFFFEL
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//LSDMA_CNTL
+#define LSDMA_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define LSDMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define LSDMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define LSDMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define LSDMA_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define LSDMA_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x6
+#define LSDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define LSDMA_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define LSDMA_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define LSDMA_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define LSDMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define LSDMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define LSDMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define LSDMA_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define LSDMA_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000040L
+#define LSDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define LSDMA_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+//LSDMA_CHICKEN_BITS
+#define LSDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define LSDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define LSDMA_CHICKEN_BITS__F32_MGCG_ENABLE__SHIFT 0x3
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define LSDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define LSDMA_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define LSDMA_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define LSDMA_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define LSDMA_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define LSDMA_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x18
+#define LSDMA_CHICKEN_BITS__DRAM_ECC_NACK_F32_RESET_ENABLE__SHIFT 0x19
+#define LSDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define LSDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define LSDMA_CHICKEN_BITS__F32_MGCG_ENABLE_MASK 0x00000008L
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define LSDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define LSDMA_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define LSDMA_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define LSDMA_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define LSDMA_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+//LSDMA_PIO_SRC_ADDR_LO
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_SRC_ADDR_HI
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_LO
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_HI
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_COMMAND
+#define LSDMA_PIO_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define LSDMA_PIO_COMMAND__SRC_LOCATION__SHIFT 0x1a
+#define LSDMA_PIO_COMMAND__DST_LOCATION__SHIFT 0x1b
+#define LSDMA_PIO_COMMAND__SRC_ADDR_INC__SHIFT 0x1c
+#define LSDMA_PIO_COMMAND__DST_ADDR_INC__SHIFT 0x1d
+#define LSDMA_PIO_COMMAND__OVERLAP_DISABLE__SHIFT 0x1e
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL__SHIFT 0x1f
+#define LSDMA_PIO_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define LSDMA_PIO_COMMAND__SRC_LOCATION_MASK 0x04000000L
+#define LSDMA_PIO_COMMAND__DST_LOCATION_MASK 0x08000000L
+#define LSDMA_PIO_COMMAND__SRC_ADDR_INC_MASK 0x10000000L
+#define LSDMA_PIO_COMMAND__DST_ADDR_INC_MASK 0x20000000L
+#define LSDMA_PIO_COMMAND__OVERLAP_DISABLE_MASK 0x40000000L
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL_MASK 0x80000000L
+//LSDMA_PIO_CONSTFILL_DATA
+#define LSDMA_PIO_CONSTFILL_DATA__DATA__SHIFT 0x0
+#define LSDMA_PIO_CONSTFILL_DATA__DATA_MASK 0xFFFFFFFFL
+//LSDMA_PIO_CONTROL
+#define LSDMA_PIO_CONTROL__VMID__SHIFT 0x0
+#define LSDMA_PIO_CONTROL__DST_GPA__SHIFT 0x4
+#define LSDMA_PIO_CONTROL__DST_SYS__SHIFT 0x5
+#define LSDMA_PIO_CONTROL__DST_GCC__SHIFT 0x6
+#define LSDMA_PIO_CONTROL__DST_SNOOP__SHIFT 0x7
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT__SHIFT 0x8
+#define LSDMA_PIO_CONTROL__DST_COMP_EN__SHIFT 0xa
+#define LSDMA_PIO_CONTROL__SRC_GPA__SHIFT 0x14
+#define LSDMA_PIO_CONTROL__SRC_SYS__SHIFT 0x15
+#define LSDMA_PIO_CONTROL__SRC_SNOOP__SHIFT 0x17
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT__SHIFT 0x18
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN__SHIFT 0x1a
+#define LSDMA_PIO_CONTROL__VMID_MASK 0x0000000FL
+#define LSDMA_PIO_CONTROL__DST_GPA_MASK 0x00000010L
+#define LSDMA_PIO_CONTROL__DST_SYS_MASK 0x00000020L
+#define LSDMA_PIO_CONTROL__DST_GCC_MASK 0x00000040L
+#define LSDMA_PIO_CONTROL__DST_SNOOP_MASK 0x00000080L
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT_MASK 0x00000300L
+#define LSDMA_PIO_CONTROL__DST_COMP_EN_MASK 0x00000400L
+#define LSDMA_PIO_CONTROL__SRC_GPA_MASK 0x00100000L
+#define LSDMA_PIO_CONTROL__SRC_SYS_MASK 0x00200000L
+#define LSDMA_PIO_CONTROL__SRC_SNOOP_MASK 0x00800000L
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT_MASK 0x03000000L
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN_MASK 0x04000000L
+//LSDMA_PIO_STATUS
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0
+#define LSDMA_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0x8
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0x9
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xa
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xb
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_PIO_STATUS__PIO_IDLE__SHIFT 0x1f
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L
+#define LSDMA_PIO_STATUS__CMD_PROCESSING_MASK 0x000000F8L
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR_MASK 0x00000100L
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00000200L
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00000400L
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00000800L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_PIO_STATUS__PIO_IDLE_MASK 0x80000000L
+//LSDMA_PF_PIO_STATUS
+#define LSDMA_PF_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0
+#define LSDMA_PF_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3
+#define LSDMA_PF_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0x8
+#define LSDMA_PF_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0x9
+#define LSDMA_PF_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xa
+#define LSDMA_PF_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xb
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_PF_PIO_STATUS__PIO_IDLE__SHIFT 0x1f
+#define LSDMA_PF_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L
+#define LSDMA_PF_PIO_STATUS__CMD_PROCESSING_MASK 0x000000F8L
+#define LSDMA_PF_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00000200L
+#define LSDMA_PF_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00000400L
+#define LSDMA_PF_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00000800L
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_PF_PIO_STATUS__PIO_IDLE_MASK 0x80000000L
+//LSDMA_QUEUE0_RB_CNTL
+#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define LSDMA_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define LSDMA_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define LSDMA_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define LSDMA_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//LSDMA_QUEUE0_RB_BASE
+#define LSDMA_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_BASE_HI
+#define LSDMA_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//LSDMA_QUEUE0_RB_RPTR
+#define LSDMA_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_RPTR_HI
+#define LSDMA_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR
+#define LSDMA_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR_HI
+#define LSDMA_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR_POLL_CNTL
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_RB_RPTR_ADDR_HI
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_RPTR_ADDR_LO
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_IB_CNTL
+#define LSDMA_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define LSDMA_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define LSDMA_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define LSDMA_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define LSDMA_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define LSDMA_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define LSDMA_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//LSDMA_QUEUE0_IB_RPTR
+#define LSDMA_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE0_IB_OFFSET
+#define LSDMA_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE0_IB_BASE_LO
+#define LSDMA_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define LSDMA_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//LSDMA_QUEUE0_IB_BASE_HI
+#define LSDMA_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_IB_SIZE
+#define LSDMA_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE0_SKIP_CNTL
+#define LSDMA_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//LSDMA_QUEUE0_CSA_ADDR_LO
+#define LSDMA_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_CSA_ADDR_HI
+#define LSDMA_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_AQL_CNTL
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define LSDMA_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//LSDMA_QUEUE0_MINOR_PTR_UPDATE
+#define LSDMA_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//LSDMA_QUEUE0_CNTL
+#define LSDMA_QUEUE0_CNTL__QUANTUM__SHIFT 0x0
+#define LSDMA_QUEUE0_CNTL__QUANTUM_MASK 0x000000FFL
+//LSDMA_QUEUE0_RB_PREEMPT
+#define LSDMA_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//LSDMA_QUEUE0_IB_SUB_REMAIN
+#define LSDMA_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE0_PREEMPT
+#define LSDMA_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define LSDMA_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//LSDMA_QUEUE0_CONTEXT_STATUS
+#define LSDMA_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define LSDMA_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define LSDMA_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//LSDMA_QUEUE0_STATUS
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//LSDMA_QUEUE0_DOORBELL
+#define LSDMA_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define LSDMA_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define LSDMA_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define LSDMA_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//LSDMA_QUEUE0_DOORBELL_OFFSET
+#define LSDMA_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//LSDMA_QUEUE0_DOORBELL_LOG
+#define LSDMA_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define LSDMA_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define LSDMA_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define LSDMA_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_WATERMARK
+#define LSDMA_QUEUE0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define LSDMA_QUEUE0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define LSDMA_QUEUE0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define LSDMA_QUEUE0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//LSDMA_QUEUE0_DUMMY0
+#define LSDMA_QUEUE0_DUMMY0__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE0_DUMMY0__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_DUMMY1
+#define LSDMA_QUEUE0_DUMMY1__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE0_DUMMY1__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_DUMMY2
+#define LSDMA_QUEUE0_DUMMY2__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE0_DUMMY2__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA0
+#define LSDMA_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA1
+#define LSDMA_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA2
+#define LSDMA_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA3
+#define LSDMA_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA4
+#define LSDMA_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA5
+#define LSDMA_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA6
+#define LSDMA_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA7
+#define LSDMA_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA8
+#define LSDMA_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA9
+#define LSDMA_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA10
+#define LSDMA_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_CNTL
+#define LSDMA_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define LSDMA_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define LSDMA_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define LSDMA_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define LSDMA_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define LSDMA_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define LSDMA_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//LSDMA_QUEUE1_RB_CNTL
+#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define LSDMA_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define LSDMA_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define LSDMA_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define LSDMA_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//LSDMA_QUEUE1_RB_BASE
+#define LSDMA_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_BASE_HI
+#define LSDMA_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//LSDMA_QUEUE1_RB_RPTR
+#define LSDMA_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_RPTR_HI
+#define LSDMA_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR
+#define LSDMA_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR_HI
+#define LSDMA_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR_POLL_CNTL
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_RB_RPTR_ADDR_HI
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_RPTR_ADDR_LO
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_IB_CNTL
+#define LSDMA_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define LSDMA_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define LSDMA_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define LSDMA_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define LSDMA_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define LSDMA_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define LSDMA_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//LSDMA_QUEUE1_IB_RPTR
+#define LSDMA_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE1_IB_OFFSET
+#define LSDMA_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE1_IB_BASE_LO
+#define LSDMA_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define LSDMA_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//LSDMA_QUEUE1_IB_BASE_HI
+#define LSDMA_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_IB_SIZE
+#define LSDMA_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE1_SKIP_CNTL
+#define LSDMA_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//LSDMA_QUEUE1_CSA_ADDR_LO
+#define LSDMA_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_CSA_ADDR_HI
+#define LSDMA_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_AQL_CNTL
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define LSDMA_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//LSDMA_QUEUE1_MINOR_PTR_UPDATE
+#define LSDMA_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//LSDMA_QUEUE1_CNTL
+#define LSDMA_QUEUE1_CNTL__QUANTUM__SHIFT 0x0
+#define LSDMA_QUEUE1_CNTL__QUANTUM_MASK 0x000000FFL
+//LSDMA_QUEUE1_RB_PREEMPT
+#define LSDMA_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//LSDMA_QUEUE1_IB_SUB_REMAIN
+#define LSDMA_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE1_PREEMPT
+#define LSDMA_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define LSDMA_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//LSDMA_QUEUE1_CONTEXT_STATUS
+#define LSDMA_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define LSDMA_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define LSDMA_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//LSDMA_QUEUE1_STATUS
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//LSDMA_QUEUE1_DOORBELL
+#define LSDMA_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define LSDMA_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define LSDMA_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define LSDMA_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//LSDMA_QUEUE1_DOORBELL_OFFSET
+#define LSDMA_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//LSDMA_QUEUE1_DOORBELL_LOG
+#define LSDMA_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define LSDMA_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define LSDMA_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define LSDMA_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_WATERMARK
+#define LSDMA_QUEUE1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define LSDMA_QUEUE1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define LSDMA_QUEUE1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define LSDMA_QUEUE1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//LSDMA_QUEUE1_DUMMY0
+#define LSDMA_QUEUE1_DUMMY0__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE1_DUMMY0__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_DUMMY1
+#define LSDMA_QUEUE1_DUMMY1__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE1_DUMMY1__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_DUMMY2
+#define LSDMA_QUEUE1_DUMMY2__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE1_DUMMY2__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA0
+#define LSDMA_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA1
+#define LSDMA_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA2
+#define LSDMA_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA3
+#define LSDMA_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA4
+#define LSDMA_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA5
+#define LSDMA_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA6
+#define LSDMA_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA7
+#define LSDMA_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA8
+#define LSDMA_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA9
+#define LSDMA_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA10
+#define LSDMA_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_CNTL
+#define LSDMA_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define LSDMA_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define LSDMA_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define LSDMA_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define LSDMA_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define LSDMA_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define LSDMA_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
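
The sh_mask values above always come in SHIFT/MASK pairs so that field accessors can be generated by token pasting. A minimal sketch of that pattern, modelled on the REG_GET_FIELD()/REG_SET_FIELD() helpers amdgpu already uses elsewhere (the definitions below are illustrative, not part of this patch; ring_size is a hypothetical byte count):

    #define REG_GET_FIELD(value, reg, field) \
            (((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
    #define REG_SET_FIELD(orig_val, reg, field, field_val) \
            (((orig_val) & ~reg##__##field##_MASK) | \
             (((field_val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

    /* e.g. program a ring buffer; RB_SIZE is assumed to be log2 of the
     * ring size in dwords, as on the other SDMA-family blocks. */
    u32 rb_cntl = 0;
    rb_cntl = REG_SET_FIELD(rb_cntl, LSDMA_QUEUE0_RB_CNTL, RB_SIZE,
                            order_base_2(ring_size / 4));
    rb_cntl = REG_SET_FIELD(rb_cntl, LSDMA_QUEUE0_RB_CNTL, RB_ENABLE, 1);

Token pasting expands, for example, to LSDMA_QUEUE0_RB_CNTL__RB_ENABLE_MASK and LSDMA_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT, which is why every field in these generated headers carries both constants.
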
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h
new file mode 100644
index 000000000000..6a1b7b524809
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_14_0_2_OFFSET_HEADER
+#define _mp_14_0_2_OFFSET_HEADER
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+// base address: 0x0
+#define regMP1_SMN_C2PMSG_0 0x0040
+#define regMP1_SMN_C2PMSG_0_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_1 0x0041
+#define regMP1_SMN_C2PMSG_1_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_2 0x0042
+#define regMP1_SMN_C2PMSG_2_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_3 0x0043
+#define regMP1_SMN_C2PMSG_3_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_4 0x0044
+#define regMP1_SMN_C2PMSG_4_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_5 0x0045
+#define regMP1_SMN_C2PMSG_5_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_6 0x0046
+#define regMP1_SMN_C2PMSG_6_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_7 0x0047
+#define regMP1_SMN_C2PMSG_7_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_8 0x0048
+#define regMP1_SMN_C2PMSG_8_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_9 0x0049
+#define regMP1_SMN_C2PMSG_9_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_10 0x004a
+#define regMP1_SMN_C2PMSG_10_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_11 0x004b
+#define regMP1_SMN_C2PMSG_11_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_12 0x004c
+#define regMP1_SMN_C2PMSG_12_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_13 0x004d
+#define regMP1_SMN_C2PMSG_13_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_14 0x004e
+#define regMP1_SMN_C2PMSG_14_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_15 0x004f
+#define regMP1_SMN_C2PMSG_15_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_16 0x0050
+#define regMP1_SMN_C2PMSG_16_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_17 0x0051
+#define regMP1_SMN_C2PMSG_17_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_18 0x0052
+#define regMP1_SMN_C2PMSG_18_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_19 0x0053
+#define regMP1_SMN_C2PMSG_19_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_20 0x0054
+#define regMP1_SMN_C2PMSG_20_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_21 0x0055
+#define regMP1_SMN_C2PMSG_21_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_22 0x0056
+#define regMP1_SMN_C2PMSG_22_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_23 0x0057
+#define regMP1_SMN_C2PMSG_23_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_24 0x0058
+#define regMP1_SMN_C2PMSG_24_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_25 0x0059
+#define regMP1_SMN_C2PMSG_25_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_26 0x005a
+#define regMP1_SMN_C2PMSG_26_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_27 0x005b
+#define regMP1_SMN_C2PMSG_27_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_28 0x005c
+#define regMP1_SMN_C2PMSG_28_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_29 0x005d
+#define regMP1_SMN_C2PMSG_29_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_30 0x005e
+#define regMP1_SMN_C2PMSG_30_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_31 0x005f
+#define regMP1_SMN_C2PMSG_31_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_32 0x0060
+#define regMP1_SMN_C2PMSG_32_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_33 0x0061
+#define regMP1_SMN_C2PMSG_33_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_34 0x0062
+#define regMP1_SMN_C2PMSG_34_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_35 0x0063
+#define regMP1_SMN_C2PMSG_35_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_36 0x0064
+#define regMP1_SMN_C2PMSG_36_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_37 0x0065
+#define regMP1_SMN_C2PMSG_37_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_38 0x0066
+#define regMP1_SMN_C2PMSG_38_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_39 0x0067
+#define regMP1_SMN_C2PMSG_39_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_40 0x0068
+#define regMP1_SMN_C2PMSG_40_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_41 0x0069
+#define regMP1_SMN_C2PMSG_41_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_42 0x006a
+#define regMP1_SMN_C2PMSG_42_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_43 0x006b
+#define regMP1_SMN_C2PMSG_43_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_44 0x006c
+#define regMP1_SMN_C2PMSG_44_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_45 0x006d
+#define regMP1_SMN_C2PMSG_45_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_46 0x006e
+#define regMP1_SMN_C2PMSG_46_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_47 0x006f
+#define regMP1_SMN_C2PMSG_47_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_48 0x0070
+#define regMP1_SMN_C2PMSG_48_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_49 0x0071
+#define regMP1_SMN_C2PMSG_49_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_50 0x0072
+#define regMP1_SMN_C2PMSG_50_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_51 0x0073
+#define regMP1_SMN_C2PMSG_51_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_52 0x0074
+#define regMP1_SMN_C2PMSG_52_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_53 0x0075
+#define regMP1_SMN_C2PMSG_53_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_54 0x0076
+#define regMP1_SMN_C2PMSG_54_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_55 0x0077
+#define regMP1_SMN_C2PMSG_55_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_56 0x0078
+#define regMP1_SMN_C2PMSG_56_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_57 0x0079
+#define regMP1_SMN_C2PMSG_57_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_58 0x007a
+#define regMP1_SMN_C2PMSG_58_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_59 0x007b
+#define regMP1_SMN_C2PMSG_59_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_60 0x007c
+#define regMP1_SMN_C2PMSG_60_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_61 0x007d
+#define regMP1_SMN_C2PMSG_61_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_62 0x007e
+#define regMP1_SMN_C2PMSG_62_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_63 0x007f
+#define regMP1_SMN_C2PMSG_63_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_64 0x0080
+#define regMP1_SMN_C2PMSG_64_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_65 0x0081
+#define regMP1_SMN_C2PMSG_65_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_66 0x0082
+#define regMP1_SMN_C2PMSG_66_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_67 0x0083
+#define regMP1_SMN_C2PMSG_67_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_68 0x0084
+#define regMP1_SMN_C2PMSG_68_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_69 0x0085
+#define regMP1_SMN_C2PMSG_69_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_70 0x0086
+#define regMP1_SMN_C2PMSG_70_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_71 0x0087
+#define regMP1_SMN_C2PMSG_71_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_72 0x0088
+#define regMP1_SMN_C2PMSG_72_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_73 0x0089
+#define regMP1_SMN_C2PMSG_73_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_74 0x008a
+#define regMP1_SMN_C2PMSG_74_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_75 0x008b
+#define regMP1_SMN_C2PMSG_75_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_76 0x008c
+#define regMP1_SMN_C2PMSG_76_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_77 0x008d
+#define regMP1_SMN_C2PMSG_77_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_78 0x008e
+#define regMP1_SMN_C2PMSG_78_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_79 0x008f
+#define regMP1_SMN_C2PMSG_79_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_80 0x0090
+#define regMP1_SMN_C2PMSG_80_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_81 0x0091
+#define regMP1_SMN_C2PMSG_81_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_82 0x0092
+#define regMP1_SMN_C2PMSG_82_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_83 0x0093
+#define regMP1_SMN_C2PMSG_83_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_84 0x0094
+#define regMP1_SMN_C2PMSG_84_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_85 0x0095
+#define regMP1_SMN_C2PMSG_85_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_86 0x0096
+#define regMP1_SMN_C2PMSG_86_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_87 0x0097
+#define regMP1_SMN_C2PMSG_87_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_88 0x0098
+#define regMP1_SMN_C2PMSG_88_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_89 0x0099
+#define regMP1_SMN_C2PMSG_89_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_90 0x009a
+#define regMP1_SMN_C2PMSG_90_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_91 0x009b
+#define regMP1_SMN_C2PMSG_91_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_92 0x009c
+#define regMP1_SMN_C2PMSG_92_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_93 0x009d
+#define regMP1_SMN_C2PMSG_93_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_94 0x009e
+#define regMP1_SMN_C2PMSG_94_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_95 0x009f
+#define regMP1_SMN_C2PMSG_95_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_96 0x00a0
+#define regMP1_SMN_C2PMSG_96_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_97 0x00a1
+#define regMP1_SMN_C2PMSG_97_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_98 0x00a2
+#define regMP1_SMN_C2PMSG_98_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_99 0x00a3
+#define regMP1_SMN_C2PMSG_99_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_100 0x00a4
+#define regMP1_SMN_C2PMSG_100_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_101 0x00a5
+#define regMP1_SMN_C2PMSG_101_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_102 0x00a6
+#define regMP1_SMN_C2PMSG_102_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_103 0x00a7
+#define regMP1_SMN_C2PMSG_103_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_104 0x00a8
+#define regMP1_SMN_C2PMSG_104_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_105 0x00a9
+#define regMP1_SMN_C2PMSG_105_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_106 0x00aa
+#define regMP1_SMN_C2PMSG_106_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_107 0x00ab
+#define regMP1_SMN_C2PMSG_107_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_108 0x00ac
+#define regMP1_SMN_C2PMSG_108_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_109 0x00ad
+#define regMP1_SMN_C2PMSG_109_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_110 0x00ae
+#define regMP1_SMN_C2PMSG_110_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_111 0x00af
+#define regMP1_SMN_C2PMSG_111_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_112 0x00b0
+#define regMP1_SMN_C2PMSG_112_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_113 0x00b1
+#define regMP1_SMN_C2PMSG_113_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_114 0x00b2
+#define regMP1_SMN_C2PMSG_114_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_115 0x00b3
+#define regMP1_SMN_C2PMSG_115_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_116 0x00b4
+#define regMP1_SMN_C2PMSG_116_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_117 0x00b5
+#define regMP1_SMN_C2PMSG_117_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_118 0x00b6
+#define regMP1_SMN_C2PMSG_118_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_119 0x00b7
+#define regMP1_SMN_C2PMSG_119_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_120 0x00b8
+#define regMP1_SMN_C2PMSG_120_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_121 0x00b9
+#define regMP1_SMN_C2PMSG_121_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_122 0x00ba
+#define regMP1_SMN_C2PMSG_122_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_123 0x00bb
+#define regMP1_SMN_C2PMSG_123_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_124 0x00bc
+#define regMP1_SMN_C2PMSG_124_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_125 0x00bd
+#define regMP1_SMN_C2PMSG_125_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_126 0x00be
+#define regMP1_SMN_C2PMSG_126_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_127 0x00bf
+#define regMP1_SMN_C2PMSG_127_BASE_IDX 1
+#define regMP1_SMN_IH_CREDIT 0x0140
+#define regMP1_SMN_IH_CREDIT_BASE_IDX 1
+#define regMP1_SMN_IH_SW_INT 0x0141
+#define regMP1_SMN_IH_SW_INT_BASE_IDX 1
+#define regMP1_SMN_IH_SW_INT_CTRL 0x0142
+#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 1
+#define regMP1_SMN_FPS_CNT 0x0143
+#define regMP1_SMN_FPS_CNT_BASE_IDX 1
+#define regMP1_SMN_PUB_CTRL 0x0144
+#define regMP1_SMN_PUB_CTRL_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH0 0x01c0
+#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH1 0x01c1
+#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH2 0x01c2
+#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH3 0x01c3
+#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH4 0x01c4
+#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH5 0x01c5
+#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH6 0x01c6
+#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH7 0x01c7
+#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH8 0x01c8
+#define regMP1_SMN_EXT_SCRATCH8_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH9 0x01c9
+#define regMP1_SMN_EXT_SCRATCH9_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH10 0x01ca
+#define regMP1_SMN_EXT_SCRATCH10_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH11 0x01cb
+#define regMP1_SMN_EXT_SCRATCH11_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH12 0x01cc
+#define regMP1_SMN_EXT_SCRATCH12_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH13 0x01cd
+#define regMP1_SMN_EXT_SCRATCH13_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH14 0x01ce
+#define regMP1_SMN_EXT_SCRATCH14_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH15 0x01cf
+#define regMP1_SMN_EXT_SCRATCH15_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH16 0x01d0
+#define regMP1_SMN_EXT_SCRATCH16_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH17 0x01d1
+#define regMP1_SMN_EXT_SCRATCH17_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH18 0x01d2
+#define regMP1_SMN_EXT_SCRATCH18_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH19 0x01d3
+#define regMP1_SMN_EXT_SCRATCH19_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH20 0x01d4
+#define regMP1_SMN_EXT_SCRATCH20_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH21 0x01d5
+#define regMP1_SMN_EXT_SCRATCH21_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH22 0x01d6
+#define regMP1_SMN_EXT_SCRATCH22_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH23 0x01d7
+#define regMP1_SMN_EXT_SCRATCH23_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH24 0x01d8
+#define regMP1_SMN_EXT_SCRATCH24_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH25 0x01d9
+#define regMP1_SMN_EXT_SCRATCH25_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH26 0x01da
+#define regMP1_SMN_EXT_SCRATCH26_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH27 0x01db
+#define regMP1_SMN_EXT_SCRATCH27_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH28 0x01dc
+#define regMP1_SMN_EXT_SCRATCH28_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH29 0x01dd
+#define regMP1_SMN_EXT_SCRATCH29_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH30 0x01de
+#define regMP1_SMN_EXT_SCRATCH30_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH31 0x01df
+#define regMP1_SMN_EXT_SCRATCH31_BASE_IDX 1
+
+
+// addressBlock: mp_SmuMpASP_SmnDec
+// base address: 0x0
+#define regMPASP_SMN_C2PMSG_32 0x0060
+#define regMPASP_SMN_C2PMSG_32_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_33 0x0061
+#define regMPASP_SMN_C2PMSG_33_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_34 0x0062
+#define regMPASP_SMN_C2PMSG_34_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_35 0x0063
+#define regMPASP_SMN_C2PMSG_35_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_36 0x0064
+#define regMPASP_SMN_C2PMSG_36_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_37 0x0065
+#define regMPASP_SMN_C2PMSG_37_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_38 0x0066
+#define regMPASP_SMN_C2PMSG_38_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_39 0x0067
+#define regMPASP_SMN_C2PMSG_39_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_60 0x007c
+#define regMPASP_SMN_C2PMSG_60_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_61 0x007d
+#define regMPASP_SMN_C2PMSG_61_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_62 0x007e
+#define regMPASP_SMN_C2PMSG_62_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_63 0x007f
+#define regMPASP_SMN_C2PMSG_63_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_64 0x0080
+#define regMPASP_SMN_C2PMSG_64_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_65 0x0081
+#define regMPASP_SMN_C2PMSG_65_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_66 0x0082
+#define regMPASP_SMN_C2PMSG_66_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_67 0x0083
+#define regMPASP_SMN_C2PMSG_67_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_68 0x0084
+#define regMPASP_SMN_C2PMSG_68_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_69 0x0085
+#define regMPASP_SMN_C2PMSG_69_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_70 0x0086
+#define regMPASP_SMN_C2PMSG_70_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_71 0x0087
+#define regMPASP_SMN_C2PMSG_71_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_72 0x0088
+#define regMPASP_SMN_C2PMSG_72_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_73 0x0089
+#define regMPASP_SMN_C2PMSG_73_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_74 0x008a
+#define regMPASP_SMN_C2PMSG_74_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_75 0x008b
+#define regMPASP_SMN_C2PMSG_75_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_76 0x008c
+#define regMPASP_SMN_C2PMSG_76_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_77 0x008d
+#define regMPASP_SMN_C2PMSG_77_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_78 0x008e
+#define regMPASP_SMN_C2PMSG_78_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_79 0x008f
+#define regMPASP_SMN_C2PMSG_79_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_80 0x0090
+#define regMPASP_SMN_C2PMSG_80_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_81 0x0091
+#define regMPASP_SMN_C2PMSG_81_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_82 0x0092
+#define regMPASP_SMN_C2PMSG_82_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_83 0x0093
+#define regMPASP_SMN_C2PMSG_83_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_84 0x0094
+#define regMPASP_SMN_C2PMSG_84_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_85 0x0095
+#define regMPASP_SMN_C2PMSG_85_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_86 0x0096
+#define regMPASP_SMN_C2PMSG_86_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_87 0x0097
+#define regMPASP_SMN_C2PMSG_87_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_88 0x0098
+#define regMPASP_SMN_C2PMSG_88_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_89 0x0099
+#define regMPASP_SMN_C2PMSG_89_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_100 0x00a4
+#define regMPASP_SMN_C2PMSG_100_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_101 0x00a5
+#define regMPASP_SMN_C2PMSG_101_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_102 0x00a6
+#define regMPASP_SMN_C2PMSG_102_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_103 0x00a7
+#define regMPASP_SMN_C2PMSG_103_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_109 0x00ad
+#define regMPASP_SMN_C2PMSG_109_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_115 0x00b3
+#define regMPASP_SMN_C2PMSG_115_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_116 0x00b4
+#define regMPASP_SMN_C2PMSG_116_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_119_BASE_IDX 0
+#define regMPASP_SMN_IH_CREDIT 0x0140
+#define regMPASP_SMN_IH_CREDIT_BASE_IDX 0
+#define regMPASP_SMN_IH_SW_INT 0x0141
+#define regMPASP_SMN_IH_SW_INT_BASE_IDX 0
+#define regMPASP_SMN_IH_SW_INT_CTRL 0x0142
+#define regMPASP_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: Mp1MmioPublic_SmuMp1Pub_CruDec
+// base address: 0x3b00000
+#define regMP1_CRU1_MP1_FIRMWARE_FLAGS 0x4009
+#define regMP1_CRU1_MP1_FIRMWARE_FLAGS_BASE_IDX 7
+
+
+#endif
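
These offsets are relative to a per-IP, per-instance base that the driver fills in at probe time; the _BASE_IDX constant selects which segment of that base to add. A hedged sketch of the usual SOC15-style address computation (the macro shape matches amdgpu's soc15_common.h, reproduced here only for illustration):

    /* adev->reg_offset[ip][inst][seg] is populated from the IP discovery
     * table; reg##_BASE_IDX picks the segment for this register. */
    #define SOC15_REG_OFFSET(ip, inst, reg) \
            (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

    /* e.g. check whether MP1 firmware has finished booting: */
    u32 flags = RREG32(SOC15_REG_OFFSET(MP1, 0, regMP1_CRU1_MP1_FIRMWARE_FLAGS));
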
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h
new file mode 100644
index 000000000000..3ba269da1463
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_14_0_2_SH_MASK_HEADER
+#define _mp_14_0_2_SH_MASK_HEADER
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+//MP1_SMN_C2PMSG_0
+#define MP1_SMN_C2PMSG_0__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_1
+#define MP1_SMN_C2PMSG_1__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_2
+#define MP1_SMN_C2PMSG_2__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_3
+#define MP1_SMN_C2PMSG_3__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_4
+#define MP1_SMN_C2PMSG_4__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_5
+#define MP1_SMN_C2PMSG_5__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_6
+#define MP1_SMN_C2PMSG_6__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_7
+#define MP1_SMN_C2PMSG_7__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_8
+#define MP1_SMN_C2PMSG_8__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_9
+#define MP1_SMN_C2PMSG_9__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_10
+#define MP1_SMN_C2PMSG_10__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_11
+#define MP1_SMN_C2PMSG_11__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_12
+#define MP1_SMN_C2PMSG_12__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_13
+#define MP1_SMN_C2PMSG_13__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_14
+#define MP1_SMN_C2PMSG_14__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_15
+#define MP1_SMN_C2PMSG_15__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_16
+#define MP1_SMN_C2PMSG_16__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_17
+#define MP1_SMN_C2PMSG_17__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_18
+#define MP1_SMN_C2PMSG_18__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_19
+#define MP1_SMN_C2PMSG_19__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_20
+#define MP1_SMN_C2PMSG_20__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_21
+#define MP1_SMN_C2PMSG_21__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_22
+#define MP1_SMN_C2PMSG_22__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_23
+#define MP1_SMN_C2PMSG_23__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_24
+#define MP1_SMN_C2PMSG_24__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_25
+#define MP1_SMN_C2PMSG_25__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_26
+#define MP1_SMN_C2PMSG_26__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_27
+#define MP1_SMN_C2PMSG_27__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_28
+#define MP1_SMN_C2PMSG_28__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_29
+#define MP1_SMN_C2PMSG_29__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_30
+#define MP1_SMN_C2PMSG_30__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_31
+#define MP1_SMN_C2PMSG_31__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_104
+#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_105
+#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_106
+#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_107
+#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_108
+#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_109
+#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_110
+#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_111
+#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_112
+#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_113
+#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_114
+#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_115
+#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_116
+#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_117
+#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_118
+#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_119
+#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_120
+#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_121
+#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_122
+#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_123
+#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_124
+#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_125
+#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_126
+#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_127
+#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL
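/*
 * Annotation (not part of the generated header): the C2PMSG registers
 * above are free-form 32-bit scratch mailboxes, which is why every one
 * exposes a single full-width CONTENT field. By convention in recent SMU
 * code the driver posts a message ID to C2PMSG_66 with its argument in
 * C2PMSG_82, then polls C2PMSG_90 for the firmware's response code; a
 * hedged sketch, assuming the SOC15 read/write helpers:
 *
 *	WREG32_SOC15(MP1, 0, regMP1_SMN_C2PMSG_90, 0);    // clear response
 *	WREG32_SOC15(MP1, 0, regMP1_SMN_C2PMSG_82, arg);  // message argument
 *	WREG32_SOC15(MP1, 0, regMP1_SMN_C2PMSG_66, msg);  // fires the request
 *	ret = RREG32_SOC15(MP1, 0, regMP1_SMN_C2PMSG_90); // 0x1 == OK
 *
 * Real code bounds the poll with a timeout; see smu_cmn_wait_for_response().
 */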
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_SMN_IH_SW_INT_CTRL
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_SMN_PUB_CTRL
+#define MP1_SMN_PUB_CTRL__LX3_RESET__SHIFT 0x0
+#define MP1_SMN_PUB_CTRL__LX3_RESET_MASK 0x00000001L
+//MP1_SMN_EXT_SCRATCH0
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH1
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH2
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH3
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH4
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH5
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH6
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH7
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH8
+#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH9
+#define MP1_SMN_EXT_SCRATCH9__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH9__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH10
+#define MP1_SMN_EXT_SCRATCH10__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH11
+#define MP1_SMN_EXT_SCRATCH11__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH12
+#define MP1_SMN_EXT_SCRATCH12__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH13
+#define MP1_SMN_EXT_SCRATCH13__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH14
+#define MP1_SMN_EXT_SCRATCH14__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH15
+#define MP1_SMN_EXT_SCRATCH15__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH16
+#define MP1_SMN_EXT_SCRATCH16__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH17
+#define MP1_SMN_EXT_SCRATCH17__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH18
+#define MP1_SMN_EXT_SCRATCH18__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH19
+#define MP1_SMN_EXT_SCRATCH19__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH20
+#define MP1_SMN_EXT_SCRATCH20__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH21
+#define MP1_SMN_EXT_SCRATCH21__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH22
+#define MP1_SMN_EXT_SCRATCH22__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH23
+#define MP1_SMN_EXT_SCRATCH23__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH24
+#define MP1_SMN_EXT_SCRATCH24__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH24__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH25
+#define MP1_SMN_EXT_SCRATCH25__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH25__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH26
+#define MP1_SMN_EXT_SCRATCH26__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH26__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH27
+#define MP1_SMN_EXT_SCRATCH27__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH27__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH28
+#define MP1_SMN_EXT_SCRATCH28__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH28__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH29
+#define MP1_SMN_EXT_SCRATCH29__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH29__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH30
+#define MP1_SMN_EXT_SCRATCH30__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH30__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH31
+#define MP1_SMN_EXT_SCRATCH31__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH31__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mp_SmuMpASP_SmnDec
+//MPASP_SMN_C2PMSG_32
+#define MPASP_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_33
+#define MPASP_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_34
+#define MPASP_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_35
+#define MPASP_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_36
+#define MPASP_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_37
+#define MPASP_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_38
+#define MPASP_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_39
+#define MPASP_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_60
+#define MPASP_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_61
+#define MPASP_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_62
+#define MPASP_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_63
+#define MPASP_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_64
+#define MPASP_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_65
+#define MPASP_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_66
+#define MPASP_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_67
+#define MPASP_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_68
+#define MPASP_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_69
+#define MPASP_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_70
+#define MPASP_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_71
+#define MPASP_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_72
+#define MPASP_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_73
+#define MPASP_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_74
+#define MPASP_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_75
+#define MPASP_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_76
+#define MPASP_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_77
+#define MPASP_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_78
+#define MPASP_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_79
+#define MPASP_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_80
+#define MPASP_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_81
+#define MPASP_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_82
+#define MPASP_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_83
+#define MPASP_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_84
+#define MPASP_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_85
+#define MPASP_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_86
+#define MPASP_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_87
+#define MPASP_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_88
+#define MPASP_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_89
+#define MPASP_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_100
+#define MPASP_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_101
+#define MPASP_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_102
+#define MPASP_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_103
+#define MPASP_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_109
+#define MPASP_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_115
+#define MPASP_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_116
+#define MPASP_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_IH_CREDIT
+#define MPASP_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MPASP_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MPASP_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MPASP_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MPASP_SMN_IH_SW_INT
+#define MPASP_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MPASP_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MPASP_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MPASP_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MPASP_SMN_IH_SW_INT_CTRL
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: Mp1MmioPublic_SmuMp1Pub_CruDec
+//MP1_CRU1_MP1_FIRMWARE_FLAGS
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
+
+
+#endif
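
The *_sh_mask.h headers above are pure data: for every register field the generator emits a __SHIFT/_MASK pair, and drivers extract or pack fields by token-pasting those names. A minimal standalone sketch of that accessor pattern follows; amdgpu's own REG_GET_FIELD()/REG_SET_FIELD() helpers are built on the same reg##__##field naming convention, but the GET_FIELD/SET_FIELD macros here are illustrative stand-ins, not the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Shift/mask pairs exactly as emitted by the generated header. */
    #define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
    #define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT    0x10
    #define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK   0x00000003L
    #define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK      0x00FF0000L

    /* Illustrative accessors built on the reg##__##field naming scheme. */
    #define GET_FIELD(val, reg, field) \
            (((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
    #define SET_FIELD(val, reg, field, fv) \
            (((val) & ~reg##__##field##_MASK) | \
             (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

    int main(void)
    {
            uint32_t v = 0;

            /* Pack a 2-credit grant for a hypothetical client 0x1a,
             * then read both fields back out of the packed value. */
            v = SET_FIELD(v, MP1_SMN_IH_CREDIT, CREDIT_VALUE, 2);
            v = SET_FIELD(v, MP1_SMN_IH_CREDIT, CLIENT_ID, 0x1a);
            printf("reg=0x%08x credit=%u client=0x%02x\n", v,
                   (unsigned)GET_FIELD(v, MP1_SMN_IH_CREDIT, CREDIT_VALUE),
                   (unsigned)GET_FIELD(v, MP1_SMN_IH_CREDIT, CLIENT_ID));
            return 0;
    }
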
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
index 6f80bfa7e41a..5ebe4cb40f9d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
@@ -8900,6 +8900,8 @@
#define regGDC0_BIF_IH_DOORBELL_RANGE_BASE_IDX 3
#define regGDC0_BIF_VCN0_DOORBELL_RANGE 0x4f0af3
#define regGDC0_BIF_VCN0_DOORBELL_RANGE_BASE_IDX 3
+#define regGDC0_BIF_VPE1_DOORBELL_RANGE 0x4f0af4
+#define regGDC0_BIF_VPE1_DOORBELL_RANGE_BASE_IDX 3
#define regGDC0_BIF_RLC_DOORBELL_RANGE 0x4f0af5
#define regGDC0_BIF_RLC_DOORBELL_RANGE_BASE_IDX 3
#define regGDC0_BIF_SDMA2_DOORBELL_RANGE 0x4f0af6
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h
new file mode 100644
index 000000000000..45a961ef74ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_7_0_0_OFFSET_HEADER
+#define _osssys_7_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define regIH_VMID_0_LUT 0x0000
+#define regIH_VMID_0_LUT_BASE_IDX 0
+#define regIH_VMID_1_LUT 0x0001
+#define regIH_VMID_1_LUT_BASE_IDX 0
+#define regIH_VMID_2_LUT 0x0002
+#define regIH_VMID_2_LUT_BASE_IDX 0
+#define regIH_VMID_3_LUT 0x0003
+#define regIH_VMID_3_LUT_BASE_IDX 0
+#define regIH_VMID_4_LUT 0x0004
+#define regIH_VMID_4_LUT_BASE_IDX 0
+#define regIH_VMID_5_LUT 0x0005
+#define regIH_VMID_5_LUT_BASE_IDX 0
+#define regIH_VMID_6_LUT 0x0006
+#define regIH_VMID_6_LUT_BASE_IDX 0
+#define regIH_VMID_7_LUT 0x0007
+#define regIH_VMID_7_LUT_BASE_IDX 0
+#define regIH_VMID_8_LUT 0x0008
+#define regIH_VMID_8_LUT_BASE_IDX 0
+#define regIH_VMID_9_LUT 0x0009
+#define regIH_VMID_9_LUT_BASE_IDX 0
+#define regIH_VMID_10_LUT 0x000a
+#define regIH_VMID_10_LUT_BASE_IDX 0
+#define regIH_VMID_11_LUT 0x000b
+#define regIH_VMID_11_LUT_BASE_IDX 0
+#define regIH_VMID_12_LUT 0x000c
+#define regIH_VMID_12_LUT_BASE_IDX 0
+#define regIH_VMID_13_LUT 0x000d
+#define regIH_VMID_13_LUT_BASE_IDX 0
+#define regIH_VMID_14_LUT 0x000e
+#define regIH_VMID_14_LUT_BASE_IDX 0
+#define regIH_VMID_15_LUT 0x000f
+#define regIH_VMID_15_LUT_BASE_IDX 0
+#define regIH_VMID_0_LUT_MM 0x0010
+#define regIH_VMID_0_LUT_MM_BASE_IDX 0
+#define regIH_VMID_1_LUT_MM 0x0011
+#define regIH_VMID_1_LUT_MM_BASE_IDX 0
+#define regIH_VMID_2_LUT_MM 0x0012
+#define regIH_VMID_2_LUT_MM_BASE_IDX 0
+#define regIH_VMID_3_LUT_MM 0x0013
+#define regIH_VMID_3_LUT_MM_BASE_IDX 0
+#define regIH_VMID_4_LUT_MM 0x0014
+#define regIH_VMID_4_LUT_MM_BASE_IDX 0
+#define regIH_VMID_5_LUT_MM 0x0015
+#define regIH_VMID_5_LUT_MM_BASE_IDX 0
+#define regIH_VMID_6_LUT_MM 0x0016
+#define regIH_VMID_6_LUT_MM_BASE_IDX 0
+#define regIH_VMID_7_LUT_MM 0x0017
+#define regIH_VMID_7_LUT_MM_BASE_IDX 0
+#define regIH_VMID_8_LUT_MM 0x0018
+#define regIH_VMID_8_LUT_MM_BASE_IDX 0
+#define regIH_VMID_9_LUT_MM 0x0019
+#define regIH_VMID_9_LUT_MM_BASE_IDX 0
+#define regIH_VMID_10_LUT_MM 0x001a
+#define regIH_VMID_10_LUT_MM_BASE_IDX 0
+#define regIH_VMID_11_LUT_MM 0x001b
+#define regIH_VMID_11_LUT_MM_BASE_IDX 0
+#define regIH_VMID_12_LUT_MM 0x001c
+#define regIH_VMID_12_LUT_MM_BASE_IDX 0
+#define regIH_VMID_13_LUT_MM 0x001d
+#define regIH_VMID_13_LUT_MM_BASE_IDX 0
+#define regIH_VMID_14_LUT_MM 0x001e
+#define regIH_VMID_14_LUT_MM_BASE_IDX 0
+#define regIH_VMID_15_LUT_MM 0x001f
+#define regIH_VMID_15_LUT_MM_BASE_IDX 0
+#define regIH_COOKIE_0 0x0020
+#define regIH_COOKIE_0_BASE_IDX 0
+#define regIH_COOKIE_1 0x0021
+#define regIH_COOKIE_1_BASE_IDX 0
+#define regIH_COOKIE_2 0x0022
+#define regIH_COOKIE_2_BASE_IDX 0
+#define regIH_COOKIE_3 0x0023
+#define regIH_COOKIE_3_BASE_IDX 0
+#define regIH_COOKIE_4 0x0024
+#define regIH_COOKIE_4_BASE_IDX 0
+#define regIH_COOKIE_5 0x0025
+#define regIH_COOKIE_5_BASE_IDX 0
+#define regIH_COOKIE_6 0x0026
+#define regIH_COOKIE_6_BASE_IDX 0
+#define regIH_COOKIE_7 0x0027
+#define regIH_COOKIE_7_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART0 0x003f
+#define regIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define regIH_RB_CNTL 0x0080
+#define regIH_RB_CNTL_BASE_IDX 0
+#define regIH_RB_RPTR 0x0081
+#define regIH_RB_RPTR_BASE_IDX 0
+#define regIH_RB_WPTR 0x0082
+#define regIH_RB_WPTR_BASE_IDX 0
+#define regIH_RB_BASE 0x0083
+#define regIH_RB_BASE_BASE_IDX 0
+#define regIH_RB_BASE_HI 0x0084
+#define regIH_RB_BASE_HI_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_HI 0x0085
+#define regIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_LO 0x0086
+#define regIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define regIH_DOORBELL_RPTR 0x0087
+#define regIH_DOORBELL_RPTR_BASE_IDX 0
+#define regIH_DOORBELL_RETRY_CAM 0x0088
+#define regIH_DOORBELL_RETRY_CAM_BASE_IDX 0
+#define regIH_RB_CNTL_RING1 0x008c
+#define regIH_RB_CNTL_RING1_BASE_IDX 0
+#define regIH_RB_RPTR_RING1 0x008d
+#define regIH_RB_RPTR_RING1_BASE_IDX 0
+#define regIH_RB_WPTR_RING1 0x008e
+#define regIH_RB_WPTR_RING1_BASE_IDX 0
+#define regIH_RB_BASE_RING1 0x008f
+#define regIH_RB_BASE_RING1_BASE_IDX 0
+#define regIH_RB_BASE_HI_RING1 0x0090
+#define regIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define regIH_DOORBELL_RPTR_RING1 0x0093
+#define regIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define regIH_RETRY_CAM_ACK 0x00a4
+#define regIH_RETRY_CAM_ACK_BASE_IDX 0
+#define regIH_VERSION 0x00a5
+#define regIH_VERSION_BASE_IDX 0
+#define regIH_CNTL 0x00a8
+#define regIH_CNTL_BASE_IDX 0
+#define regIH_CLK_CTRL 0x00a9
+#define regIH_CLK_CTRL_BASE_IDX 0
+#define regIH_STORM_CLIENT_LIST_CNTL 0x00aa
+#define regIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define regIH_LIMIT_INT_RATE_CNTL 0x00ab
+#define regIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define regIH_RETRY_INT_CAM_CNTL 0x00ac
+#define regIH_RETRY_INT_CAM_CNTL_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL 0x00ad
+#define regIH_MEM_POWER_CTRL_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL2 0x00ae
+#define regIH_MEM_POWER_CTRL2_BASE_IDX 0
+#define regIH_CNTL2 0x00c1
+#define regIH_CNTL2_BASE_IDX 0
+#define regIH_STATUS 0x00c2
+#define regIH_STATUS_BASE_IDX 0
+#define regIH_PERFMON_CNTL 0x00c3
+#define regIH_PERFMON_CNTL_BASE_IDX 0
+#define regIH_PERFCOUNTER0_RESULT 0x00c4
+#define regIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define regIH_PERFCOUNTER1_RESULT 0x00c5
+#define regIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define regIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define regIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define regIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define regIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define regIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define regIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_FCN_ID 0x00cc
+#define regIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define regIH_VF_RB_STATUS 0x00ce
+#define regIH_VF_RB_STATUS_BASE_IDX 0
+#define regIH_VF_RB_STATUS2 0x00cf
+#define regIH_VF_RB_STATUS2_BASE_IDX 0
+#define regIH_VF_RB1_STATUS 0x00d0
+#define regIH_VF_RB1_STATUS_BASE_IDX 0
+#define regIH_VF_RB1_STATUS2 0x00d1
+#define regIH_VF_RB1_STATUS2_BASE_IDX 0
+#define regIH_RB_STATUS 0x00d4
+#define regIH_RB_STATUS_BASE_IDX 0
+#define regIH_INT_FLOOD_CNTL 0x00d5
+#define regIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define regIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define regIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define regIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_INT_FLOOD_STATUS 0x00d9
+#define regIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_INT_FLAGS 0x00dc
+#define regIH_INT_FLAGS_BASE_IDX 0
+#define regIH_SCRATCH 0x00e0
+#define regIH_SCRATCH_BASE_IDX 0
+#define regIH_CLIENT_CREDIT_ERROR 0x00e1
+#define regIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define regIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG2 0x00e3
+#define regIH_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regIH_COOKIE_REC_VIOLATION_LOG 0x00e4
+#define regIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define regIH_CREDIT_STATUS 0x00e5
+#define regIH_CREDIT_STATUS_BASE_IDX 0
+#define regIH_MMHUB_ERROR 0x00e6
+#define regIH_MMHUB_ERROR_BASE_IDX 0
+#define regIH_VF_RB_STATUS3 0x00ea
+#define regIH_VF_RB_STATUS3_BASE_IDX 0
+#define regIH_VF_RB_STATUS4 0x00eb
+#define regIH_VF_RB_STATUS4_BASE_IDX 0
+#define regIH_VF_RB1_STATUS3 0x00ec
+#define regIH_VF_RB1_STATUS3_BASE_IDX 0
+#define regIH_MSI_STORM_CTRL 0x00f1
+#define regIH_MSI_STORM_CTRL_BASE_IDX 0
+#define regIH_MSI_STORM_CLIENT_INDEX 0x00f2
+#define regIH_MSI_STORM_CLIENT_INDEX_BASE_IDX 0
+#define regIH_MSI_STORM_CLIENT_DATA 0x00f3
+#define regIH_MSI_STORM_CLIENT_DATA_BASE_IDX 0
+#define regIH_LAST_INT_INFO0 0x00f9
+#define regIH_LAST_INT_INFO0_BASE_IDX 0
+#define regIH_LAST_INT_INFO1 0x00fa
+#define regIH_LAST_INT_INFO1_BASE_IDX 0
+#define regIH_LAST_INT_INFO2 0x00fb
+#define regIH_LAST_INT_INFO2_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART2 0x00ff
+#define regIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define regSEM_MAILBOX 0x010a
+#define regSEM_MAILBOX_BASE_IDX 0
+#define regSEM_MAILBOX_CLEAR 0x010b
+#define regSEM_MAILBOX_CLEAR_BASE_IDX 0
+#define regSEM_REGISTER_LAST_PART2 0x017f
+#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define regIH_ACTIVE_FCN_ID 0x0180
+#define regIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define regIH_VIRT_RESET_REQ 0x0181
+#define regIH_VIRT_RESET_REQ_BASE_IDX 0
+#define regIH_CLIENT_CFG 0x0182
+#define regIH_CLIENT_CFG_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_INDEX 0x0183
+#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_DATA 0x0184
+#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0
+#define regIH_CLIENT_CFG_INDEX 0x0185
+#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA 0x0186
+#define regIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA2 0x0187
+#define regIH_CLIENT_CFG_DATA2_BASE_IDX 0
+#define regIH_CID_REMAP_INDEX 0x0188
+#define regIH_CID_REMAP_INDEX_BASE_IDX 0
+#define regIH_CID_REMAP_DATA 0x0189
+#define regIH_CID_REMAP_DATA_BASE_IDX 0
+#define regIH_CHICKEN 0x018a
+#define regIH_CHICKEN_BASE_IDX 0
+#define regIH_INT_DROP_CNTL 0x018c
+#define regIH_INT_DROP_CNTL_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE0 0x018d
+#define regIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE1 0x018e
+#define regIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK0 0x018f
+#define regIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK1 0x0190
+#define regIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define regIH_MMHUB_CNTL 0x01a7
+#define regIH_MMHUB_CNTL_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART1 0x01a8
+#define regIH_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
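
The *_offset.h file above encodes each register as a dword offset within its IP block, plus a _BASE_IDX selecting which base-address segment that block was mapped at during IP discovery. amdgpu resolves these through per-IP tables (its SOC15_REG_OFFSET()-style lookup over adev->reg_offset[]); the following is a minimal standalone sketch of the arithmetic only, assuming a single hypothetical OSSSYS instance at the "base address: 0x4280" noted in the header:

    #include <stdint.h>
    #include <stdio.h>

    /* Encodings copied from the generated offset header. */
    #define regIH_RB_CNTL          0x0080
    #define regIH_RB_CNTL_BASE_IDX 0

    /* Hypothetical one-instance segment table, in dword units
     * (0x4280 bytes == 0x10a0 dwords). The real driver fills an
     * equivalent table per HW IP and instance at probe time. */
    static const uint32_t osssys_base[] = { 0x4280 / 4 };

    int main(void)
    {
            /* Final dword offset = segment base + register offset. */
            uint32_t dw = osssys_base[regIH_RB_CNTL_BASE_IDX] + regIH_RB_CNTL;

            printf("IH_RB_CNTL: dword offset 0x%04x, byte address 0x%05x\n",
                   dw, dw * 4);   /* -> 0x1120 dwords, 0x04480 bytes */
            return 0;
    }
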
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h
new file mode 100644
index 000000000000..a29607bc0db5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h
@@ -0,0 +1,1029 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_7_0_0_SH_MASK_HEADER
+#define _osssys_7_0_0_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_DOORBELL_RETRY_CAM
+#define IH_DOORBELL_RETRY_CAM__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RETRY_CAM__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RETRY_CAM__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RETRY_CAM__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RETRY_CAM_ACK
+#define IH_RETRY_CAM_ACK__INDEX__SHIFT 0x0
+#define IH_RETRY_CAM_ACK__INDEX_MASK 0x000003FFL
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x17
+#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE__SHIFT 0x18
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE_MASK 0x00800000L
+#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE_MASK 0x01000000L
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_RETRY_INT_CAM_CNTL
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE__SHIFT 0x0
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE__SHIFT 0x8
+#define IH_RETRY_INT_CAM_CNTL__ENABLE__SHIFT 0x10
+#define IH_RETRY_INT_CAM_CNTL__MM_BACK_PRESSURE_ENABLE__SHIFT 0x11
+#define IH_RETRY_INT_CAM_CNTL__GC_BACK_PRESSURE_ENABLE__SHIFT 0x12
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE__SHIFT 0x14
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE_MASK 0x0000001FL
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE_MASK 0x00003F00L
+#define IH_RETRY_INT_CAM_CNTL__ENABLE_MASK 0x00010000L
+#define IH_RETRY_INT_CAM_CNTL__MM_BACK_PRESSURE_ENABLE_MASK 0x00020000L
+#define IH_RETRY_INT_CAM_CNTL__GC_BACK_PRESSURE_ENABLE_MASK 0x00040000L
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE_MASK 0x00300000L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN__SHIFT 0x11
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN__SHIFT 0x12
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN__SHIFT 0x13
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN_MASK 0x00020000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN_MASK 0x00040000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN_MASK 0x00080000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L
+//IH_MEM_POWER_CTRL2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__RETRY_INT_CAM_IDLE__SHIFT 0x13
+#define IH_STATUS__ZSTATES_FENCE__SHIFT 0x14
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED__SHIFT 0x15
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED__SHIFT 0x16
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED__SHIFT 0x17
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+#define IH_STATUS__RETRY_INT_CAM_IDLE_MASK 0x00080000L
+#define IH_STATUS__ZSTATES_FENCE_MASK 0x00100000L
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED_MASK 0x00200000L
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED_MASK 0x00400000L
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED_MASK 0x00800000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x00000FFCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0FFC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN__SHIFT 0x7
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN_MASK 0x00000080L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x7
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000001FL
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000080L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x00FFFFFFL
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x00FFFFFFL
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x00FFFFFFL
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x00FFFFFFL
+//IH_RB_STATUS
+#define IH_RB_STATUS__RB_FULL__SHIFT 0x0
+#define IH_RB_STATUS__RB_FULL_DRAIN__SHIFT 0x1
+#define IH_RB_STATUS__RB_OVERFLOW__SHIFT 0x2
+#define IH_RB_STATUS__RB1_FULL__SHIFT 0x4
+#define IH_RB_STATUS__RB1_FULL_DRAIN__SHIFT 0x5
+#define IH_RB_STATUS__RB1_OVERFLOW__SHIFT 0x6
+#define IH_RB_STATUS__RB_FULL_MASK 0x00000001L
+#define IH_RB_STATUS__RB_FULL_DRAIN_MASK 0x00000002L
+#define IH_RB_STATUS__RB_OVERFLOW_MASK 0x00000004L
+#define IH_RB_STATUS__RB1_FULL_MASK 0x00000010L
+#define IH_RB_STATUS__RB1_FULL_DRAIN_MASK 0x00000020L
+#define IH_RB_STATUS__RB1_OVERFLOW_MASK 0x00000040L
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x00FFFFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x00FFFFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1d
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x1F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x20000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x16
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x17
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00400000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00800000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x1F000000L
+//IH_GPU_IOV_VIOLATION_LOG2
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x8
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID__SHIFT 0x1a
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x0000FF00L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0x03FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID_MASK 0x3C000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+//IH_VF_RB_STATUS3
+#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF_MASK 0x00FFFFFFL
+//IH_VF_RB_STATUS4
+#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF_MASK 0x00FFFFFFL
+//IH_VF_RB1_STATUS3
+#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF_MASK 0x00FFFFFFL
+//IH_MSI_STORM_CTRL
+#define IH_MSI_STORM_CTRL__DELAY__SHIFT 0x0
+#define IH_MSI_STORM_CTRL__DELAY_MASK 0x00000FFFL
+//IH_MSI_STORM_CLIENT_INDEX
+#define IH_MSI_STORM_CLIENT_INDEX__INDEX__SHIFT 0x0
+#define IH_MSI_STORM_CLIENT_INDEX__INDEX_MASK 0x00000007L
+//IH_MSI_STORM_CLIENT_DATA
+#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE__SHIFT 0x11
+#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID__SHIFT 0x1f
+#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
+#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE_MASK 0x00020000L
+#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x17
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x001F0000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00800000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CLEAR
+#define SEM_MAILBOX_CLEAR__CLEAR__SHIFT 0x0
+#define SEM_MAILBOX_CLEAR__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CLEAR__CLEAR_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CLEAR__RESERVED_MASK 0xFFFF0000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x5
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000001FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFE0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x00FFFFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000001FL
+//IH_RING1_CLIENT_CFG_INDEX
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L
+//IH_RING1_CLIENT_CFG_DATA
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE__SHIFT 0x19
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE_MASK 0x02000000L
+//IH_CLIENT_CFG_DATA2
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR_MASK 0xFFFFFFFFL
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x18
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0003FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0xFF000000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__DBGU_TRIGGER_ENABLE__SHIFT 0x1
+#define IH_CHICKEN__CROSS_TRIGGER_ENABLE__SHIFT 0x2
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
+#define IH_CHICKEN__REG_FIREWALL_ENABLE__SHIFT 0x5
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__DBGU_TRIGGER_ENABLE_MASK 0x00000002L
+#define IH_CHICKEN__CROSS_TRIGGER_ENABLE_MASK 0x00000004L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
+#define IH_CHICKEN__REG_FIREWALL_ENABLE_MASK 0x00000020L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x001F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x001F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
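Taken together, the IH_INT_DROP_* registers above appear to implement a single programmable drop filter: IH_INT_DROP_CNTL selects which fields participate in the match, the MATCH_VALUE registers hold the compare values, and the MATCH_MASK registers select which bits of each field are compared. A minimal sketch of arming the filter for one client/source pair, assuming the REG_SET_FIELD() field helper from drivers/gpu/drm/amd/amdgpu/amdgpu.h (reproduced so the snippet stands alone; the 0x12/0x92 IDs and the write-back path are illustrative, not taken from this patch):

/* Field helpers mirroring drivers/gpu/drm/amd/amdgpu/amdgpu.h */
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field)  reg##__##field##_MASK
#define REG_SET_FIELD(orig_val, reg, field, field_val) \
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

u32 drop = 0, val0 = 0, mask0 = 0;

drop  = REG_SET_FIELD(drop,  IH_INT_DROP_CNTL, INT_DROP_EN, 1);
drop  = REG_SET_FIELD(drop,  IH_INT_DROP_CNTL, CLIENT_ID_MATCH_EN, 1);
drop  = REG_SET_FIELD(drop,  IH_INT_DROP_CNTL, SOURCE_ID_MATCH_EN, 1);
/* Match client 0x12, source 0x92 exactly: compare all eight bits of each. */
val0  = REG_SET_FIELD(val0,  IH_INT_DROP_MATCH_VALUE0, CLIENT_ID_MATCH_VALUE, 0x12);
val0  = REG_SET_FIELD(val0,  IH_INT_DROP_MATCH_VALUE0, SOURCE_ID_MATCH_VALUE, 0x92);
mask0 = REG_SET_FIELD(mask0, IH_INT_DROP_MATCH_MASK0,  CLIENT_ID_MATCH_MASK, 0xff);
mask0 = REG_SET_FIELD(mask0, IH_INT_DROP_MATCH_MASK0,  SOURCE_ID_MATCH_MASK, 0xff);
/* drop/val0/mask0 would then be written back through the driver's WREG32 path. */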
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000F00L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x0000F000L
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
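Reading works the same way in reverse: REG_GET_FIELD(), the counterpart of the helper in the sketch above, recovers a field from a raw register value. Decoding the last-interrupt snapshot registers defined in this header might look like the following sketch (the raw value would normally come from an RREG32 of IH_LAST_INT_INFO0; 0x01001234 is illustrative):

#define REG_GET_FIELD(value, reg, field) \
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

u32 info0  = 0x01001234;	/* illustrative raw register value */
u32 client = REG_GET_FIELD(info0, IH_LAST_INT_INFO0, CLIENT_ID);	/* 0x34 */
u32 source = REG_GET_FIELD(info0, IH_LAST_INT_INFO0, SOURCE_ID);	/* 0x12 */
u32 vmid   = REG_GET_FIELD(info0, IH_LAST_INT_INFO0, VM_ID);		/* 0x1  */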
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
new file mode 100644
index 000000000000..14574112c469
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
@@ -0,0 +1,1672 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _vcn_5_0_0_OFFSET_HEADER
+#define _vcn_5_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: uvd_uvddec
+// base address: 0x1fc00
+#define regUVD_TOP_CTRL 0x0100
+#define regUVD_TOP_CTRL_BASE_IDX 1
+#define regUVD_CGC_GATE 0x0101
+#define regUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_CGC_CTRL 0x0102
+#define regUVD_CGC_CTRL_BASE_IDX 1
+#define regAVM_SUVD_CGC_GATE 0x0104
+#define regAVM_SUVD_CGC_GATE_BASE_IDX 1
+#define regEFC_SUVD_CGC_GATE 0x0104
+#define regEFC_SUVD_CGC_GATE_BASE_IDX 1
+#define regENT_SUVD_CGC_GATE 0x0104
+#define regENT_SUVD_CGC_GATE_BASE_IDX 1
+#define regIME_SUVD_CGC_GATE 0x0104
+#define regIME_SUVD_CGC_GATE_BASE_IDX 1
+#define regPPU_SUVD_CGC_GATE 0x0104
+#define regPPU_SUVD_CGC_GATE_BASE_IDX 1
+#define regSAOE_SUVD_CGC_GATE 0x0104
+#define regSAOE_SUVD_CGC_GATE_BASE_IDX 1
+#define regSCM_SUVD_CGC_GATE 0x0104
+#define regSCM_SUVD_CGC_GATE_BASE_IDX 1
+#define regSDB_SUVD_CGC_GATE 0x0104
+#define regSDB_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_GATE 0x0104
+#define regSIT0_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_GATE 0x0104
+#define regSIT1_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_GATE 0x0104
+#define regSIT2_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT_SUVD_CGC_GATE 0x0104
+#define regSIT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSMPA_SUVD_CGC_GATE 0x0104
+#define regSMPA_SUVD_CGC_GATE_BASE_IDX 1
+#define regSMP_SUVD_CGC_GATE 0x0104
+#define regSMP_SUVD_CGC_GATE_BASE_IDX 1
+#define regSRE_SUVD_CGC_GATE 0x0104
+#define regSRE_SUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_SUVD_CGC_GATE 0x0104
+#define regUVD_SUVD_CGC_GATE_BASE_IDX 1
+#define regAVM_SUVD_CGC_GATE2 0x0105
+#define regAVM_SUVD_CGC_GATE2_BASE_IDX 1
+#define regDBR_SUVD_CGC_GATE2 0x0105
+#define regDBR_SUVD_CGC_GATE2_BASE_IDX 1
+#define regENT_SUVD_CGC_GATE2 0x0105
+#define regENT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regIME_SUVD_CGC_GATE2 0x0105
+#define regIME_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSAOE_SUVD_CGC_GATE2 0x0105
+#define regSAOE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSDB_SUVD_CGC_GATE2 0x0105
+#define regSDB_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_GATE2 0x0105
+#define regSIT0_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_GATE2 0x0105
+#define regSIT1_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_GATE2 0x0105
+#define regSIT2_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT_SUVD_CGC_GATE2 0x0105
+#define regSIT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSMPA_SUVD_CGC_GATE2 0x0105
+#define regSMPA_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSMP_SUVD_CGC_GATE2 0x0105
+#define regSMP_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSRE_SUVD_CGC_GATE2 0x0105
+#define regSRE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regUVD_SUVD_CGC_GATE2 0x0105
+#define regUVD_SUVD_CGC_GATE2_BASE_IDX 1
+#define regAVM_SUVD_CGC_CTRL 0x0106
+#define regAVM_SUVD_CGC_CTRL_BASE_IDX 1
+#define regDBR_SUVD_CGC_CTRL 0x0106
+#define regDBR_SUVD_CGC_CTRL_BASE_IDX 1
+#define regEFC_SUVD_CGC_CTRL 0x0106
+#define regEFC_SUVD_CGC_CTRL_BASE_IDX 1
+#define regENT_SUVD_CGC_CTRL 0x0106
+#define regENT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regIME_SUVD_CGC_CTRL 0x0106
+#define regIME_SUVD_CGC_CTRL_BASE_IDX 1
+#define regPPU_SUVD_CGC_CTRL 0x0106
+#define regPPU_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSAOE_SUVD_CGC_CTRL 0x0106
+#define regSAOE_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSCM_SUVD_CGC_CTRL 0x0106
+#define regSCM_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSDB_SUVD_CGC_CTRL 0x0106
+#define regSDB_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_CTRL 0x0106
+#define regSIT0_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_CTRL 0x0106
+#define regSIT1_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_CTRL 0x0106
+#define regSIT2_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT_SUVD_CGC_CTRL 0x0106
+#define regSIT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSMPA_SUVD_CGC_CTRL 0x0106
+#define regSMPA_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSMP_SUVD_CGC_CTRL 0x0106
+#define regSMP_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSRE_SUVD_CGC_CTRL 0x0106
+#define regSRE_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_SUVD_CGC_CTRL 0x0106
+#define regUVD_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_CGC_CTRL3 0x010a
+#define regUVD_CGC_CTRL3_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_DATA0 0x0110
+#define regUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_DATA1 0x0111
+#define regUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
+#define regUVD_GPCOM_SYS_CMD 0x0112
+#define regUVD_GPCOM_SYS_CMD_BASE_IDX 1
+#define regUVD_GPCOM_SYS_DATA0 0x0113
+#define regUVD_GPCOM_SYS_DATA0_BASE_IDX 1
+#define regUVD_GPCOM_SYS_DATA1 0x0114
+#define regUVD_GPCOM_SYS_DATA1_BASE_IDX 1
+#define regUVD_VCPU_INT_EN 0x0115
+#define regUVD_VCPU_INT_EN_BASE_IDX 1
+#define regUVD_VCPU_INT_STATUS 0x0116
+#define regUVD_VCPU_INT_STATUS_BASE_IDX 1
+#define regUVD_VCPU_INT_ACK 0x0117
+#define regUVD_VCPU_INT_ACK_BASE_IDX 1
+#define regUVD_VCPU_INT_ROUTE 0x0118
+#define regUVD_VCPU_INT_ROUTE_BASE_IDX 1
+#define regUVD_DRV_FW_MSG 0x0119
+#define regUVD_DRV_FW_MSG_BASE_IDX 1
+#define regUVD_FW_DRV_MSG_ACK 0x011a
+#define regUVD_FW_DRV_MSG_ACK_BASE_IDX 1
+#define regUVD_SUVD_INT_EN 0x011b
+#define regUVD_SUVD_INT_EN_BASE_IDX 1
+#define regUVD_SUVD_INT_STATUS 0x011c
+#define regUVD_SUVD_INT_STATUS_BASE_IDX 1
+#define regUVD_SUVD_INT_ACK 0x011d
+#define regUVD_SUVD_INT_ACK_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_EN 0x011e
+#define regUVD_ENC_VCPU_INT_EN_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_STATUS 0x011f
+#define regUVD_ENC_VCPU_INT_STATUS_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_ACK 0x0120
+#define regUVD_ENC_VCPU_INT_ACK_BASE_IDX 1
+#define regUVD_MASTINT_EN 0x0121
+#define regUVD_MASTINT_EN_BASE_IDX 1
+#define regUVD_SYS_INT_EN 0x0122
+#define regUVD_SYS_INT_EN_BASE_IDX 1
+#define regUVD_SYS_INT_STATUS 0x0123
+#define regUVD_SYS_INT_STATUS_BASE_IDX 1
+#define regUVD_SYS_INT_ACK 0x0124
+#define regUVD_SYS_INT_ACK_BASE_IDX 1
+#define regUVD_JOB_DONE 0x0125
+#define regUVD_JOB_DONE_BASE_IDX 1
+#define regUVD_CBUF_ID 0x0126
+#define regUVD_CBUF_ID_BASE_IDX 1
+#define regUVD_CONTEXT_ID 0x0127
+#define regUVD_CONTEXT_ID_BASE_IDX 1
+#define regUVD_CONTEXT_ID2 0x0128
+#define regUVD_CONTEXT_ID2_BASE_IDX 1
+#define regUVD_NO_OP 0x0129
+#define regUVD_NO_OP_BASE_IDX 1
+#define regUVD_RB_BASE_LO 0x012a
+#define regUVD_RB_BASE_LO_BASE_IDX 1
+#define regUVD_RB_BASE_HI 0x012b
+#define regUVD_RB_BASE_HI_BASE_IDX 1
+#define regUVD_RB_SIZE 0x012c
+#define regUVD_RB_SIZE_BASE_IDX 1
+#define regUVD_RB_BASE_LO2 0x012f
+#define regUVD_RB_BASE_LO2_BASE_IDX 1
+#define regUVD_RB_BASE_HI2 0x0130
+#define regUVD_RB_BASE_HI2_BASE_IDX 1
+#define regUVD_RB_SIZE2 0x0131
+#define regUVD_RB_SIZE2_BASE_IDX 1
+#define regUVD_RB_BASE_LO3 0x0134
+#define regUVD_RB_BASE_LO3_BASE_IDX 1
+#define regUVD_RB_BASE_HI3 0x0135
+#define regUVD_RB_BASE_HI3_BASE_IDX 1
+#define regUVD_RB_SIZE3 0x0136
+#define regUVD_RB_SIZE3_BASE_IDX 1
+#define regUVD_RB_BASE_LO4 0x0139
+#define regUVD_RB_BASE_LO4_BASE_IDX 1
+#define regUVD_RB_BASE_HI4 0x013a
+#define regUVD_RB_BASE_HI4_BASE_IDX 1
+#define regUVD_RB_SIZE4 0x013b
+#define regUVD_RB_SIZE4_BASE_IDX 1
+#define regUVD_OUT_RB_BASE_LO 0x013e
+#define regUVD_OUT_RB_BASE_LO_BASE_IDX 1
+#define regUVD_OUT_RB_BASE_HI 0x013f
+#define regUVD_OUT_RB_BASE_HI_BASE_IDX 1
+#define regUVD_OUT_RB_SIZE 0x0140
+#define regUVD_OUT_RB_SIZE_BASE_IDX 1
+#define regUVD_IOV_ACTIVE_FCN_ID 0x0143
+#define regUVD_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regUVD_IOV_MAILBOX 0x0144
+#define regUVD_IOV_MAILBOX_BASE_IDX 1
+#define regUVD_IOV_MAILBOX_RESP 0x0145
+#define regUVD_IOV_MAILBOX_RESP_BASE_IDX 1
+#define regUVD_RB_ARB_CTRL 0x0146
+#define regUVD_RB_ARB_CTRL_BASE_IDX 1
+#define regUVD_CTX_INDEX 0x0147
+#define regUVD_CTX_INDEX_BASE_IDX 1
+#define regUVD_CTX_DATA 0x0148
+#define regUVD_CTX_DATA_BASE_IDX 1
+#define regUVD_CXW_WR 0x0149
+#define regUVD_CXW_WR_BASE_IDX 1
+#define regUVD_CXW_WR_INT_ID 0x014a
+#define regUVD_CXW_WR_INT_ID_BASE_IDX 1
+#define regUVD_CXW_WR_INT_CTX_ID 0x014b
+#define regUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1
+#define regUVD_CXW_INT_ID 0x014c
+#define regUVD_CXW_INT_ID_BASE_IDX 1
+#define regUVD_MPEG2_ERROR 0x014d
+#define regUVD_MPEG2_ERROR_BASE_IDX 1
+#define regUVD_YBASE 0x0150
+#define regUVD_YBASE_BASE_IDX 1
+#define regUVD_UVBASE 0x0151
+#define regUVD_UVBASE_BASE_IDX 1
+#define regUVD_PITCH 0x0152
+#define regUVD_PITCH_BASE_IDX 1
+#define regUVD_WIDTH 0x0153
+#define regUVD_WIDTH_BASE_IDX 1
+#define regUVD_HEIGHT 0x0154
+#define regUVD_HEIGHT_BASE_IDX 1
+#define regUVD_PICCOUNT 0x0155
+#define regUVD_PICCOUNT_BASE_IDX 1
+#define regUVD_MPRD_INITIAL_XY 0x0156
+#define regUVD_MPRD_INITIAL_XY_BASE_IDX 1
+#define regUVD_MPEG2_CTRL 0x0157
+#define regUVD_MPEG2_CTRL_BASE_IDX 1
+#define regUVD_MB_CTL_BUF_BASE 0x0158
+#define regUVD_MB_CTL_BUF_BASE_BASE_IDX 1
+#define regUVD_PIC_CTL_BUF_BASE 0x0159
+#define regUVD_PIC_CTL_BUF_BASE_BASE_IDX 1
+#define regUVD_DXVA_BUF_SIZE 0x015a
+#define regUVD_DXVA_BUF_SIZE_BASE_IDX 1
+#define regUVD_SCRATCH_NP 0x015b
+#define regUVD_SCRATCH_NP_BASE_IDX 1
+#define regUVD_CLK_SWT_HANDSHAKE 0x015c
+#define regUVD_CLK_SWT_HANDSHAKE_BASE_IDX 1
+#define regUVD_GP_SCRATCH0 0x015e
+#define regUVD_GP_SCRATCH0_BASE_IDX 1
+#define regUVD_GP_SCRATCH1 0x015f
+#define regUVD_GP_SCRATCH1_BASE_IDX 1
+#define regUVD_GP_SCRATCH2 0x0160
+#define regUVD_GP_SCRATCH2_BASE_IDX 1
+#define regUVD_GP_SCRATCH3 0x0161
+#define regUVD_GP_SCRATCH3_BASE_IDX 1
+#define regUVD_GP_SCRATCH4 0x0162
+#define regUVD_GP_SCRATCH4_BASE_IDX 1
+#define regUVD_GP_SCRATCH5 0x0163
+#define regUVD_GP_SCRATCH5_BASE_IDX 1
+#define regUVD_GP_SCRATCH6 0x0164
+#define regUVD_GP_SCRATCH6_BASE_IDX 1
+#define regUVD_GP_SCRATCH7 0x0165
+#define regUVD_GP_SCRATCH7_BASE_IDX 1
+#define regUVD_GP_SCRATCH8 0x0166
+#define regUVD_GP_SCRATCH8_BASE_IDX 1
+#define regUVD_GP_SCRATCH9 0x0167
+#define regUVD_GP_SCRATCH9_BASE_IDX 1
+#define regUVD_GP_SCRATCH10 0x0168
+#define regUVD_GP_SCRATCH10_BASE_IDX 1
+#define regUVD_GP_SCRATCH11 0x0169
+#define regUVD_GP_SCRATCH11_BASE_IDX 1
+#define regUVD_GP_SCRATCH12 0x016a
+#define regUVD_GP_SCRATCH12_BASE_IDX 1
+#define regUVD_GP_SCRATCH13 0x016b
+#define regUVD_GP_SCRATCH13_BASE_IDX 1
+#define regUVD_GP_SCRATCH14 0x016c
+#define regUVD_GP_SCRATCH14_BASE_IDX 1
+#define regUVD_GP_SCRATCH15 0x016d
+#define regUVD_GP_SCRATCH15_BASE_IDX 1
+#define regUVD_GP_SCRATCH16 0x016e
+#define regUVD_GP_SCRATCH16_BASE_IDX 1
+#define regUVD_GP_SCRATCH17 0x016f
+#define regUVD_GP_SCRATCH17_BASE_IDX 1
+#define regUVD_GP_SCRATCH18 0x0170
+#define regUVD_GP_SCRATCH18_BASE_IDX 1
+#define regUVD_GP_SCRATCH19 0x0171
+#define regUVD_GP_SCRATCH19_BASE_IDX 1
+#define regUVD_GP_SCRATCH20 0x0172
+#define regUVD_GP_SCRATCH20_BASE_IDX 1
+#define regUVD_GP_SCRATCH21 0x0173
+#define regUVD_GP_SCRATCH21_BASE_IDX 1
+#define regUVD_GP_SCRATCH22 0x0174
+#define regUVD_GP_SCRATCH22_BASE_IDX 1
+#define regUVD_GP_SCRATCH23 0x0175
+#define regUVD_GP_SCRATCH23_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_LO 0x0176
+#define regUVD_AUDIO_RB_BASE_LO_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_HI 0x0177
+#define regUVD_AUDIO_RB_BASE_HI_BASE_IDX 1
+#define regUVD_AUDIO_RB_SIZE 0x0178
+#define regUVD_AUDIO_RB_SIZE_BASE_IDX 1
+#define regUVD_VCPU_INT_STATUS2 0x017b
+#define regUVD_VCPU_INT_STATUS2_BASE_IDX 1
+#define regUVD_VCPU_INT_ACK2 0x017c
+#define regUVD_VCPU_INT_ACK2_BASE_IDX 1
+#define regUVD_VCPU_INT_EN2 0x017d
+#define regUVD_VCPU_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS2 0x017e
+#define regUVD_SUVD_CGC_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_STATUS2 0x0180
+#define regUVD_SUVD_INT_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_EN2 0x0181
+#define regUVD_SUVD_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_INT_ACK2 0x0182
+#define regUVD_SUVD_INT_ACK2_BASE_IDX 1
+#define regUVD_STATUS 0x0183
+#define regUVD_STATUS_BASE_IDX 1
+#define regUVD_ENC_PIPE_BUSY 0x0184
+#define regUVD_ENC_PIPE_BUSY_BASE_IDX 1
+#define regUVD_FW_POWER_STATUS 0x0185
+#define regUVD_FW_POWER_STATUS_BASE_IDX 1
+#define regUVD_CNTL 0x0186
+#define regUVD_CNTL_BASE_IDX 1
+#define regUVD_SOFT_RESET 0x0187
+#define regUVD_SOFT_RESET_BASE_IDX 1
+#define regUVD_SOFT_RESET2 0x0188
+#define regUVD_SOFT_RESET2_BASE_IDX 1
+#define regUVD_MMSCH_SOFT_RESET 0x0189
+#define regUVD_MMSCH_SOFT_RESET_BASE_IDX 1
+#define regUVD_WIG_CTRL 0x018a
+#define regUVD_WIG_CTRL_BASE_IDX 1
+#define regUVD_CGC_STATUS 0x018c
+#define regUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_CGC_UDEC_STATUS 0x018e
+#define regUVD_CGC_UDEC_STATUS_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS 0x0190
+#define regUVD_SUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_CMD 0x0192
+#define regUVD_GPCOM_VCPU_CMD_BASE_IDX 1
+
+
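Each reg* offset in this file pairs with a *_BASE_IDX constant: the offset is relative to a per-IP base-address table that the driver discovers at init, and the two are combined by the SOC15 accessors. A minimal sketch, assuming the SOC15_REG_OFFSET() layout from drivers/gpu/drm/amd/amdgpu/soc15_common.h (the VCN instance number in the call is illustrative):

/* Mirrors soc15_common.h: absolute dword offset = per-IP base + register offset */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

/* Example: read UVD_STATUS of VCN instance 0. regUVD_STATUS is 0x0183 and its
 * BASE_IDX of 1 selects the second base in adev->reg_offset[VCN_HWIP][0][]. */
u32 status = RREG32(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS));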
+// addressBlock: uvd_vcn_cdefe_cdefe_broadcast_dec0
+// base address: 0x1fc00
+#define regCDEFE_SUVD_CGC_GATE 0x0104
+#define regCDEFE_SUVD_CGC_GATE_BASE_IDX 1
+#define regCDEFE_SUVD_CGC_GATE2 0x0105
+#define regCDEFE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regCDEFE_SUVD_CGC_CTRL 0x0106
+#define regCDEFE_SUVD_CGC_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_ecpudec
+// base address: 0x1ff00
+#define regUVD_VCPU_CACHE_OFFSET0 0x01c0
+#define regUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE0 0x01c1
+#define regUVD_VCPU_CACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET1 0x01c2
+#define regUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE1 0x01c3
+#define regUVD_VCPU_CACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET2 0x01c4
+#define regUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE2 0x01c5
+#define regUVD_VCPU_CACHE_SIZE2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET3 0x01c6
+#define regUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE3 0x01c7
+#define regUVD_VCPU_CACHE_SIZE3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET4 0x01c8
+#define regUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE4 0x01c9
+#define regUVD_VCPU_CACHE_SIZE4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET5 0x01ca
+#define regUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE5 0x01cb
+#define regUVD_VCPU_CACHE_SIZE5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET6 0x01cc
+#define regUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE6 0x01cd
+#define regUVD_VCPU_CACHE_SIZE6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET7 0x01ce
+#define regUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE7 0x01cf
+#define regUVD_VCPU_CACHE_SIZE7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET8 0x01d0
+#define regUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE8 0x01d1
+#define regUVD_VCPU_CACHE_SIZE8_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET0 0x01d2
+#define regUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE0 0x01d3
+#define regUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET1 0x01d4
+#define regUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE1 0x01d5
+#define regUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CNTL 0x01d6
+#define regUVD_VCPU_CNTL_BASE_IDX 1
+#define regUVD_VCPU_PRID 0x01d7
+#define regUVD_VCPU_PRID_BASE_IDX 1
+#define regUVD_VCPU_TRCE 0x01d8
+#define regUVD_VCPU_TRCE_BASE_IDX 1
+#define regUVD_VCPU_TRCE_RD 0x01d9
+#define regUVD_VCPU_TRCE_RD_BASE_IDX 1
+#define regUVD_VCPU_IND_INDEX 0x01db
+#define regUVD_VCPU_IND_INDEX_BASE_IDX 1
+#define regUVD_VCPU_IND_DATA 0x01dc
+#define regUVD_VCPU_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: uvd_lmi_adpdec
+// base address: 0x20290
+#define regUVD_LMI_RE_64BIT_BAR_LOW 0x02af
+#define regUVD_LMI_RE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RE_64BIT_BAR_HIGH 0x02b0
+#define regUVD_LMI_RE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_LOW 0x02b1
+#define regUVD_LMI_IT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_HIGH 0x02b2
+#define regUVD_LMI_IT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_LOW 0x02b3
+#define regUVD_LMI_MP_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_HIGH 0x02b4
+#define regUVD_LMI_MP_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_LOW 0x02b5
+#define regUVD_LMI_CM_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_HIGH 0x02b6
+#define regUVD_LMI_CM_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_LOW 0x02b7
+#define regUVD_LMI_DB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_HIGH 0x02b8
+#define regUVD_LMI_DB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_LOW 0x02b9
+#define regUVD_LMI_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH 0x02ba
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW 0x02bb
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH 0x02bc
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW 0x02bd
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH 0x02be
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW 0x02bf
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH 0x02c0
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW 0x02c1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH 0x02c2
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x02c5
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x02c6
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x02c7
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x02c8
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW 0x02c9
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH 0x02ca
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x02cb
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x02cc
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x02cd
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x02ce
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x02cf
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x02d0
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_LOW 0x02d1
+#define regUVD_LMI_CENC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH 0x02d2
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_LOW 0x02d3
+#define regUVD_LMI_SRE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH 0x02d4
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW 0x02d5
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH 0x02d6
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW 0x02d7
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH 0x02d8
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW 0x02d9
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH 0x02da
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW 0x02dd
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH 0x02de
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW 0x02df
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH 0x02e0
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW 0x02e1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH 0x02e2
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW 0x02e3
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH 0x02e4
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW 0x02e5
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH 0x02e6
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW 0x02e7
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH 0x02e8
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW 0x02e9
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH 0x02ea
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW 0x02eb
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH 0x02ec
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW 0x02ed
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH 0x02ee
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW 0x02ef
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH 0x02f0
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW 0x02f1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH 0x02f2
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x02fb
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x02fc
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x02fd
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x02fe
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x02ff
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x0300
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x0301
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x0302
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0303
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0304
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x0305
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x0306
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x0307
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x0308
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x0309
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x030a
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW 0x030b
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH 0x030c
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW 0x030d
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH 0x030e
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH 0x030f
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW 0x0318
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH 0x0319
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW 0x031a
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH 0x031b
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW 0x031c
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH 0x031d
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW 0x031e
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH 0x031f
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_ADP_ATOMIC_CONFIG 0x0321
+#define regUVD_ADP_ATOMIC_CONFIG_BASE_IDX 1
+#define regUVD_LMI_ARB_CTRL2 0x0322
+#define regUVD_LMI_ARB_CTRL2_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x0327
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI 0x0328
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_LAT_CTRL 0x0329
+#define regUVD_LMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_LMI_LAT_CNTR 0x032a
+#define regUVD_LMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_AVG_LAT_CNTR 0x032b
+#define regUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_SPH 0x032c
+#define regUVD_LMI_SPH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMID 0x032d
+#define regUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_LMI_CTRL2 0x032e
+#define regUVD_LMI_CTRL2_BASE_IDX 1
+#define regUVD_LMI_URGENT_CTRL 0x032f
+#define regUVD_LMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_LMI_CTRL 0x0330
+#define regUVD_LMI_CTRL_BASE_IDX 1
+#define regUVD_LMI_STATUS 0x0331
+#define regUVD_LMI_STATUS_BASE_IDX 1
+#define regUVD_LMI_PERFMON_CTRL 0x0334
+#define regUVD_LMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_LO 0x0335
+#define regUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_HI 0x0336
+#define regUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_LMI_ADP_SWAP_CNTL 0x0337
+#define regUVD_LMI_ADP_SWAP_CNTL_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_VMID 0x0338
+#define regUVD_LMI_RBC_RB_VMID_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_VMID 0x0339
+#define regUVD_LMI_RBC_IB_VMID_BASE_IDX 1
+#define regUVD_LMI_MC_CREDITS 0x033a
+#define regUVD_LMI_MC_CREDITS_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_INDEX 0x033e
+#define regUVD_LMI_ADP_IND_INDEX_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_DATA 0x033f
+#define regUVD_LMI_ADP_IND_DATA_BASE_IDX 1
+#define regUVD_LMI_ADP_PF_EN 0x0340
+#define regUVD_LMI_ADP_PF_EN_BASE_IDX 1
+#define regUVD_LMI_PREF_CTRL 0x0342
+#define regUVD_LMI_PREF_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg0_jpegnpdec
+// base address: 0x20f00
+#define regUVD_JPEG_CNTL 0x05c0
+#define regUVD_JPEG_CNTL_BASE_IDX 1
+#define regUVD_JPEG_RB_BASE 0x05c1
+#define regUVD_JPEG_RB_BASE_BASE_IDX 1
+#define regUVD_JPEG_RB_WPTR 0x05c2
+#define regUVD_JPEG_RB_WPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_RPTR 0x05c3
+#define regUVD_JPEG_RB_RPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_SIZE 0x05c4
+#define regUVD_JPEG_RB_SIZE_BASE_IDX 1
+#define regUVD_JPEG_DEC_CNT 0x05c5
+#define regUVD_JPEG_DEC_CNT_BASE_IDX 1
+#define regUVD_JPEG_SPS_INFO 0x05c6
+#define regUVD_JPEG_SPS_INFO_BASE_IDX 1
+#define regUVD_JPEG_SPS1_INFO 0x05c7
+#define regUVD_JPEG_SPS1_INFO_BASE_IDX 1
+#define regUVD_JPEG_RE_TIMER 0x05c8
+#define regUVD_JPEG_RE_TIMER_BASE_IDX 1
+#define regUVD_JPEG_DEC_SCRATCH0 0x05c9
+#define regUVD_JPEG_DEC_SCRATCH0_BASE_IDX 1
+#define regUVD_JPEG_INT_EN 0x05ca
+#define regUVD_JPEG_INT_EN_BASE_IDX 1
+#define regUVD_JPEG_INT_STAT 0x05cb
+#define regUVD_JPEG_INT_STAT_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL0 0x05cc
+#define regUVD_JPEG_TIER_CNTL0_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL1 0x05cd
+#define regUVD_JPEG_TIER_CNTL1_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL2 0x05ce
+#define regUVD_JPEG_TIER_CNTL2_BASE_IDX 1
+#define regUVD_JPEG_TIER_STATUS 0x05cf
+#define regUVD_JPEG_TIER_STATUS_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg_sclk0_jpegnpsclkdec
+// base address: 0x21000
+#define regUVD_JPEG_OUTBUF_CNTL 0x0600
+#define regUVD_JPEG_OUTBUF_CNTL_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_WPTR 0x0601
+#define regUVD_JPEG_OUTBUF_WPTR_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_RPTR 0x0602
+#define regUVD_JPEG_OUTBUF_RPTR_BASE_IDX 1
+#define regUVD_JPEG_PITCH 0x0603
+#define regUVD_JPEG_PITCH_BASE_IDX 1
+#define regUVD_JPEG_UV_PITCH 0x0604
+#define regUVD_JPEG_UV_PITCH_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE 0x0605
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE 0x0606
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX8_ADDR_CONFIG 0x0607
+#define regJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE 0x0608
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE 0x0609
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX10_ADDR_CONFIG 0x060a
+#define regJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_ADDR_MODE 0x060b
+#define regJPEG_DEC_ADDR_MODE_BASE_IDX 1
+#define regUVD_JPEG_OUTPUT_XY 0x060c
+#define regUVD_JPEG_OUTPUT_XY_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_CMD 0x060d
+#define regUVD_JPEG_GPCOM_CMD_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA0 0x060e
+#define regUVD_JPEG_GPCOM_DATA0_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA1 0x060f
+#define regUVD_JPEG_GPCOM_DATA1_BASE_IDX 1
+#define regUVD_JPEG_SCRATCH1 0x0610
+#define regUVD_JPEG_SCRATCH1_BASE_IDX 1
+#define regUVD_JPEG_DEC_SOFT_RST 0x0611
+#define regUVD_JPEG_DEC_SOFT_RST_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jrbc0_uvd_jrbc_dec
+// base address: 0x21100
+#define regUVD_JRBC_RB_WPTR 0x0640
+#define regUVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC_RB_CNTL 0x0641
+#define regUVD_JRBC_RB_CNTL_BASE_IDX 1
+#define regUVD_JRBC_IB_SIZE 0x0642
+#define regUVD_JRBC_IB_SIZE_BASE_IDX 1
+#define regUVD_JRBC_URGENT_CNTL 0x0643
+#define regUVD_JRBC_URGENT_CNTL_BASE_IDX 1
+#define regUVD_JRBC_RB_REF_DATA 0x0644
+#define regUVD_JRBC_RB_REF_DATA_BASE_IDX 1
+#define regUVD_JRBC_RB_COND_RD_TIMER 0x0645
+#define regUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC_SOFT_RESET 0x0648
+#define regUVD_JRBC_SOFT_RESET_BASE_IDX 1
+#define regUVD_JRBC_STATUS 0x0649
+#define regUVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC_RB_RPTR 0x064a
+#define regUVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC_RB_BUF_STATUS 0x064b
+#define regUVD_JRBC_RB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC_IB_BUF_STATUS 0x064c
+#define regUVD_JRBC_IB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC_IB_SIZE_UPDATE 0x064d
+#define regUVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 1
+#define regUVD_JRBC_IB_COND_RD_TIMER 0x064e
+#define regUVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC_IB_REF_DATA 0x064f
+#define regUVD_JRBC_IB_REF_DATA_BASE_IDX 1
+#define regUVD_JPEG_PREEMPT_CMD 0x0650
+#define regUVD_JPEG_PREEMPT_CMD_BASE_IDX 1
+#define regUVD_JPEG_PREEMPT_FENCE_DATA0 0x0651
+#define regUVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 1
+#define regUVD_JPEG_PREEMPT_FENCE_DATA1 0x0652
+#define regUVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 1
+#define regUVD_JRBC_RB_SIZE 0x0653
+#define regUVD_JRBC_RB_SIZE_BASE_IDX 1
+#define regUVD_JRBC_SCRATCH0 0x0654
+#define regUVD_JRBC_SCRATCH0_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jmi0_uvd_jmi_dec
+// base address: 0x21180
+#define regUVD_JPEG_DEC_PF_CTRL 0x0660
+#define regUVD_JPEG_DEC_PF_CTRL_BASE_IDX 1
+#define regUVD_LMI_JRBC_CTRL 0x0661
+#define regUVD_LMI_JRBC_CTRL_BASE_IDX 1
+#define regUVD_LMI_JPEG_CTRL 0x0662
+#define regUVD_LMI_JPEG_CTRL_BASE_IDX 1
+#define regJPEG_LMI_DROP 0x0663
+#define regJPEG_LMI_DROP_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_VMID 0x0664
+#define regUVD_LMI_JRBC_IB_VMID_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_VMID 0x0665
+#define regUVD_LMI_JRBC_RB_VMID_BASE_IDX 1
+#define regUVD_LMI_JPEG_VMID 0x0666
+#define regUVD_LMI_JPEG_VMID_BASE_IDX 1
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0667
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0668
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0669
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x066a
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x066b
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x066c
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JPEG_PREEMPT_VMID 0x066d
+#define regUVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 1
+#define regUVD_JMI_DEC_SWAP_CNTL 0x066e
+#define regUVD_JMI_DEC_SWAP_CNTL_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_CNTL 0x066f
+#define regUVD_JMI_ATOMIC_CNTL_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0670
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0671
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0672
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0673
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0674
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0675
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0676
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0677
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0678
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0679
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_CNTL2 0x067d
+#define regUVD_JMI_ATOMIC_CNTL2_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jmi_common_dec
+// base address: 0x21300
+#define regUVD_JADP_MCIF_URGENT_CTRL 0x06c1
+#define regUVD_JADP_MCIF_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_URGENT_CTRL 0x06c2
+#define regUVD_JMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_CTRL 0x06c3
+#define regUVD_JMI_CTRL_BASE_IDX 1
+#define regJPEG_MEMCHECK_CLAMPING_CNTL 0x06c4
+#define regJPEG_MEMCHECK_CLAMPING_CNTL_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR 0x06c5
+#define regJPEG_MEMCHECK_SAFE_ADDR_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT 0x06c6
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT_BASE_IDX 1
+#define regUVD_JMI_LAT_CTRL 0x06c7
+#define regUVD_JMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_JMI_LAT_CNTR 0x06c8
+#define regUVD_JMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_AVG_LAT_CNTR 0x06c9
+#define regUVD_JMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_PERFMON_CTRL 0x06ca
+#define regUVD_JMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_LO 0x06cb
+#define regUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_HI 0x06cc
+#define regUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_JMI_CLEAN_STATUS 0x06cd
+#define regUVD_JMI_CLEAN_STATUS_BASE_IDX 1
+#define regUVD_JMI_CNTL 0x06ce
+#define regUVD_JMI_CNTL_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg_common_dec
+// base address: 0x21400
+#define regJPEG_SOFT_RESET_STATUS 0x0700
+#define regJPEG_SOFT_RESET_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_EN 0x0701
+#define regJPEG_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_SYS_INT_EN1 0x0702
+#define regJPEG_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS 0x0703
+#define regJPEG_SYS_INT_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS1 0x0704
+#define regJPEG_SYS_INT_STATUS1_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK 0x0705
+#define regJPEG_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK1 0x0706
+#define regJPEG_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN 0x0707
+#define regJPEG_MEMCHECK_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN1 0x0708
+#define regJPEG_MEMCHECK_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT 0x0709
+#define regJPEG_MEMCHECK_SYS_INT_STAT_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT1 0x070a
+#define regJPEG_MEMCHECK_SYS_INT_STAT1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT2 0x070b
+#define regJPEG_MEMCHECK_SYS_INT_STAT2_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK 0x070c
+#define regJPEG_MEMCHECK_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK1 0x070d
+#define regJPEG_MEMCHECK_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK2 0x070e
+#define regJPEG_MEMCHECK_SYS_INT_ACK2_BASE_IDX 1
+#define regJPEG_MASTINT_EN 0x070f
+#define regJPEG_MASTINT_EN_BASE_IDX 1
+#define regJPEG_IH_CTRL 0x0710
+#define regJPEG_IH_CTRL_BASE_IDX 1
+#define regJRBBM_ARB_CTRL 0x0712
+#define regJRBBM_ARB_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg_common_sclk_dec
+// base address: 0x21480
+#define regJPEG_CGC_GATE 0x0720
+#define regJPEG_CGC_GATE_BASE_IDX 1
+#define regJPEG_CGC_CTRL 0x0721
+#define regJPEG_CGC_CTRL_BASE_IDX 1
+#define regJPEG_CGC_STATUS 0x0722
+#define regJPEG_CGC_STATUS_BASE_IDX 1
+#define regJPEG_COMN_CGC_MEM_CTRL 0x0723
+#define regJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_DEC_CGC_MEM_CTRL 0x0724
+#define regJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_ENC_CGC_MEM_CTRL 0x0726
+#define regJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_PERF_BANK_CONF 0x0727
+#define regJPEG_PERF_BANK_CONF_BASE_IDX 1
+#define regJPEG_PERF_BANK_EVENT_SEL 0x0728
+#define regJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT0 0x0729
+#define regJPEG_PERF_BANK_COUNT0_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT1 0x072a
+#define regJPEG_PERF_BANK_COUNT1_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT2 0x072b
+#define regJPEG_PERF_BANK_COUNT2_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT3 0x072c
+#define regJPEG_PERF_BANK_COUNT3_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_pg_dec
+// base address: 0x1f800
+#define regUVD_IPX_DLDO_CONFIG 0x0000
+#define regUVD_IPX_DLDO_CONFIG_BASE_IDX 1
+#define regUVD_IPX_DLDO_STATUS 0x0001
+#define regUVD_IPX_DLDO_STATUS_BASE_IDX 1
+#define regUVD_POWER_STATUS 0x0002
+#define regUVD_POWER_STATUS_BASE_IDX 1
+#define regUVD_JPEG_POWER_STATUS 0x0003
+#define regUVD_JPEG_POWER_STATUS_BASE_IDX 1
+#define regUVD_MC_DJPEG_RD_SPACE 0x0007
+#define regUVD_MC_DJPEG_RD_SPACE_BASE_IDX 1
+#define regUVD_MC_DJPEG_WR_SPACE 0x0008
+#define regUVD_MC_DJPEG_WR_SPACE_BASE_IDX 1
+#define regUVD_PG_IND_INDEX 0x000c
+#define regUVD_PG_IND_INDEX_BASE_IDX 1
+#define regUVD_PG_IND_DATA 0x000e
+#define regUVD_PG_IND_DATA_BASE_IDX 1
+#define regCC_UVD_HARVESTING 0x000f
+#define regCC_UVD_HARVESTING_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL 0x0011
+#define regUVD_DPG_LMA_CTL_BASE_IDX 1
+#define regUVD_DPG_LMA_DATA 0x0012
+#define regUVD_DPG_LMA_DATA_BASE_IDX 1
+#define regUVD_DPG_LMA_MASK 0x0013
+#define regUVD_DPG_LMA_MASK_BASE_IDX 1
+#define regUVD_DPG_PAUSE 0x0014
+#define regUVD_DPG_PAUSE_BASE_IDX 1
+#define regUVD_SCRATCH1 0x0015
+#define regUVD_SCRATCH1_BASE_IDX 1
+#define regUVD_SCRATCH2 0x0016
+#define regUVD_SCRATCH2_BASE_IDX 1
+#define regUVD_SCRATCH3 0x0017
+#define regUVD_SCRATCH3_BASE_IDX 1
+#define regUVD_SCRATCH4 0x0018
+#define regUVD_SCRATCH4_BASE_IDX 1
+#define regUVD_SCRATCH5 0x0019
+#define regUVD_SCRATCH5_BASE_IDX 1
+#define regUVD_SCRATCH6 0x001a
+#define regUVD_SCRATCH6_BASE_IDX 1
+#define regUVD_SCRATCH7 0x001b
+#define regUVD_SCRATCH7_BASE_IDX 1
+#define regUVD_SCRATCH8 0x001c
+#define regUVD_SCRATCH8_BASE_IDX 1
+#define regUVD_SCRATCH9 0x001d
+#define regUVD_SCRATCH9_BASE_IDX 1
+#define regUVD_SCRATCH10 0x001e
+#define regUVD_SCRATCH10_BASE_IDX 1
+#define regUVD_SCRATCH11 0x001f
+#define regUVD_SCRATCH11_BASE_IDX 1
+#define regUVD_SCRATCH12 0x0020
+#define regUVD_SCRATCH12_BASE_IDX 1
+#define regUVD_SCRATCH13 0x0021
+#define regUVD_SCRATCH13_BASE_IDX 1
+#define regUVD_SCRATCH14 0x0022
+#define regUVD_SCRATCH14_BASE_IDX 1
+#define regUVD_FREE_COUNTER_REG 0x0023
+#define regUVD_FREE_COUNTER_REG_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0024
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0025
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_DPG_VCPU_CACHE_OFFSET0 0x0026
+#define regUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID 0x0027
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_REG_FILTER_EN 0x0028
+#define regUVD_REG_FILTER_EN_BASE_IDX 1
+#define regUVD_SECURITY_REG_VIO_REPORT 0x0029
+#define regUVD_SECURITY_REG_VIO_REPORT_BASE_IDX 1
+#define regUVD_FW_VERSION 0x002a
+#define regUVD_FW_VERSION_BASE_IDX 1
+#define regUVD_PF_STATUS 0x002c
+#define regUVD_PF_STATUS_BASE_IDX 1
+#define regUVD_DPG_CLK_EN_VCPU_REPORT 0x002e
+#define regUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO 0x002f
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI 0x0030
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO 0x0031
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI 0x0032
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR 0x0033
+#define regCC_UVD_VCPU_ERR_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO 0x0034
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI 0x0035
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_SPACE 0x003d
+#define regUVD_LMI_MMSCH_NC_SPACE_BASE_IDX 1
+#define regUVD_LMI_ATOMIC_SPACE 0x003e
+#define regUVD_LMI_ATOMIC_SPACE_BASE_IDX 1
+#define regUVD_GFX8_ADDR_CONFIG 0x0041
+#define regUVD_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GFX10_ADDR_CONFIG 0x0042
+#define regUVD_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GPCNT2_CNTL 0x0043
+#define regUVD_GPCNT2_CNTL_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_LOWER 0x0044
+#define regUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_LOWER 0x0045
+#define regUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_UPPER 0x0046
+#define regUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_UPPER 0x0047
+#define regUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_CNTL 0x0048
+#define regUVD_GPCNT3_CNTL_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_LOWER 0x0049
+#define regUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_LOWER 0x004a
+#define regUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_UPPER 0x004b
+#define regUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_UPPER 0x004c
+#define regUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1
+#define regUVD_VCLK_DS_CNTL 0x004d
+#define regUVD_VCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_DCLK_DS_CNTL 0x004e
+#define regUVD_DCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_TSC_LOWER 0x004f
+#define regUVD_TSC_LOWER_BASE_IDX 1
+#define regUVD_TSC_UPPER 0x0050
+#define regUVD_TSC_UPPER_BASE_IDX 1
+#define regVCN_FEATURES 0x0051
+#define regVCN_FEATURES_BASE_IDX 1
+#define regUVD_GPUIOV_STATUS 0x0055
+#define regUVD_GPUIOV_STATUS_BASE_IDX 1
+#define regUVD_SCRATCH15 0x005c
+#define regUVD_SCRATCH15_BASE_IDX 1
+#define regUVD_VERSION 0x005d
+#define regUVD_VERSION_BASE_IDX 1
+#define regVCN_UMSCH_CNTL 0x005e
+#define regVCN_UMSCH_CNTL_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL 0x0068
+#define regVCN_JPEG_DB_CTRL_BASE_IDX 1
+#define regVCN_RB1_DB_CTRL 0x0072
+#define regVCN_RB1_DB_CTRL_BASE_IDX 1
+#define regVCN_RB2_DB_CTRL 0x0073
+#define regVCN_RB2_DB_CTRL_BASE_IDX 1
+#define regVCN_RB3_DB_CTRL 0x0074
+#define regVCN_RB3_DB_CTRL_BASE_IDX 1
+#define regVCN_RB4_DB_CTRL 0x0075
+#define regVCN_RB4_DB_CTRL_BASE_IDX 1
+#define regVCN_UMSCH_RB_DB_CTRL 0x0076
+#define regVCN_UMSCH_RB_DB_CTRL_BASE_IDX 1
+#define regVCN_RB_DB_CTRL 0x0077
+#define regVCN_RB_DB_CTRL_BASE_IDX 1
+#define regVCN_AGDB_CTRL0 0x0079
+#define regVCN_AGDB_CTRL0_BASE_IDX 1
+#define regVCN_AGDB_CTRL1 0x007a
+#define regVCN_AGDB_CTRL1_BASE_IDX 1
+#define regVCN_AGDB_CTRL2 0x007b
+#define regVCN_AGDB_CTRL2_BASE_IDX 1
+#define regVCN_AGDB_CTRL3 0x007c
+#define regVCN_AGDB_CTRL3_BASE_IDX 1
+#define regVCN_AGDB_CTRL4 0x007d
+#define regVCN_AGDB_CTRL4_BASE_IDX 1
+#define regVCN_AGDB_CTRL5 0x007e
+#define regVCN_AGDB_CTRL5_BASE_IDX 1
+#define regVCN_AGDB_MASK0 0x007f
+#define regVCN_AGDB_MASK0_BASE_IDX 1
+#define regVCN_AGDB_MASK1 0x0080
+#define regVCN_AGDB_MASK1_BASE_IDX 1
+#define regVCN_AGDB_MASK2 0x0081
+#define regVCN_AGDB_MASK2_BASE_IDX 1
+#define regVCN_AGDB_MASK3 0x0082
+#define regVCN_AGDB_MASK3_BASE_IDX 1
+#define regVCN_AGDB_MASK4 0x0083
+#define regVCN_AGDB_MASK4_BASE_IDX 1
+#define regVCN_AGDB_MASK5 0x0084
+#define regVCN_AGDB_MASK5_BASE_IDX 1
+#define regVCN_RB_ENABLE 0x0085
+#define regVCN_RB_ENABLE_BASE_IDX 1
+#define regVCN_RB_WPTR_CTRL 0x0086
+#define regVCN_RB_WPTR_CTRL_BASE_IDX 1
+#define regUVD_RB_RPTR 0x00ac
+#define regUVD_RB_RPTR_BASE_IDX 1
+#define regUVD_RB_WPTR 0x00ad
+#define regUVD_RB_WPTR_BASE_IDX 1
+#define regUVD_RB_RPTR2 0x00ae
+#define regUVD_RB_RPTR2_BASE_IDX 1
+#define regUVD_RB_WPTR2 0x00af
+#define regUVD_RB_WPTR2_BASE_IDX 1
+#define regUVD_RB_RPTR3 0x00b0
+#define regUVD_RB_RPTR3_BASE_IDX 1
+#define regUVD_RB_WPTR3 0x00b1
+#define regUVD_RB_WPTR3_BASE_IDX 1
+#define regUVD_RB_RPTR4 0x00b2
+#define regUVD_RB_RPTR4_BASE_IDX 1
+#define regUVD_RB_WPTR4 0x00b3
+#define regUVD_RB_WPTR4_BASE_IDX 1
+#define regUVD_OUT_RB_RPTR 0x00b4
+#define regUVD_OUT_RB_RPTR_BASE_IDX 1
+#define regUVD_OUT_RB_WPTR 0x00b5
+#define regUVD_OUT_RB_WPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_RPTR 0x00b6
+#define regUVD_AUDIO_RB_RPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_WPTR 0x00b7
+#define regUVD_AUDIO_RB_WPTR_BASE_IDX 1
+#define regUVD_RBC_RB_RPTR 0x00b8
+#define regUVD_RBC_RB_RPTR_BASE_IDX 1
+#define regUVD_RBC_RB_WPTR 0x00b9
+#define regUVD_RBC_RB_WPTR_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL2 0x00bb
+#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+
+
+// addressBlock: uvd_vcn_umsch_dec
+// base address: 0x21500
+#define regVCN_UMSCH_MES_CNTL 0x0740
+#define regVCN_UMSCH_MES_CNTL_BASE_IDX 1
+#define regUMSCH_CTL 0x0741
+#define regUMSCH_CTL_BASE_IDX 1
+#define regUMSCH_CTL2 0x0742
+#define regUMSCH_CTL2_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR0 0x0743
+#define regVCN_UMSCH_AGDB_WPTR0_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR1 0x0744
+#define regVCN_UMSCH_AGDB_WPTR1_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR2 0x0745
+#define regVCN_UMSCH_AGDB_WPTR2_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR3 0x0746
+#define regVCN_UMSCH_AGDB_WPTR3_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR4 0x0747
+#define regVCN_UMSCH_AGDB_WPTR4_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR5 0x0748
+#define regVCN_UMSCH_AGDB_WPTR5_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX0 0x0749
+#define regVCN_UMSCH_MAILBOX0_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP0 0x074a
+#define regVCN_UMSCH_MAILBOX_RESP0_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX1 0x074b
+#define regVCN_UMSCH_MAILBOX1_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP1 0x074c
+#define regVCN_UMSCH_MAILBOX_RESP1_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX2 0x074d
+#define regVCN_UMSCH_MAILBOX2_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP2 0x074e
+#define regVCN_UMSCH_MAILBOX_RESP2_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX3 0x074f
+#define regVCN_UMSCH_MAILBOX3_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP3 0x0750
+#define regVCN_UMSCH_MAILBOX_RESP3_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER0 0x0751
+#define regVCN_UMSCH_SPARE_REGISTER0_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER1 0x0752
+#define regVCN_UMSCH_SPARE_REGISTER1_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER2 0x0753
+#define regVCN_UMSCH_SPARE_REGISTER2_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER3 0x0754
+#define regVCN_UMSCH_SPARE_REGISTER3_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER4 0x0755
+#define regVCN_UMSCH_SPARE_REGISTER4_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER5 0x0756
+#define regVCN_UMSCH_SPARE_REGISTER5_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER6 0x0757
+#define regVCN_UMSCH_SPARE_REGISTER6_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER7 0x0758
+#define regVCN_UMSCH_SPARE_REGISTER7_BASE_IDX 1
+#define regVCN_UMSCH_MES_UTCL1_CNTL 0x0759
+#define regVCN_UMSCH_MES_UTCL1_CNTL_BASE_IDX 1
+#define regVCN_UMSCH_MES_BUSY 0x075a
+#define regVCN_UMSCH_MES_BUSY_BASE_IDX 1
+#define regVCN_UMSCH_RB_BASE_LO 0x075b
+#define regVCN_UMSCH_RB_BASE_LO_BASE_IDX 1
+#define regVCN_UMSCH_RB_BASE_HI 0x075c
+#define regVCN_UMSCH_RB_BASE_HI_BASE_IDX 1
+#define regVCN_UMSCH_RB_SIZE 0x075d
+#define regVCN_UMSCH_RB_SIZE_BASE_IDX 1
+#define regVCN_UMSCH_RB_RPTR 0x075e
+#define regVCN_UMSCH_RB_RPTR_BASE_IDX 1
+#define regVCN_UMSCH_RB_WPTR 0x075f
+#define regVCN_UMSCH_RB_WPTR_BASE_IDX 1
+#define regVCN_UMSCH_MASTINT_EN 0x0760
+#define regVCN_UMSCH_MASTINT_EN_BASE_IDX 1
+#define regVCN_UMSCH_IH_CTRL 0x0761
+#define regVCN_UMSCH_IH_CTRL_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_EN 0x0762
+#define regVCN_UMSCH_SYS_INT_EN_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_STATUS 0x0763
+#define regVCN_UMSCH_SYS_INT_STATUS_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_ACK 0x0764
+#define regVCN_UMSCH_SYS_INT_ACK_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_SRC 0x0765
+#define regVCN_UMSCH_SYS_INT_SRC_BASE_IDX 1
+#define regVCN_UMSCH_IH_CTX_CTRL 0x0766
+#define regVCN_UMSCH_IH_CTX_CTRL_BASE_IDX 1
+#define regUVD_UMSCH_FORCE 0x076b
+#define regUVD_UMSCH_FORCE_BASE_IDX 1
+#define regUMSCH_MES_RESET_CTRL 0x0770
+#define regUMSCH_MES_RESET_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_vcn_cprs64dec
+// base address: 0x21600
+#define regVCN_MES_PRGRM_CNTR_START 0x0780
+#define regVCN_MES_PRGRM_CNTR_START_BASE_IDX 1
+#define regVCN_MES_INTR_ROUTINE_START 0x0781
+#define regVCN_MES_INTR_ROUTINE_START_BASE_IDX 1
+#define regVCN_MES_MTVEC_LO 0x0781
+#define regVCN_MES_MTVEC_LO_BASE_IDX 1
+#define regVCN_MES_INTR_ROUTINE_START_HI 0x0782
+#define regVCN_MES_INTR_ROUTINE_START_HI_BASE_IDX 1
+#define regVCN_MES_MTVEC_HI 0x0782
+#define regVCN_MES_MTVEC_HI_BASE_IDX 1
+#define regVCN_MES_CNTL 0x0787
+#define regVCN_MES_CNTL_BASE_IDX 1
+#define regVCN_MES_PIPE_PRIORITY_CNTS 0x0788
+#define regVCN_MES_PIPE_PRIORITY_CNTS_BASE_IDX 1
+#define regVCN_MES_PIPE0_PRIORITY 0x0789
+#define regVCN_MES_PIPE0_PRIORITY_BASE_IDX 1
+#define regVCN_MES_PIPE1_PRIORITY 0x078a
+#define regVCN_MES_PIPE1_PRIORITY_BASE_IDX 1
+#define regVCN_MES_PIPE2_PRIORITY 0x078b
+#define regVCN_MES_PIPE2_PRIORITY_BASE_IDX 1
+#define regVCN_MES_PIPE3_PRIORITY 0x078c
+#define regVCN_MES_PIPE3_PRIORITY_BASE_IDX 1
+#define regVCN_MES_HEADER_DUMP 0x078d
+#define regVCN_MES_HEADER_DUMP_BASE_IDX 1
+#define regVCN_MES_MIE_LO 0x078e
+#define regVCN_MES_MIE_LO_BASE_IDX 1
+#define regVCN_MES_MIE_HI 0x078f
+#define regVCN_MES_MIE_HI_BASE_IDX 1
+#define regVCN_MES_INTERRUPT 0x0790
+#define regVCN_MES_INTERRUPT_BASE_IDX 1
+#define regVCN_MES_SCRATCH_INDEX 0x0791
+#define regVCN_MES_SCRATCH_INDEX_BASE_IDX 1
+#define regVCN_MES_SCRATCH_DATA 0x0792
+#define regVCN_MES_SCRATCH_DATA_BASE_IDX 1
+#define regVCN_MES_INSTR_PNTR 0x0793
+#define regVCN_MES_INSTR_PNTR_BASE_IDX 1
+#define regVCN_MES_MSCRATCH_HI 0x0794
+#define regVCN_MES_MSCRATCH_HI_BASE_IDX 1
+#define regVCN_MES_MSCRATCH_LO 0x0795
+#define regVCN_MES_MSCRATCH_LO_BASE_IDX 1
+#define regVCN_MES_MSTATUS_LO 0x0796
+#define regVCN_MES_MSTATUS_LO_BASE_IDX 1
+#define regVCN_MES_MSTATUS_HI 0x0797
+#define regVCN_MES_MSTATUS_HI_BASE_IDX 1
+#define regVCN_MES_MEPC_LO 0x0798
+#define regVCN_MES_MEPC_LO_BASE_IDX 1
+#define regVCN_MES_MEPC_HI 0x0799
+#define regVCN_MES_MEPC_HI_BASE_IDX 1
+#define regVCN_MES_MCAUSE_LO 0x079a
+#define regVCN_MES_MCAUSE_LO_BASE_IDX 1
+#define regVCN_MES_MCAUSE_HI 0x079b
+#define regVCN_MES_MCAUSE_HI_BASE_IDX 1
+#define regVCN_MES_MBADADDR_LO 0x079c
+#define regVCN_MES_MBADADDR_LO_BASE_IDX 1
+#define regVCN_MES_MBADADDR_HI 0x079d
+#define regVCN_MES_MBADADDR_HI_BASE_IDX 1
+#define regVCN_MES_MIP_LO 0x079e
+#define regVCN_MES_MIP_LO_BASE_IDX 1
+#define regVCN_MES_MIP_HI 0x079f
+#define regVCN_MES_MIP_HI_BASE_IDX 1
+#define regVCN_MES_IC_OP_CNTL 0x07a0
+#define regVCN_MES_IC_OP_CNTL_BASE_IDX 1
+#define regVCN_MES_MCYCLE_LO 0x07a6
+#define regVCN_MES_MCYCLE_LO_BASE_IDX 1
+#define regVCN_MES_MCYCLE_HI 0x07a7
+#define regVCN_MES_MCYCLE_HI_BASE_IDX 1
+#define regVCN_MES_MTIME_LO 0x07a8
+#define regVCN_MES_MTIME_LO_BASE_IDX 1
+#define regVCN_MES_MTIME_HI 0x07a9
+#define regVCN_MES_MTIME_HI_BASE_IDX 1
+#define regVCN_MES_MINSTRET_LO 0x07aa
+#define regVCN_MES_MINSTRET_LO_BASE_IDX 1
+#define regVCN_MES_MINSTRET_HI 0x07ab
+#define regVCN_MES_MINSTRET_HI_BASE_IDX 1
+#define regVCN_MES_MISA_LO 0x07ac
+#define regVCN_MES_MISA_LO_BASE_IDX 1
+#define regVCN_MES_MISA_HI 0x07ad
+#define regVCN_MES_MISA_HI_BASE_IDX 1
+#define regVCN_MES_MVENDORID_LO 0x07ae
+#define regVCN_MES_MVENDORID_LO_BASE_IDX 1
+#define regVCN_MES_MVENDORID_HI 0x07af
+#define regVCN_MES_MVENDORID_HI_BASE_IDX 1
+#define regVCN_MES_MARCHID_LO 0x07b0
+#define regVCN_MES_MARCHID_LO_BASE_IDX 1
+#define regVCN_MES_MARCHID_HI 0x07b1
+#define regVCN_MES_MARCHID_HI_BASE_IDX 1
+#define regVCN_MES_MIMPID_LO 0x07b2
+#define regVCN_MES_MIMPID_LO_BASE_IDX 1
+#define regVCN_MES_MIMPID_HI 0x07b3
+#define regVCN_MES_MIMPID_HI_BASE_IDX 1
+#define regVCN_MES_MHARTID_LO 0x07b4
+#define regVCN_MES_MHARTID_LO_BASE_IDX 1
+#define regVCN_MES_MHARTID_HI 0x07b5
+#define regVCN_MES_MHARTID_HI_BASE_IDX 1
+#define regVCN_MES_DC_BASE_CNTL 0x07b6
+#define regVCN_MES_DC_BASE_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_OP_CNTL 0x07b7
+#define regVCN_MES_DC_OP_CNTL_BASE_IDX 1
+#define regVCN_MES_MTIMECMP_LO 0x07b8
+#define regVCN_MES_MTIMECMP_LO_BASE_IDX 1
+#define regVCN_MES_MTIMECMP_HI 0x07b9
+#define regVCN_MES_MTIMECMP_HI_BASE_IDX 1
+#define regVCN_MES_GP0_LO 0x07c3
+#define regVCN_MES_GP0_LO_BASE_IDX 1
+#define regVCN_MES_GP0_HI 0x07c4
+#define regVCN_MES_GP0_HI_BASE_IDX 1
+#define regVCN_MES_GP1_LO 0x07c5
+#define regVCN_MES_GP1_LO_BASE_IDX 1
+#define regVCN_MES_GP1_HI 0x07c6
+#define regVCN_MES_GP1_HI_BASE_IDX 1
+#define regVCN_MES_GP2_LO 0x07c7
+#define regVCN_MES_GP2_LO_BASE_IDX 1
+#define regVCN_MES_GP2_HI 0x07c8
+#define regVCN_MES_GP2_HI_BASE_IDX 1
+#define regVCN_MES_GP3_LO 0x07c9
+#define regVCN_MES_GP3_LO_BASE_IDX 1
+#define regVCN_MES_GP3_HI 0x07ca
+#define regVCN_MES_GP3_HI_BASE_IDX 1
+#define regVCN_MES_GP4_LO 0x07cb
+#define regVCN_MES_GP4_LO_BASE_IDX 1
+#define regVCN_MES_GP4_HI 0x07cc
+#define regVCN_MES_GP4_HI_BASE_IDX 1
+#define regVCN_MES_GP5_LO 0x07cd
+#define regVCN_MES_GP5_LO_BASE_IDX 1
+#define regVCN_MES_GP5_HI 0x07ce
+#define regVCN_MES_GP5_HI_BASE_IDX 1
+#define regVCN_MES_GP6_LO 0x07cf
+#define regVCN_MES_GP6_LO_BASE_IDX 1
+#define regVCN_MES_GP6_HI 0x07d0
+#define regVCN_MES_GP6_HI_BASE_IDX 1
+#define regVCN_MES_GP7_LO 0x07d1
+#define regVCN_MES_GP7_LO_BASE_IDX 1
+#define regVCN_MES_GP7_HI 0x07d2
+#define regVCN_MES_GP7_HI_BASE_IDX 1
+#define regVCN_MES_GP8_LO 0x07d3
+#define regVCN_MES_GP8_LO_BASE_IDX 1
+#define regVCN_MES_GP8_HI 0x07d4
+#define regVCN_MES_GP8_HI_BASE_IDX 1
+#define regVCN_MES_GP9_LO 0x07d5
+#define regVCN_MES_GP9_LO_BASE_IDX 1
+#define regVCN_MES_GP9_HI 0x07d6
+#define regVCN_MES_GP9_HI_BASE_IDX 1
+#define regVCN_MES_DM_INDEX_ADDR 0x0800
+#define regVCN_MES_DM_INDEX_ADDR_BASE_IDX 1
+#define regVCN_MES_DM_INDEX_DATA 0x0801
+#define regVCN_MES_DM_INDEX_DATA_BASE_IDX 1
+#define regVCN_MES_LOCAL_BASE0_LO 0x0803
+#define regVCN_MES_LOCAL_BASE0_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_BASE0_HI 0x0804
+#define regVCN_MES_LOCAL_BASE0_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_MASK0_LO 0x0805
+#define regVCN_MES_LOCAL_MASK0_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_MASK0_HI 0x0806
+#define regVCN_MES_LOCAL_MASK0_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_APERTURE 0x0807
+#define regVCN_MES_LOCAL_APERTURE_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_BASE_LO 0x0808
+#define regVCN_MES_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_BASE_HI 0x0809
+#define regVCN_MES_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_MASK_LO 0x080a
+#define regVCN_MES_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_MASK_HI 0x080b
+#define regVCN_MES_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_APERTURE 0x080c
+#define regVCN_MES_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regVCN_MES_LOCAL_SCRATCH_APERTURE 0x080d
+#define regVCN_MES_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regVCN_MES_LOCAL_SCRATCH_BASE_LO 0x080e
+#define regVCN_MES_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_SCRATCH_BASE_HI 0x080f
+#define regVCN_MES_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regVCN_MES_PERFCOUNT_CNTL 0x0819
+#define regVCN_MES_PERFCOUNT_CNTL_BASE_IDX 1
+#define regVCN_MES_PENDING_INTERRUPT 0x081a
+#define regVCN_MES_PENDING_INTERRUPT_BASE_IDX 1
+#define regVCN_MES_PRGRM_CNTR_START_HI 0x081d
+#define regVCN_MES_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_16 0x081f
+#define regVCN_MES_INTERRUPT_DATA_16_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_17 0x0820
+#define regVCN_MES_INTERRUPT_DATA_17_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_18 0x0821
+#define regVCN_MES_INTERRUPT_DATA_18_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_19 0x0822
+#define regVCN_MES_INTERRUPT_DATA_19_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_20 0x0823
+#define regVCN_MES_INTERRUPT_DATA_20_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_21 0x0824
+#define regVCN_MES_INTERRUPT_DATA_21_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_22 0x0825
+#define regVCN_MES_INTERRUPT_DATA_22_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_23 0x0826
+#define regVCN_MES_INTERRUPT_DATA_23_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_24 0x0827
+#define regVCN_MES_INTERRUPT_DATA_24_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_25 0x0828
+#define regVCN_MES_INTERRUPT_DATA_25_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_26 0x0829
+#define regVCN_MES_INTERRUPT_DATA_26_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_27 0x082a
+#define regVCN_MES_INTERRUPT_DATA_27_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_28 0x082b
+#define regVCN_MES_INTERRUPT_DATA_28_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_29 0x082c
+#define regVCN_MES_INTERRUPT_DATA_29_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_30 0x082d
+#define regVCN_MES_INTERRUPT_DATA_30_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_31 0x082e
+#define regVCN_MES_INTERRUPT_DATA_31_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE0_BASE 0x082f
+#define regVCN_MES_DC_APERTURE0_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE0_MASK 0x0830
+#define regVCN_MES_DC_APERTURE0_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE0_CNTL 0x0831
+#define regVCN_MES_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE1_BASE 0x0832
+#define regVCN_MES_DC_APERTURE1_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE1_MASK 0x0833
+#define regVCN_MES_DC_APERTURE1_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE1_CNTL 0x0834
+#define regVCN_MES_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE2_BASE 0x0835
+#define regVCN_MES_DC_APERTURE2_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE2_MASK 0x0836
+#define regVCN_MES_DC_APERTURE2_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE2_CNTL 0x0837
+#define regVCN_MES_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE3_BASE 0x0838
+#define regVCN_MES_DC_APERTURE3_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE3_MASK 0x0839
+#define regVCN_MES_DC_APERTURE3_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE3_CNTL 0x083a
+#define regVCN_MES_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE4_BASE 0x083b
+#define regVCN_MES_DC_APERTURE4_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE4_MASK 0x083c
+#define regVCN_MES_DC_APERTURE4_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE4_CNTL 0x083d
+#define regVCN_MES_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE5_BASE 0x083e
+#define regVCN_MES_DC_APERTURE5_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE5_MASK 0x083f
+#define regVCN_MES_DC_APERTURE5_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE5_CNTL 0x0840
+#define regVCN_MES_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE6_BASE 0x0841
+#define regVCN_MES_DC_APERTURE6_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE6_MASK 0x0842
+#define regVCN_MES_DC_APERTURE6_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE6_CNTL 0x0843
+#define regVCN_MES_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE7_BASE 0x0844
+#define regVCN_MES_DC_APERTURE7_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE7_MASK 0x0845
+#define regVCN_MES_DC_APERTURE7_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE7_CNTL 0x0846
+#define regVCN_MES_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE8_BASE 0x0847
+#define regVCN_MES_DC_APERTURE8_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE8_MASK 0x0848
+#define regVCN_MES_DC_APERTURE8_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE8_CNTL 0x0849
+#define regVCN_MES_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE9_BASE 0x084a
+#define regVCN_MES_DC_APERTURE9_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE9_MASK 0x084b
+#define regVCN_MES_DC_APERTURE9_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE9_CNTL 0x084c
+#define regVCN_MES_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE10_BASE 0x084d
+#define regVCN_MES_DC_APERTURE10_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE10_MASK 0x084e
+#define regVCN_MES_DC_APERTURE10_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE10_CNTL 0x084f
+#define regVCN_MES_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE11_BASE 0x0850
+#define regVCN_MES_DC_APERTURE11_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE11_MASK 0x0851
+#define regVCN_MES_DC_APERTURE11_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE11_CNTL 0x0852
+#define regVCN_MES_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE12_BASE 0x0853
+#define regVCN_MES_DC_APERTURE12_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE12_MASK 0x0854
+#define regVCN_MES_DC_APERTURE12_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE12_CNTL 0x0855
+#define regVCN_MES_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE13_BASE 0x0856
+#define regVCN_MES_DC_APERTURE13_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE13_MASK 0x0857
+#define regVCN_MES_DC_APERTURE13_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE13_CNTL 0x0858
+#define regVCN_MES_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE14_BASE 0x0859
+#define regVCN_MES_DC_APERTURE14_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE14_MASK 0x085a
+#define regVCN_MES_DC_APERTURE14_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE14_CNTL 0x085b
+#define regVCN_MES_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE15_BASE 0x085c
+#define regVCN_MES_DC_APERTURE15_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE15_MASK 0x085d
+#define regVCN_MES_DC_APERTURE15_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE15_CNTL 0x085e
+#define regVCN_MES_DC_APERTURE15_CNTL_BASE_IDX 1
+
+
+// addressBlock: uvd_vcn_hypdec
+// base address: 0x21a00
+#define regVCN_MES_IC_BASE_LO 0x08d0
+#define regVCN_MES_IC_BASE_LO_BASE_IDX 1
+#define regVCN_MES_MIBASE_LO 0x08d0
+#define regVCN_MES_MIBASE_LO_BASE_IDX 1
+#define regVCN_MES_IC_BASE_HI 0x08d1
+#define regVCN_MES_IC_BASE_HI_BASE_IDX 1
+#define regVCN_MES_MIBASE_HI 0x08d1
+#define regVCN_MES_MIBASE_HI_BASE_IDX 1
+#define regVCN_MES_IC_BASE_CNTL 0x08d2
+#define regVCN_MES_IC_BASE_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_BASE_LO 0x08d4
+#define regVCN_MES_DC_BASE_LO_BASE_IDX 1
+#define regVCN_MES_MDBASE_LO 0x08d4
+#define regVCN_MES_MDBASE_LO_BASE_IDX 1
+#define regVCN_MES_DC_BASE_HI 0x08d5
+#define regVCN_MES_DC_BASE_HI_BASE_IDX 1
+#define regVCN_MES_MDBASE_HI 0x08d5
+#define regVCN_MES_MDBASE_HI_BASE_IDX 1
+#define regVCN_MES_MIBOUND_LO 0x08db
+#define regVCN_MES_MIBOUND_LO_BASE_IDX 1
+#define regVCN_MES_MIBOUND_HI 0x08dc
+#define regVCN_MES_MIBOUND_HI_BASE_IDX 1
+#define regVCN_MES_MDBOUND_LO 0x08dd
+#define regVCN_MES_MDBOUND_LO_BASE_IDX 1
+#define regVCN_MES_MDBOUND_HI 0x08de
+#define regVCN_MES_MDBOUND_HI_BASE_IDX 1
+
+
+// addressBlock: uvd_slmi_adpdec
+// base address: 0x21c00
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x0900
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x0901
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x0902
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0903
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0904
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0905
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0906
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0907
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0908
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0909
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x090a
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x090b
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x090c
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x090d
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x090e
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x090f
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_VMID 0x0910
+#define regUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1
+#define regUVD_LMI_MMSCH_CTRL 0x0911
+#define regUVD_LMI_MMSCH_CTRL_BASE_IDX 1
+#define regUVD_MMSCH_LMI_STATUS 0x0912
+#define regUVD_MMSCH_LMI_STATUS_BASE_IDX 1
+#define regUMSCH_IOV_ACTIVE_FCN_ID 0x0920
+#define regUMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regUVD_UMSCH_LMI_STATUS 0x0923
+#define regUVD_UMSCH_LMI_STATUS_BASE_IDX 1
+
+
+// addressBlock: uvdctxind
+// base address: 0x0
+#define ixUVD_CGC_MEM_CTRL 0x0000
+#define ixUVD_CGC_CTRL2 0x0001
+#define ixUVD_CGC_MEM_DS_CTRL 0x0002
+#define ixUVD_CGC_MEM_SD_CTRL 0x0003
+#define ixUVD_SW_SCRATCH_00 0x0004
+#define ixUVD_SW_SCRATCH_01 0x0005
+#define ixUVD_SW_SCRATCH_02 0x0006
+#define ixUVD_SW_SCRATCH_03 0x0007
+#define ixUVD_SW_SCRATCH_04 0x0008
+#define ixUVD_SW_SCRATCH_05 0x0009
+#define ixUVD_SW_SCRATCH_06 0x000a
+#define ixUVD_SW_SCRATCH_07 0x000b
+#define ixUVD_SW_SCRATCH_08 0x000c
+#define ixUVD_SW_SCRATCH_09 0x000d
+#define ixUVD_SW_SCRATCH_10 0x000e
+#define ixUVD_SW_SCRATCH_11 0x000f
+#define ixUVD_SW_SCRATCH_12 0x0010
+#define ixUVD_SW_SCRATCH_13 0x0011
+#define ixUVD_SW_SCRATCH_14 0x0012
+#define ixUVD_SW_SCRATCH_15 0x0013
+#define ixUVD_IH_SEM_CTRL 0x001e
+
+
+// addressBlock: lmi_adp_indirect
+// base address: 0x0
+#define ixUVD_LMI_CRC0 0x0000
+#define ixUVD_LMI_CRC1 0x0001
+#define ixUVD_LMI_CRC2 0x0002
+#define ixUVD_LMI_CRC3 0x0003
+#define ixUVD_LMI_CRC10 0x000a
+#define ixUVD_LMI_CRC11 0x000b
+#define ixUVD_LMI_CRC12 0x000c
+#define ixUVD_LMI_CRC13 0x000d
+#define ixUVD_LMI_CRC14 0x000e
+#define ixUVD_LMI_CRC15 0x000f
+#define ixUVD_LMI_SWAP_CNTL2 0x0029
+#define ixUVD_MEMCHECK_SYS_INT_EN 0x0134
+#define ixUVD_MEMCHECK_SYS_INT_STAT 0x0135
+#define ixUVD_MEMCHECK_SYS_INT_ACK 0x0136
+#define ixUVD_MEMCHECK_VCPU_INT_EN 0x0137
+#define ixUVD_MEMCHECK_VCPU_INT_STAT 0x0138
+#define ixUVD_MEMCHECK_VCPU_INT_ACK 0x0139
+#define ixUVD_MEMCHECK2_SYS_INT_STAT 0x0140
+#define ixUVD_MEMCHECK2_SYS_INT_ACK 0x0141
+#define ixUVD_MEMCHECK2_VCPU_INT_STAT 0x0142
+#define ixUVD_MEMCHECK2_VCPU_INT_ACK 0x0143
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
new file mode 100644
index 000000000000..5c119a6b87fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -0,0 +1,7627 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _vcn_5_0_0_SH_MASK_HEADER
+#define _vcn_5_0_0_SH_MASK_HEADER
+
+
+// addressBlock: uvd_uvddec
+//UVD_TOP_CTRL
+#define UVD_TOP_CTRL__STANDARD__SHIFT 0x0
+#define UVD_TOP_CTRL__STD_VERSION__SHIFT 0x4
+#define UVD_TOP_CTRL__STANDARD_MASK 0x0000000FL
+#define UVD_TOP_CTRL__STD_VERSION_MASK 0x00000010L
+//UVD_CGC_GATE
+#define UVD_CGC_GATE__SYS__SHIFT 0x0
+#define UVD_CGC_GATE__UDEC__SHIFT 0x1
+#define UVD_CGC_GATE__MPEG2__SHIFT 0x2
+#define UVD_CGC_GATE__REGS__SHIFT 0x3
+#define UVD_CGC_GATE__RBC__SHIFT 0x4
+#define UVD_CGC_GATE__LMI_MC__SHIFT 0x5
+#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x6
+#define UVD_CGC_GATE__IDCT__SHIFT 0x7
+#define UVD_CGC_GATE__MPRD__SHIFT 0x8
+#define UVD_CGC_GATE__MPC__SHIFT 0x9
+#define UVD_CGC_GATE__LBSI__SHIFT 0xa
+#define UVD_CGC_GATE__LRBBM__SHIFT 0xb
+#define UVD_CGC_GATE__UDEC_RE__SHIFT 0xc
+#define UVD_CGC_GATE__UDEC_CM__SHIFT 0xd
+#define UVD_CGC_GATE__UDEC_IT__SHIFT 0xe
+#define UVD_CGC_GATE__UDEC_DB__SHIFT 0xf
+#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x10
+#define UVD_CGC_GATE__WCB__SHIFT 0x11
+#define UVD_CGC_GATE__VCPU__SHIFT 0x12
+#define UVD_CGC_GATE__MMSCH__SHIFT 0x14
+#define UVD_CGC_GATE__LCM0__SHIFT 0x15
+#define UVD_CGC_GATE__LCM1__SHIFT 0x16
+#define UVD_CGC_GATE__MIF__SHIFT 0x17
+#define UVD_CGC_GATE__VREG__SHIFT 0x18
+#define UVD_CGC_GATE__PE__SHIFT 0x19
+#define UVD_CGC_GATE__PPU__SHIFT 0x1a
+#define UVD_CGC_GATE__SYS_MASK 0x00000001L
+#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
+#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
+#define UVD_CGC_GATE__REGS_MASK 0x00000008L
+#define UVD_CGC_GATE__RBC_MASK 0x00000010L
+#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
+#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
+#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
+#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
+#define UVD_CGC_GATE__MPC_MASK 0x00000200L
+#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
+#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
+#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
+#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
+#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
+#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
+#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
+#define UVD_CGC_GATE__WCB_MASK 0x00020000L
+#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
+#define UVD_CGC_GATE__MMSCH_MASK 0x00100000L
+#define UVD_CGC_GATE__LCM0_MASK 0x00200000L
+#define UVD_CGC_GATE__LCM1_MASK 0x00400000L
+#define UVD_CGC_GATE__MIF_MASK 0x00800000L
+#define UVD_CGC_GATE__VREG_MASK 0x01000000L
+#define UVD_CGC_GATE__PE_MASK 0x02000000L
+#define UVD_CGC_GATE__PPU_MASK 0x04000000L
+//UVD_CGC_CTRL
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x2
+#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x6
+#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x11
+#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x12
+#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x13
+#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x14
+#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x15
+#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x16
+#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x17
+#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x18
+#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x19
+#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x1a
+#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x1b
+#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x1c
+#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x1d
+#define UVD_CGC_CTRL__MMSCH_MODE__SHIFT 0x1f
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003CL
+#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007C0L
+#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L
+#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L
+#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L
+#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L
+#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L
+#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L
+#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L
+#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L
+#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L
+#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L
+#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L
+#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L
+#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L
+#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L
+#define UVD_CGC_CTRL__MMSCH_MODE_MASK 0x80000000L
+//AVM_SUVD_CGC_GATE
+#define AVM_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define AVM_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define AVM_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define AVM_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define AVM_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define AVM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define AVM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define AVM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define AVM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define AVM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define AVM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define AVM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define AVM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define AVM_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define AVM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define AVM_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define AVM_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define AVM_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define AVM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define AVM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define AVM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define AVM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define AVM_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define AVM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define AVM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define AVM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define AVM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define AVM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define AVM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define AVM_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define AVM_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define AVM_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define AVM_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define AVM_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define AVM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define AVM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define AVM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define AVM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define AVM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define AVM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define AVM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define AVM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define AVM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define AVM_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define AVM_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define AVM_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define AVM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define AVM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define AVM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define AVM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define AVM_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define AVM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define AVM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define AVM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define AVM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define AVM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define AVM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//EFC_SUVD_CGC_GATE
+#define EFC_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define EFC_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define EFC_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define EFC_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define EFC_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define EFC_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define EFC_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define EFC_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define EFC_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define EFC_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define EFC_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define EFC_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define EFC_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define EFC_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define EFC_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define EFC_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define EFC_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define EFC_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define EFC_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define EFC_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define EFC_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define EFC_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define EFC_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define EFC_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define EFC_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define EFC_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define EFC_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define EFC_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define EFC_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define EFC_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define EFC_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define EFC_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define EFC_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define EFC_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define EFC_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define EFC_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define EFC_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define EFC_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define EFC_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define EFC_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define EFC_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define EFC_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define EFC_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define EFC_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define EFC_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define EFC_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define EFC_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define EFC_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define EFC_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define EFC_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define EFC_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define EFC_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define EFC_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define EFC_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define EFC_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define EFC_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define EFC_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//ENT_SUVD_CGC_GATE
+#define ENT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define ENT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define ENT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define ENT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define ENT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define ENT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define ENT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define ENT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define ENT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define ENT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define ENT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define ENT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define ENT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define ENT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define ENT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define ENT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define ENT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define ENT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define ENT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define ENT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define ENT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define ENT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define ENT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define ENT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define ENT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define ENT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define ENT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define ENT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define ENT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define ENT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define ENT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define ENT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define ENT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define ENT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define ENT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define ENT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define ENT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define ENT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define ENT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define ENT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define ENT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define ENT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define ENT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define ENT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define ENT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define ENT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define ENT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define ENT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define ENT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define ENT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define ENT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define ENT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define ENT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define ENT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define ENT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define ENT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define ENT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//IME_SUVD_CGC_GATE
+#define IME_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define IME_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define IME_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define IME_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define IME_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define IME_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define IME_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define IME_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define IME_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define IME_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define IME_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define IME_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define IME_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define IME_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define IME_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define IME_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define IME_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define IME_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define IME_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define IME_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define IME_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define IME_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define IME_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define IME_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define IME_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define IME_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define IME_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define IME_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define IME_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define IME_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define IME_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define IME_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define IME_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define IME_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define IME_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define IME_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define IME_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define IME_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define IME_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define IME_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define IME_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define IME_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define IME_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define IME_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define IME_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define IME_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define IME_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define IME_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define IME_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define IME_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define IME_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define IME_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define IME_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define IME_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define IME_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define IME_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define IME_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define IME_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define IME_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//PPU_SUVD_CGC_GATE
+#define PPU_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define PPU_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define PPU_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define PPU_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define PPU_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define PPU_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define PPU_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define PPU_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define PPU_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define PPU_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define PPU_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define PPU_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define PPU_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define PPU_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define PPU_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define PPU_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define PPU_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define PPU_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define PPU_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define PPU_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define PPU_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define PPU_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define PPU_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define PPU_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define PPU_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define PPU_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define PPU_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define PPU_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define PPU_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define PPU_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define PPU_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define PPU_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define PPU_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define PPU_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define PPU_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define PPU_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define PPU_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define PPU_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define PPU_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define PPU_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define PPU_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define PPU_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define PPU_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define PPU_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define PPU_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define PPU_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define PPU_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define PPU_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define PPU_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define PPU_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define PPU_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define PPU_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define PPU_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define PPU_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define PPU_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define PPU_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define PPU_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SAOE_SUVD_CGC_GATE
+#define SAOE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SAOE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SAOE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SAOE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SAOE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SAOE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SAOE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SAOE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SAOE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SAOE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SAOE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SAOE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SAOE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SAOE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SAOE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SAOE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SAOE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SAOE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SAOE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SAOE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SAOE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SAOE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SAOE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SAOE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SAOE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SAOE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SAOE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SAOE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SAOE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SAOE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SAOE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SAOE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SAOE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SAOE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SAOE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SAOE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SAOE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SAOE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SAOE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SAOE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SAOE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SAOE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SAOE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SAOE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SAOE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SAOE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SAOE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SAOE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SAOE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SAOE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SAOE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SAOE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SAOE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SAOE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SAOE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SAOE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SCM_SUVD_CGC_GATE
+#define SCM_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SCM_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SCM_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SCM_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SCM_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SCM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SCM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SCM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SCM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SCM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SCM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SCM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SCM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SCM_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SCM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SCM_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SCM_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SCM_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SCM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SCM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SCM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SCM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SCM_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SCM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SCM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SCM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SCM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SCM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SCM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SCM_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SCM_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SCM_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SCM_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SCM_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SCM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SCM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SCM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SCM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SCM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SCM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SCM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SCM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SCM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SCM_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SCM_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SCM_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SCM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SCM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SCM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SCM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SCM_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SCM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SCM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SCM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SCM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SCM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SCM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SDB_SUVD_CGC_GATE
+#define SDB_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SDB_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SDB_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SDB_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SDB_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SDB_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SDB_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SDB_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SDB_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SDB_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SDB_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SDB_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SDB_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SDB_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SDB_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SDB_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SDB_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SDB_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SDB_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SDB_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SDB_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SDB_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SDB_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SDB_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SDB_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SDB_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SDB_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SDB_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SDB_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SDB_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SDB_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SDB_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SDB_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SDB_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SDB_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SDB_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SDB_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SDB_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SDB_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SDB_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SDB_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SDB_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SDB_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SDB_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SDB_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SDB_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SDB_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SDB_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SDB_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SDB_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SDB_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SDB_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SDB_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SDB_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SDB_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SDB_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SDB_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT0_NXT_SUVD_CGC_GATE
+#define SIT0_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT0_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT0_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT0_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT0_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT0_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT0_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT0_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT0_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT0_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT0_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT0_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT1_NXT_SUVD_CGC_GATE
+#define SIT1_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT1_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT1_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT1_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT1_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT1_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT1_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT1_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT1_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT1_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT1_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT1_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT2_NXT_SUVD_CGC_GATE
+#define SIT2_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT2_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT2_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT2_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT2_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT2_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT2_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT2_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT2_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT2_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT2_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT2_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT_SUVD_CGC_GATE
+#define SIT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SMPA_SUVD_CGC_GATE
+#define SMPA_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SMPA_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SMPA_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SMPA_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SMPA_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SMPA_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SMPA_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SMPA_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SMPA_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SMPA_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SMPA_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SMPA_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SMPA_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SMPA_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SMPA_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SMPA_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SMPA_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SMPA_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SMPA_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SMPA_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SMPA_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SMPA_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SMPA_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SMPA_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SMPA_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SMPA_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SMPA_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SMPA_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SMPA_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SMPA_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SMPA_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SMPA_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SMPA_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SMPA_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SMPA_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SMPA_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SMPA_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SMPA_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SMPA_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SMPA_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SMPA_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SMPA_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SMPA_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SMPA_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SMPA_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SMPA_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SMPA_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SMPA_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SMPA_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SMPA_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SMPA_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SMPA_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SMPA_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SMPA_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SMPA_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SMPA_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SMP_SUVD_CGC_GATE
+#define SMP_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SMP_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SMP_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SMP_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SMP_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SMP_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SMP_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SMP_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SMP_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SMP_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SMP_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SMP_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SMP_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SMP_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SMP_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SMP_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SMP_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SMP_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SMP_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SMP_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SMP_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SMP_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SMP_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SMP_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SMP_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SMP_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SMP_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SMP_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SMP_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SMP_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SMP_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SMP_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SMP_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SMP_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SMP_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SMP_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SMP_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SMP_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SMP_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SMP_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SMP_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SMP_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SMP_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SMP_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SMP_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SMP_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SMP_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SMP_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SMP_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SMP_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SMP_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SMP_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SMP_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SMP_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SMP_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SMP_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SMP_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SRE_SUVD_CGC_GATE
+#define SRE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SRE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SRE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SRE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SRE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SRE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SRE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SRE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SRE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SRE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SRE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SRE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SRE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SRE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SRE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SRE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SRE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SRE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SRE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SRE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SRE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SRE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SRE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SRE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SRE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SRE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SRE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SRE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SRE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SRE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SRE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SRE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SRE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SRE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SRE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SRE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SRE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SRE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SRE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SRE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SRE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SRE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SRE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SRE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SRE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SRE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SRE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SRE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SRE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SRE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SRE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SRE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SRE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SRE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SRE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SRE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SRE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//UVD_SUVD_CGC_GATE
+#define UVD_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define UVD_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define UVD_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define UVD_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define UVD_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define UVD_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define UVD_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define UVD_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define UVD_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define UVD_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define UVD_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define UVD_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define UVD_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define UVD_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define UVD_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define UVD_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define UVD_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define UVD_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define UVD_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define UVD_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define UVD_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define UVD_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define UVD_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define UVD_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define UVD_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define UVD_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define UVD_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define UVD_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define UVD_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define UVD_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define UVD_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define UVD_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define UVD_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define UVD_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define UVD_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define UVD_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//AVM_SUVD_CGC_GATE2
+#define AVM_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define AVM_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define AVM_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define AVM_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define AVM_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define AVM_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define AVM_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define AVM_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define AVM_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define AVM_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define AVM_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define AVM_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define AVM_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define AVM_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define AVM_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define AVM_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//DBR_SUVD_CGC_GATE2
+#define DBR_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define DBR_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define DBR_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define DBR_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define DBR_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define DBR_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define DBR_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define DBR_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define DBR_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define DBR_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define DBR_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define DBR_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define DBR_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define DBR_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define DBR_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define DBR_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//ENT_SUVD_CGC_GATE2
+#define ENT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define ENT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define ENT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define ENT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define ENT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define ENT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define ENT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define ENT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define ENT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define ENT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define ENT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define ENT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define ENT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define ENT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define ENT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define ENT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//IME_SUVD_CGC_GATE2
+#define IME_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define IME_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define IME_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define IME_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define IME_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define IME_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define IME_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define IME_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define IME_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define IME_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define IME_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define IME_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define IME_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define IME_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define IME_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define IME_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SAOE_SUVD_CGC_GATE2
+#define SAOE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SAOE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SAOE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SAOE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SAOE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SAOE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SAOE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SAOE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SAOE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SAOE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SAOE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SAOE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SAOE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SAOE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SAOE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SAOE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SDB_SUVD_CGC_GATE2
+#define SDB_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SDB_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SDB_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SDB_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SDB_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SDB_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SDB_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SDB_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SDB_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SDB_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SDB_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SDB_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SDB_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SDB_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SDB_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SDB_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT0_NXT_SUVD_CGC_GATE2
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT1_NXT_SUVD_CGC_GATE2
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT2_NXT_SUVD_CGC_GATE2
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT_SUVD_CGC_GATE2
+#define SIT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SMPA_SUVD_CGC_GATE2
+#define SMPA_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SMPA_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SMPA_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SMPA_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SMPA_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SMPA_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SMPA_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SMPA_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SMPA_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SMPA_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SMPA_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SMPA_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SMPA_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SMPA_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SMPA_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SMPA_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SMP_SUVD_CGC_GATE2
+#define SMP_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SMP_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SMP_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SMP_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SMP_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SMP_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SMP_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SMP_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SMP_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SMP_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SMP_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SMP_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SMP_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SMP_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SMP_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SMP_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SRE_SUVD_CGC_GATE2
+#define SRE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SRE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SRE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SRE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SRE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SRE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SRE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SRE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SRE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SRE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SRE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SRE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SRE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SRE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SRE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SRE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//UVD_SUVD_CGC_GATE2
+#define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define UVD_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define UVD_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define UVD_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define UVD_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define UVD_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define UVD_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define UVD_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define UVD_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define UVD_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define UVD_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//AVM_SUVD_CGC_CTRL
+#define AVM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define AVM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define AVM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define AVM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define AVM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define AVM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define AVM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define AVM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define AVM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define AVM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define AVM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define AVM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define AVM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define AVM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define AVM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define AVM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define AVM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define AVM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define AVM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define AVM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define AVM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define AVM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define AVM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define AVM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define AVM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define AVM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define AVM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define AVM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define AVM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define AVM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define AVM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define AVM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define AVM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define AVM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define AVM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define AVM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define AVM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define AVM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define AVM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define AVM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//DBR_SUVD_CGC_CTRL
+#define DBR_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define DBR_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define DBR_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define DBR_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define DBR_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define DBR_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define DBR_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define DBR_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define DBR_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define DBR_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define DBR_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define DBR_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define DBR_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define DBR_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define DBR_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define DBR_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define DBR_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define DBR_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define DBR_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define DBR_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define DBR_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define DBR_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define DBR_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define DBR_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define DBR_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define DBR_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define DBR_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define DBR_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define DBR_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define DBR_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define DBR_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define DBR_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define DBR_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define DBR_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define DBR_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define DBR_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define DBR_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define DBR_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define DBR_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define DBR_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//EFC_SUVD_CGC_CTRL
+#define EFC_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define EFC_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define EFC_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define EFC_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define EFC_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define EFC_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define EFC_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define EFC_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define EFC_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define EFC_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define EFC_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define EFC_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define EFC_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define EFC_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define EFC_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define EFC_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define EFC_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define EFC_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define EFC_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define EFC_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define EFC_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define EFC_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define EFC_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define EFC_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define EFC_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define EFC_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define EFC_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define EFC_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define EFC_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define EFC_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define EFC_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define EFC_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define EFC_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define EFC_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define EFC_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define EFC_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define EFC_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define EFC_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define EFC_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define EFC_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//ENT_SUVD_CGC_CTRL
+#define ENT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define ENT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define ENT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define ENT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define ENT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define ENT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define ENT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define ENT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define ENT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define ENT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define ENT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define ENT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define ENT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define ENT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define ENT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define ENT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define ENT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define ENT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define ENT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define ENT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define ENT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define ENT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define ENT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define ENT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define ENT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define ENT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define ENT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define ENT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define ENT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define ENT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define ENT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define ENT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define ENT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define ENT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define ENT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define ENT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define ENT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define ENT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define ENT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define ENT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//IME_SUVD_CGC_CTRL
+#define IME_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define IME_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define IME_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define IME_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define IME_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define IME_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define IME_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define IME_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define IME_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define IME_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define IME_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define IME_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define IME_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define IME_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define IME_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define IME_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define IME_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define IME_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define IME_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define IME_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define IME_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define IME_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define IME_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define IME_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define IME_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define IME_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define IME_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define IME_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define IME_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define IME_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define IME_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define IME_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define IME_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define IME_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define IME_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define IME_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define IME_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define IME_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define IME_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define IME_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define IME_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define IME_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//PPU_SUVD_CGC_CTRL
+#define PPU_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define PPU_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define PPU_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define PPU_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define PPU_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define PPU_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define PPU_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define PPU_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define PPU_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define PPU_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define PPU_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define PPU_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define PPU_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define PPU_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define PPU_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define PPU_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define PPU_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define PPU_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define PPU_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define PPU_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define PPU_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define PPU_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define PPU_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define PPU_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define PPU_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define PPU_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define PPU_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define PPU_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define PPU_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define PPU_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define PPU_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define PPU_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define PPU_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define PPU_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define PPU_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define PPU_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define PPU_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define PPU_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define PPU_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define PPU_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SAOE_SUVD_CGC_CTRL
+#define SAOE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SAOE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SAOE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SAOE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SAOE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SAOE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SAOE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SAOE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SAOE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SAOE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SAOE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SAOE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SAOE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SAOE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SAOE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SAOE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SAOE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SAOE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SAOE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SAOE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SAOE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SAOE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SAOE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SAOE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SAOE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SAOE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SAOE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SAOE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SAOE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SAOE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SCM_SUVD_CGC_CTRL
+#define SCM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SCM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SCM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SCM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SCM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SCM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SCM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SCM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SCM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SCM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SCM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SCM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SCM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SCM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SCM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SCM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SCM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SCM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SCM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SCM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SCM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SCM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SCM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SCM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SCM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SCM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SCM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SCM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SCM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SCM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SCM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SCM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SCM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SCM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SCM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SCM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SCM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SCM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SCM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SCM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SDB_SUVD_CGC_CTRL
+#define SDB_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SDB_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SDB_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SDB_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SDB_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SDB_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SDB_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SDB_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SDB_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SDB_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SDB_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SDB_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SDB_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SDB_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SDB_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SDB_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SDB_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SDB_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SDB_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SDB_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SDB_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SDB_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SDB_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SDB_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SDB_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SDB_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SDB_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SDB_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SDB_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SDB_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SDB_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SDB_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SDB_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SDB_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SDB_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SDB_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SDB_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SDB_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SDB_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SDB_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT0_NXT_SUVD_CGC_CTRL
+#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT1_NXT_SUVD_CGC_CTRL
+#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT2_NXT_SUVD_CGC_CTRL
+#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT_SUVD_CGC_CTRL
+#define SIT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SMPA_SUVD_CGC_CTRL
+#define SMPA_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SMPA_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SMPA_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SMPA_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SMPA_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SMPA_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SMPA_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SMPA_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SMPA_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SMPA_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SMPA_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SMPA_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SMPA_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SMPA_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SMPA_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SMPA_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SMPA_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SMPA_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SMPA_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SMPA_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SMPA_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SMPA_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SMPA_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SMPA_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SMPA_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SMPA_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SMPA_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SMPA_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SMPA_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SMPA_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SMP_SUVD_CGC_CTRL
+#define SMP_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SMP_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SMP_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SMP_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SMP_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SMP_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SMP_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SMP_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SMP_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SMP_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SMP_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SMP_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SMP_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SMP_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SMP_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SMP_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SMP_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SMP_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SMP_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SMP_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SMP_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SMP_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SMP_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SMP_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SMP_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SMP_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SMP_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SMP_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SMP_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SMP_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SMP_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SMP_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SMP_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SMP_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SMP_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SMP_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SMP_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SMP_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SMP_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SMP_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SRE_SUVD_CGC_CTRL
+#define SRE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SRE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SRE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SRE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SRE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SRE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SRE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SRE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SRE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SRE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SRE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SRE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SRE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SRE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SRE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SRE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SRE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SRE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SRE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SRE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SRE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SRE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SRE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SRE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SRE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SRE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SRE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SRE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SRE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SRE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SRE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SRE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SRE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SRE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SRE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SRE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SRE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SRE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SRE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SRE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_SUVD_CGC_CTRL
+#define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define UVD_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define UVD_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define UVD_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define UVD_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define UVD_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define UVD_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define UVD_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_CGC_CTRL3
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY__SHIFT 0x0
+#define UVD_CGC_CTRL3__LCM0_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL3__LCM1_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL3__MIF_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL3__VREG_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL3__PE_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL3__PPU_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY_MASK 0x000000FFL
+#define UVD_CGC_CTRL3__LCM0_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL3__LCM1_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL3__MIF_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL3__VREG_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL3__PE_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL3__PPU_MODE_MASK 0x00010000L
+//UVD_GPCOM_VCPU_DATA0
+#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_VCPU_DATA1
+#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_CMD
+#define UVD_GPCOM_SYS_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_SYS_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_SYS_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_SYS_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE_MASK 0x80000000L
+//UVD_GPCOM_SYS_DATA0
+#define UVD_GPCOM_SYS_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_DATA1
+#define UVD_GPCOM_SYS_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_VCPU_INT_EN
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN__SHIFT 0x3
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN__SHIFT 0x4
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN__SHIFT 0x5
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN__SHIFT 0x7
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN__SHIFT 0x9
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN__SHIFT 0xa
+#define UVD_VCPU_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_VCPU_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_VCPU_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_VCPU_INT_EN__RPTR_WR_EN__SHIFT 0x10
+#define UVD_VCPU_INT_EN__JOB_START_EN__SHIFT 0x11
+#define UVD_VCPU_INT_EN__NJ_PF_EN__SHIFT 0x12
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_VCPU_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_VCPU_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_VCPU_INT_EN__AVM_INT_EN__SHIFT 0x1a
+#define UVD_VCPU_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN__SHIFT 0x1e
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN__SHIFT 0x1f
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN_MASK 0x00000008L
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN_MASK 0x00000010L
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN_MASK 0x00000020L
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN_MASK 0x00000080L
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN_MASK 0x00000200L
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN_MASK 0x00000400L
+#define UVD_VCPU_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_VCPU_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_VCPU_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_VCPU_INT_EN__RPTR_WR_EN_MASK 0x00010000L
+#define UVD_VCPU_INT_EN__JOB_START_EN_MASK 0x00020000L
+#define UVD_VCPU_INT_EN__NJ_PF_EN_MASK 0x00040000L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_VCPU_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_VCPU_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_VCPU_INT_EN__AVM_INT_EN_MASK 0x04000000L
+#define UVD_VCPU_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN_MASK 0x40000000L
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN_MASK 0x80000000L
+//UVD_VCPU_INT_STATUS
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT__SHIFT 0x3
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT__SHIFT 0x4
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT__SHIFT 0x5
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT__SHIFT 0x7
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT__SHIFT 0x9
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT__SHIFT 0xa
+#define UVD_VCPU_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_VCPU_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_VCPU_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT__SHIFT 0x10
+#define UVD_VCPU_INT_STATUS__JOB_START_INT__SHIFT 0x11
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT__SHIFT 0x12
+#define UVD_VCPU_INT_STATUS__GPCOM_INT__SHIFT 0x14
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_VCPU_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_VCPU_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_VCPU_INT_STATUS__AVM_INT__SHIFT 0x1a
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_VCPU_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT__SHIFT 0x1e
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT__SHIFT 0x1f
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT_MASK 0x00000008L
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT_MASK 0x00000010L
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT_MASK 0x00000020L
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT_MASK 0x00000080L
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT_MASK 0x00000200L
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT_MASK 0x00000400L
+#define UVD_VCPU_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_VCPU_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_VCPU_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT_MASK 0x00010000L
+#define UVD_VCPU_INT_STATUS__JOB_START_INT_MASK 0x00020000L
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT_MASK 0x00040000L
+#define UVD_VCPU_INT_STATUS__GPCOM_INT_MASK 0x00100000L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_VCPU_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_VCPU_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_VCPU_INT_STATUS__AVM_INT_MASK 0x04000000L
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_VCPU_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT_MASK 0x40000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT_MASK 0x80000000L
+//UVD_VCPU_INT_ACK
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK__SHIFT 0x3
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK__SHIFT 0x4
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK__SHIFT 0x5
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK__SHIFT 0x7
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK__SHIFT 0x9
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK__SHIFT 0xa
+#define UVD_VCPU_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_VCPU_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_VCPU_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK__SHIFT 0x10
+#define UVD_VCPU_INT_ACK__JOB_START_ACK__SHIFT 0x11
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK__SHIFT 0x12
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_VCPU_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_VCPU_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK__SHIFT 0x1a
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK__SHIFT 0x1e
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK__SHIFT 0x1f
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK_MASK 0x00000008L
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK_MASK 0x00000010L
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK_MASK 0x00000020L
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK_MASK 0x00000080L
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK_MASK 0x00000200L
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK_MASK 0x00000400L
+#define UVD_VCPU_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_VCPU_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_VCPU_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK_MASK 0x00010000L
+#define UVD_VCPU_INT_ACK__JOB_START_ACK_MASK 0x00020000L
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK_MASK 0x00040000L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_VCPU_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_VCPU_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK_MASK 0x04000000L
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK_MASK 0x40000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK_MASK 0x80000000L
+//UVD_VCPU_INT_ROUTE
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG__SHIFT 0x0
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM__SHIFT 0x2
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG_MASK 0x00000001L
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM_MASK 0x00000004L
+//UVD_DRV_FW_MSG
+#define UVD_DRV_FW_MSG__MSG__SHIFT 0x0
+#define UVD_DRV_FW_MSG__MSG_MASK 0xFFFFFFFFL
+//UVD_FW_DRV_MSG_ACK
+#define UVD_FW_DRV_MSG_ACK__ACK__SHIFT 0x0
+#define UVD_FW_DRV_MSG_ACK__ACK_MASK 0x00000001L
+//UVD_SUVD_INT_EN
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN__SHIFT 0xc
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN__SHIFT 0x11
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN__SHIFT 0x12
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN__SHIFT 0x17
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN__SHIFT 0x18
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN__SHIFT 0x1d
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN__SHIFT 0x1e
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN_MASK 0x00000800L
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN_MASK 0x0001F000L
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN_MASK 0x00020000L
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN_MASK 0x007C0000L
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN_MASK 0x00800000L
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN_MASK 0x1F000000L
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN_MASK 0x20000000L
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN_MASK 0x40000000L
+//UVD_SUVD_INT_STATUS
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT__SHIFT 0xc
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT__SHIFT 0x11
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT__SHIFT 0x12
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT__SHIFT 0x17
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT__SHIFT 0x18
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT__SHIFT 0x1d
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT__SHIFT 0x1e
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT_MASK 0x00000800L
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT_MASK 0x0001F000L
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT_MASK 0x00020000L
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT_MASK 0x007C0000L
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT_MASK 0x00800000L
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT_MASK 0x1F000000L
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT_MASK 0x20000000L
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT_MASK 0x40000000L
+//UVD_SUVD_INT_ACK
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK__SHIFT 0xc
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK__SHIFT 0x11
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK__SHIFT 0x12
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK__SHIFT 0x17
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK__SHIFT 0x18
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK__SHIFT 0x1d
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK__SHIFT 0x1e
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK_MASK 0x00000800L
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK_MASK 0x0001F000L
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK_MASK 0x00020000L
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK_MASK 0x007C0000L
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK_MASK 0x00800000L
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK_MASK 0x1F000000L
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK_MASK 0x20000000L
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK_MASK 0x40000000L
+//UVD_ENC_VCPU_INT_EN
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_STATUS
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_ACK
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK_MASK 0x00000004L
+//UVD_MASTINT_EN
+#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1
+#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//UVD_SYS_INT_EN
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_SYS_INT_EN__CXW_WR_EN__SHIFT 0x3
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_SYS_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_SYS_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_SYS_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_SYS_INT_EN__JOB_DONE_EN__SHIFT 0x10
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_SYS_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_SYS_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_SYS_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_SYS_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_SYS_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_SYS_INT_EN__AVM_INT_EN__SHIFT 0x1f
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_SYS_INT_EN__CXW_WR_EN_MASK 0x00000008L
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_SYS_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_SYS_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_SYS_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_SYS_INT_EN__JOB_DONE_EN_MASK 0x00010000L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_SYS_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_SYS_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_SYS_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_SYS_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_SYS_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_SYS_INT_EN__AVM_INT_EN_MASK 0x80000000L
+//UVD_SYS_INT_STATUS
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_SYS_INT_STATUS__CXW_WR_INT__SHIFT 0x3
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_SYS_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_SYS_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_SYS_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT__SHIFT 0x10
+#define UVD_SYS_INT_STATUS__GPCOM_INT__SHIFT 0x12
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_SYS_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_SYS_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_SYS_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_SYS_INT_STATUS__AVM_INT__SHIFT 0x1f
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_SYS_INT_STATUS__CXW_WR_INT_MASK 0x00000008L
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_SYS_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_SYS_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_SYS_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT_MASK 0x00010000L
+#define UVD_SYS_INT_STATUS__GPCOM_INT_MASK 0x00040000L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_SYS_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_SYS_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_SYS_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_SYS_INT_STATUS__AVM_INT_MASK 0x80000000L
+//UVD_SYS_INT_ACK
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_SYS_INT_ACK__CXW_WR_ACK__SHIFT 0x3
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_SYS_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_SYS_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_SYS_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK__SHIFT 0x10
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_SYS_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_SYS_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_SYS_INT_ACK__AVM_INT_ACK__SHIFT 0x1f
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_SYS_INT_ACK__CXW_WR_ACK_MASK 0x00000008L
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_SYS_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_SYS_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_SYS_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK_MASK 0x00010000L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_SYS_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_SYS_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_SYS_INT_ACK__AVM_INT_ACK_MASK 0x80000000L
+//UVD_JOB_DONE
+#define UVD_JOB_DONE__JOB_DONE__SHIFT 0x0
+#define UVD_JOB_DONE__JOB_DONE_MASK 0x00000003L
+//UVD_CBUF_ID
+#define UVD_CBUF_ID__CBUF_ID__SHIFT 0x0
+#define UVD_CBUF_ID__CBUF_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID
+#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x0
+#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID2
+#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0
+#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL
+//UVD_NO_OP
+#define UVD_NO_OP__NO_OP__SHIFT 0x0
+#define UVD_NO_OP__NO_OP_MASK 0xFFFFFFFFL
+//UVD_RB_BASE_LO
+#define UVD_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI
+#define UVD_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE
+#define UVD_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO2
+#define UVD_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO2__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI2
+#define UVD_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI2__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE2
+#define UVD_RB_SIZE2__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE2__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO3
+#define UVD_RB_BASE_LO3__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO3__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI3
+#define UVD_RB_BASE_HI3__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI3__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE3
+#define UVD_RB_SIZE3__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE3__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO4
+#define UVD_RB_BASE_LO4__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO4__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI4
+#define UVD_RB_BASE_HI4__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI4__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE4
+#define UVD_RB_SIZE4__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE4__RB_SIZE_MASK 0x007FFFF0L
+//UVD_OUT_RB_BASE_LO
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_OUT_RB_BASE_HI
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_OUT_RB_SIZE
+#define UVD_OUT_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_OUT_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_IOV_ACTIVE_FCN_ID
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000003FL
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//UVD_IOV_MAILBOX
+#define UVD_IOV_MAILBOX__MAILBOX__SHIFT 0x0
+#define UVD_IOV_MAILBOX__MAILBOX_MASK 0xFFFFFFFFL
+//UVD_IOV_MAILBOX_RESP
+#define UVD_IOV_MAILBOX_RESP__RESP__SHIFT 0x0
+#define UVD_IOV_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+//UVD_RB_ARB_CTRL
+#define UVD_RB_ARB_CTRL__SRBM_DROP__SHIFT 0x0
+#define UVD_RB_ARB_CTRL__SRBM_DIS__SHIFT 0x1
+#define UVD_RB_ARB_CTRL__VCPU_DROP__SHIFT 0x2
+#define UVD_RB_ARB_CTRL__VCPU_DIS__SHIFT 0x3
+#define UVD_RB_ARB_CTRL__RBC_DROP__SHIFT 0x4
+#define UVD_RB_ARB_CTRL__RBC_DIS__SHIFT 0x5
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP__SHIFT 0x6
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS__SHIFT 0x7
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN__SHIFT 0x8
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN__SHIFT 0x9
+#define UVD_RB_ARB_CTRL__SRBM_DROP_MASK 0x00000001L
+#define UVD_RB_ARB_CTRL__SRBM_DIS_MASK 0x00000002L
+#define UVD_RB_ARB_CTRL__VCPU_DROP_MASK 0x00000004L
+#define UVD_RB_ARB_CTRL__VCPU_DIS_MASK 0x00000008L
+#define UVD_RB_ARB_CTRL__RBC_DROP_MASK 0x00000010L
+#define UVD_RB_ARB_CTRL__RBC_DIS_MASK 0x00000020L
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP_MASK 0x00000040L
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS_MASK 0x00000080L
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN_MASK 0x00000100L
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN_MASK 0x00000200L
+//UVD_CTX_INDEX
+#define UVD_CTX_INDEX__INDEX__SHIFT 0x0
+#define UVD_CTX_INDEX__INDEX_MASK 0x000001FFL
+//UVD_CTX_DATA
+#define UVD_CTX_DATA__DATA__SHIFT 0x0
+#define UVD_CTX_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_CXW_WR
+#define UVD_CXW_WR__DAT__SHIFT 0x0
+#define UVD_CXW_WR__STAT__SHIFT 0x1f
+#define UVD_CXW_WR__DAT_MASK 0x0FFFFFFFL
+#define UVD_CXW_WR__STAT_MASK 0x80000000L
+//UVD_CXW_WR_INT_ID
+#define UVD_CXW_WR_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_ID__ID_MASK 0x000000FFL
+//UVD_CXW_WR_INT_CTX_ID
+#define UVD_CXW_WR_INT_CTX_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_CTX_ID__ID_MASK 0x0FFFFFFFL
+//UVD_CXW_INT_ID
+#define UVD_CXW_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_INT_ID__ID_MASK 0x000000FFL
+//UVD_MPEG2_ERROR
+#define UVD_MPEG2_ERROR__STATUS__SHIFT 0x0
+#define UVD_MPEG2_ERROR__STATUS_MASK 0xFFFFFFFFL
+//UVD_YBASE
+#define UVD_YBASE__DUM__SHIFT 0x0
+#define UVD_YBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_UVBASE
+#define UVD_UVBASE__DUM__SHIFT 0x0
+#define UVD_UVBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_PITCH
+#define UVD_PITCH__DUM__SHIFT 0x0
+#define UVD_PITCH__DUM_MASK 0xFFFFFFFFL
+//UVD_WIDTH
+#define UVD_WIDTH__DUM__SHIFT 0x0
+#define UVD_WIDTH__DUM_MASK 0xFFFFFFFFL
+//UVD_HEIGHT
+#define UVD_HEIGHT__DUM__SHIFT 0x0
+#define UVD_HEIGHT__DUM_MASK 0xFFFFFFFFL
+//UVD_PICCOUNT
+#define UVD_PICCOUNT__DUM__SHIFT 0x0
+#define UVD_PICCOUNT__DUM_MASK 0xFFFFFFFFL
+//UVD_MPRD_INITIAL_XY
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X__SHIFT 0x0
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y__SHIFT 0x10
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X_MASK 0x00000FFFL
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y_MASK 0x0FFF0000L
+//UVD_MPEG2_CTRL
+#define UVD_MPEG2_CTRL__EN__SHIFT 0x0
+#define UVD_MPEG2_CTRL__TRICK_MODE__SHIFT 0x1
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB__SHIFT 0x10
+#define UVD_MPEG2_CTRL__EN_MASK 0x00000001L
+#define UVD_MPEG2_CTRL__TRICK_MODE_MASK 0x00000002L
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB_MASK 0xFFFF0000L
+//UVD_MB_CTL_BUF_BASE
+#define UVD_MB_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_MB_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_PIC_CTL_BUF_BASE
+#define UVD_PIC_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_PIC_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_DXVA_BUF_SIZE
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE__SHIFT 0x0
+#define UVD_DXVA_BUF_SIZE__MB_SIZE__SHIFT 0x10
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE_MASK 0x0000FFFFL
+#define UVD_DXVA_BUF_SIZE__MB_SIZE_MASK 0xFFFF0000L
+//UVD_SCRATCH_NP
+#define UVD_SCRATCH_NP__DATA__SHIFT 0x0
+#define UVD_SCRATCH_NP__DATA_MASK 0xFFFFFFFFL
+//UVD_CLK_SWT_HANDSHAKE
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE__SHIFT 0x0
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT__SHIFT 0x8
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE_MASK 0x00000003L
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT_MASK 0x00000300L
+//UVD_GP_SCRATCH0
+#define UVD_GP_SCRATCH0__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH1
+#define UVD_GP_SCRATCH1__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH2
+#define UVD_GP_SCRATCH2__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH3
+#define UVD_GP_SCRATCH3__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH4
+#define UVD_GP_SCRATCH4__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH5
+#define UVD_GP_SCRATCH5__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH6
+#define UVD_GP_SCRATCH6__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH7
+#define UVD_GP_SCRATCH7__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH8
+#define UVD_GP_SCRATCH8__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH9
+#define UVD_GP_SCRATCH9__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH9__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH10
+#define UVD_GP_SCRATCH10__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH11
+#define UVD_GP_SCRATCH11__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH12
+#define UVD_GP_SCRATCH12__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH13
+#define UVD_GP_SCRATCH13__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH14
+#define UVD_GP_SCRATCH14__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH15
+#define UVD_GP_SCRATCH15__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH16
+#define UVD_GP_SCRATCH16__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH17
+#define UVD_GP_SCRATCH17__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH18
+#define UVD_GP_SCRATCH18__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH19
+#define UVD_GP_SCRATCH19__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH20
+#define UVD_GP_SCRATCH20__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH21
+#define UVD_GP_SCRATCH21__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH22
+#define UVD_GP_SCRATCH22__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH23
+#define UVD_GP_SCRATCH23__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_BASE_LO
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_AUDIO_RB_BASE_HI
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_SIZE
+#define UVD_AUDIO_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_AUDIO_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_VCPU_INT_STATUS2
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT_MASK 0x00000001L
+//UVD_VCPU_INT_ACK2
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK_MASK 0x00000001L
+//UVD_VCPU_INT_EN2
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN_MASK 0x00000001L
+//UVD_SUVD_CGC_STATUS2
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK_MASK 0x20000000L
+//UVD_SUVD_INT_STATUS2
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK 0x00000800L
+//UVD_SUVD_INT_EN2
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK 0x00000800L
+//UVD_SUVD_INT_ACK2
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK 0x00000800L
+//UVD_STATUS
+#define UVD_STATUS__RBC_BUSY__SHIFT 0x0
+#define UVD_STATUS__VCPU_REPORT__SHIFT 0x1
+#define UVD_STATUS__FILL_0__SHIFT 0x8
+#define UVD_STATUS__RBC_ACCESS_GPCOM__SHIFT 0x10
+#define UVD_STATUS__DRM_BUSY__SHIFT 0x11
+#define UVD_STATUS__FILL_1__SHIFT 0x12
+#define UVD_STATUS__SYS_GPCOM_REQ__SHIFT 0x1f
+#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
+#define UVD_STATUS__VCPU_REPORT_MASK 0x000000FEL
+#define UVD_STATUS__FILL_0_MASK 0x0000FF00L
+#define UVD_STATUS__RBC_ACCESS_GPCOM_MASK 0x00010000L
+#define UVD_STATUS__DRM_BUSY_MASK 0x00020000L
+#define UVD_STATUS__FILL_1_MASK 0x7FFC0000L
+#define UVD_STATUS__SYS_GPCOM_REQ_MASK 0x80000000L
+//UVD_ENC_PIPE_BUSY
+#define UVD_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY__SHIFT 0xc
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY__SHIFT 0xd
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY__SHIFT 0xe
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY__SHIFT 0xf
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x1d
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x1e
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY__SHIFT 0x1f
+#define UVD_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY_MASK 0x00001000L
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY_MASK 0x00002000L
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY_MASK 0x00004000L
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY_MASK 0x00008000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x20000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x40000000L
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY_MASK 0x80000000L
+//UVD_FW_POWER_STATUS
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF__SHIFT 0x0
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF__SHIFT 0x1
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF__SHIFT 0x2
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF__SHIFT 0x3
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF__SHIFT 0x4
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF__SHIFT 0x5
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF__SHIFT 0x6
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF__SHIFT 0x7
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF__SHIFT 0x8
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF__SHIFT 0x9
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF__SHIFT 0xa
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF_MASK 0x00000001L
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF_MASK 0x00000002L
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF_MASK 0x00000004L
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF_MASK 0x00000008L
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF_MASK 0x00000010L
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF_MASK 0x00000020L
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF_MASK 0x00000040L
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF_MASK 0x00000080L
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF_MASK 0x00000100L
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF_MASK 0x00000200L
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF_MASK 0x00000400L
+//UVD_CNTL
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11
+#define UVD_CNTL__SUVD_EN__SHIFT 0x13
+#define UVD_CNTL__CABAC_MB_ACC__SHIFT 0x1c
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS__SHIFT 0x1f
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L
+#define UVD_CNTL__SUVD_EN_MASK 0x00080000L
+#define UVD_CNTL__CABAC_MB_ACC_MASK 0x10000000L
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS_MASK 0x80000000L
+//UVD_SOFT_RESET
+#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x2
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x3
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x4
+#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x6
+#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x7
+#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x8
+#define UVD_SOFT_RESET__EFC_SOFT_RESET__SHIFT 0x9
+#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0xa
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0xb
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0xc
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0xd
+#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0xe
+#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0xf
+#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x10
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET__SHIFT 0x11
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS__SHIFT 0x12
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS__SHIFT 0x13
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS__SHIFT 0x14
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS__SHIFT 0x15
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS__SHIFT 0x16
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS__SHIFT 0x17
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS__SHIFT 0x18
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS__SHIFT 0x19
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS__SHIFT 0x1a
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS__SHIFT 0x1b
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS__SHIFT 0x1c
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS__SHIFT 0x1d
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS__SHIFT 0x1e
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS__SHIFT 0x1f
+#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
+#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
+#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
+#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
+#define UVD_SOFT_RESET__EFC_SOFT_RESET_MASK 0x00000200L
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
+#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
+#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
+#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET_MASK 0x00020000L
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS_MASK 0x00040000L
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS_MASK 0x00080000L
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS_MASK 0x00100000L
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS_MASK 0x00200000L
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS_MASK 0x00400000L
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS_MASK 0x00800000L
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS_MASK 0x01000000L
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS_MASK 0x02000000L
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS_MASK 0x04000000L
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS_MASK 0x08000000L
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS_MASK 0x10000000L
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS_MASK 0x20000000L
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS_MASK 0x40000000L
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS_MASK 0x80000000L
+//UVD_SOFT_RESET2
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS__SHIFT 0x10
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS_MASK 0x00010000L
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_MMSCH_SOFT_RESET
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET__SHIFT 0x0
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x1
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK__SHIFT 0x1f
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET_MASK 0x00000001L
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK_MASK 0x80000000L
+//UVD_WIG_CTRL
+#define UVD_WIG_CTRL__AVM_SOFT_RESET__SHIFT 0x0
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET__SHIFT 0x1
+#define UVD_WIG_CTRL__WIG_SOFT_RESET__SHIFT 0x2
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON__SHIFT 0x3
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON__SHIFT 0x4
+#define UVD_WIG_CTRL__AVM_SOFT_RESET_MASK 0x00000001L
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_WIG_CTRL__WIG_SOFT_RESET_MASK 0x00000004L
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON_MASK 0x00000008L
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON_MASK 0x00000010L
+//UVD_CGC_STATUS
+#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x0
+#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x1
+#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x2
+#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x3
+#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x4
+#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x5
+#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x6
+#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x7
+#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x8
+#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x9
+#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0xa
+#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0xb
+#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0xc
+#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0xd
+#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0xe
+#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0xf
+#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x10
+#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x11
+#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x12
+#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x13
+#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x14
+#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x15
+#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x16
+#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x17
+#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x18
+#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x19
+#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x1a
+#define UVD_CGC_STATUS__MMSCH_SCLK__SHIFT 0x1b
+#define UVD_CGC_STATUS__MMSCH_VCLK__SHIFT 0x1c
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE__SHIFT 0x1d
+#define UVD_CGC_STATUS__LRBBM_DCLK__SHIFT 0x1e
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE__SHIFT 0x1f
+#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
+#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
+#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
+#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
+#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
+#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
+#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
+#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
+#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
+#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
+#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
+#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
+#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
+#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
+#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
+#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
+#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
+#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
+#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
+#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
+#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
+#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
+#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
+#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
+#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
+#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
+#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
+#define UVD_CGC_STATUS__MMSCH_SCLK_MASK 0x08000000L
+#define UVD_CGC_STATUS__MMSCH_VCLK_MASK 0x10000000L
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE_MASK 0x20000000L
+#define UVD_CGC_STATUS__LRBBM_DCLK_MASK 0x40000000L
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE_MASK 0x80000000L
+//UVD_CGC_UDEC_STATUS
+#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x0
+#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x1
+#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x2
+#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x3
+#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x4
+#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x5
+#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x6
+#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x7
+#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x8
+#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x9
+#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0xa
+#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0xb
+#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0xc
+#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0xd
+#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0xe
+#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
+#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
+#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
+#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
+#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
+#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
+#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
+#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
+#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
+#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
+#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
+#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
+#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
+#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
+#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
+//UVD_SUVD_CGC_STATUS
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK__SHIFT 0x2
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK__SHIFT 0xe
+#define UVD_SUVD_CGC_STATUS__UVD_SC__SHIFT 0xf
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK__SHIFT 0x10
+#define UVD_SUVD_CGC_STATUS__IME_DCLK__SHIFT 0x11
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK__SHIFT 0x12
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK__SHIFT 0x13
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK__SHIFT 0x14
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK__SHIFT 0x15
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK__SHIFT 0x16
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK__SHIFT 0x17
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK__SHIFT 0x18
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK__SHIFT 0x19
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK__SHIFT 0x1a
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT 0x1e
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT 0x1f
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK_MASK 0x00004000L
+#define UVD_SUVD_CGC_STATUS__UVD_SC_MASK 0x00008000L
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK_MASK 0x00010000L
+#define UVD_SUVD_CGC_STATUS__IME_DCLK_MASK 0x00020000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK_MASK 0x00040000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK_MASK 0x00080000L
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK_MASK 0x00100000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK_MASK 0x00200000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK_MASK 0x00400000L
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK_MASK 0x00800000L
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK_MASK 0x01000000L
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK_MASK 0x02000000L
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK_MASK 0x04000000L
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK 0x40000000L
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK 0x80000000L
+//UVD_GPCOM_VCPU_CMD
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
+
+
+// addressBlock: uvd_vcn_cdefe_cdefe_broadcast_dec0
+//CDEFE_SUVD_CGC_GATE
+#define CDEFE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define CDEFE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define CDEFE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define CDEFE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define CDEFE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define CDEFE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define CDEFE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define CDEFE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define CDEFE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define CDEFE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define CDEFE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define CDEFE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define CDEFE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define CDEFE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define CDEFE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define CDEFE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define CDEFE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define CDEFE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define CDEFE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define CDEFE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define CDEFE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define CDEFE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define CDEFE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define CDEFE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define CDEFE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define CDEFE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define CDEFE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define CDEFE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define CDEFE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define CDEFE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define CDEFE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define CDEFE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define CDEFE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define CDEFE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define CDEFE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define CDEFE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define CDEFE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define CDEFE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define CDEFE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define CDEFE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define CDEFE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define CDEFE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define CDEFE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define CDEFE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define CDEFE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define CDEFE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//CDEFE_SUVD_CGC_GATE2
+#define CDEFE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define CDEFE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define CDEFE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define CDEFE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define CDEFE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define CDEFE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define CDEFE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define CDEFE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define CDEFE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//CDEFE_SUVD_CGC_CTRL
+#define CDEFE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define CDEFE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define CDEFE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define CDEFE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define CDEFE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define CDEFE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define CDEFE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define CDEFE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define CDEFE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define CDEFE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+
+
+// addressBlock: uvd_ecpudec
+//UVD_VCPU_CACHE_OFFSET0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET1
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE1
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET2
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE2
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET3
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE3
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET4
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE4
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET5
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE5
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET6
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE6
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET7
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE7
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET8
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE8
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET1
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE1
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CNTL
+#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x0
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x4
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x5
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x6
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x7
+#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x8
+#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9
+#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0xa
+#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0xb
+#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0xd
+#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x10
+#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x12
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+#define UVD_VCPU_CNTL__BLK_RST__SHIFT 0x1c
+#define UVD_VCPU_CNTL__RUNSTALL__SHIFT 0x1d
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST__SHIFT 0x1e
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST__SHIFT 0x1f
+#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000FL
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
+#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
+#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
+#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
+#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000E000L
+#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
+#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+#define UVD_VCPU_CNTL__BLK_RST_MASK 0x10000000L
+#define UVD_VCPU_CNTL__RUNSTALL_MASK 0x20000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST_MASK 0x40000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST_MASK 0x80000000L
+//UVD_VCPU_PRID
+#define UVD_VCPU_PRID__PRID__SHIFT 0x0
+#define UVD_VCPU_PRID__PRID_MASK 0x0000FFFFL
+//UVD_VCPU_TRCE
+#define UVD_VCPU_TRCE__PC__SHIFT 0x0
+#define UVD_VCPU_TRCE__PC_MASK 0x0FFFFFFFL
+//UVD_VCPU_TRCE_RD
+#define UVD_VCPU_TRCE_RD__DATA__SHIFT 0x0
+#define UVD_VCPU_TRCE_RD__DATA_MASK 0xFFFFFFFFL
+//UVD_VCPU_IND_INDEX
+#define UVD_VCPU_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_VCPU_IND_INDEX__INDEX_MASK 0x000001FFL
+//UVD_VCPU_IND_DATA
+#define UVD_VCPU_IND_DATA__DATA__SHIFT 0x0
+#define UVD_VCPU_IND_DATA__DATA_MASK 0xFFFFFFFFL
+
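+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): every field above is described by a __SHIFT/_MASK pair, so a
+ * field is typically updated by clearing its mask and or-ing in the
+ * shifted value:
+ *
+ *   reg &= ~UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK;
+ *   reg |= (val << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT) &
+ *          UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK;
+ */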
+
+// addressBlock: uvd_lmi_adpdec
+//UVD_LMI_RE_64BIT_BAR_LOW
+#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RE_64BIT_BAR_HIGH
+#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_IT_64BIT_BAR_LOW
+#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_IT_64BIT_BAR_HIGH
+#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MP_64BIT_BAR_LOW
+#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MP_64BIT_BAR_HIGH
+#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_CM_64BIT_BAR_LOW
+#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_CM_64BIT_BAR_HIGH
+#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_DB_64BIT_BAR_LOW
+#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_DB_64BIT_BAR_HIGH
+#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_DBW_64BIT_BAR_LOW
+#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_IDCT_64BIT_BAR_LOW
+#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_IDCT_64BIT_BAR_HIGH
+#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S0_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S0_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S1_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S1_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_DBW_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_LOW
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_HIGH
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_CENC_64BIT_BAR_LOW
+#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_CENC_64BIT_BAR_HIGH
+#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_SRE_64BIT_BAR_LOW
+#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_SRE_64BIT_BAR_HIGH
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_LOW
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_SPH_64BIT_BAR_HIGH
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_ADP_ATOMIC_CONFIG
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE__SHIFT 0x0
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE__SHIFT 0x4
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE__SHIFT 0x8
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE__SHIFT 0xc
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG__SHIFT 0x10
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE_MASK 0x0000000FL
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE_MASK 0x000000F0L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE_MASK 0x00000F00L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE_MASK 0x0000F000L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG_MASK 0x000F0000L
+//UVD_LMI_ARB_CTRL2
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST__SHIFT 0x2
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST__SHIFT 0x6
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX__SHIFT 0xa
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX__SHIFT 0x14
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST_MASK 0x0000003CL
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST_MASK 0x000003C0L
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX_MASK 0x000FFC00L
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX_MASK 0xFFF00000L
+//UVD_LMI_VCPU_CACHE_VMIDS_MULTI
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID__SHIFT 0x1c
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID_MASK 0x0000000FL
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID_MASK 0x0F000000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID_MASK 0xF0000000L
+//UVD_LMI_VCPU_NC_VMIDS_MULTI
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID_MASK 0x0F000000L
+//UVD_LMI_LAT_CTRL
+#define UVD_LMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_LMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_LMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_LMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_LMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_LMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_LMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_LMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_LMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_LMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_LMI_LAT_CNTR
+#define UVD_LMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_LMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_LMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_LMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_LMI_AVG_LAT_CNTR
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_LMI_SPH
+#define UVD_LMI_SPH__ADDR__SHIFT 0x0
+#define UVD_LMI_SPH__STS__SHIFT 0x1c
+#define UVD_LMI_SPH__STS_VALID__SHIFT 0x1e
+#define UVD_LMI_SPH__STS_OVERFLOW__SHIFT 0x1f
+#define UVD_LMI_SPH__ADDR_MASK 0x0FFFFFFFL
+#define UVD_LMI_SPH__STS_MASK 0x30000000L
+#define UVD_LMI_SPH__STS_VALID_MASK 0x40000000L
+#define UVD_LMI_SPH__STS_OVERFLOW_MASK 0x80000000L
+//UVD_LMI_VCPU_CACHE_VMID
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_LMI_CTRL2
+#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x0
+#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x1
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x2
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x3
+#define UVD_LMI_CTRL2__CRC1_RESET__SHIFT 0x4
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x7
+#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0xd
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0xe
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0xf
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN__SHIFT 0x10
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP__SHIFT 0x19
+#define UVD_LMI_CTRL2__NJ_MIF_GATING__SHIFT 0x1a
+#define UVD_LMI_CTRL2__CRC1_SEL__SHIFT 0x1b
+#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
+#define UVD_LMI_CTRL2__CRC1_RESET_MASK 0x00000010L
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
+#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN_MASK 0x00010000L
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP_MASK 0x02000000L
+#define UVD_LMI_CTRL2__NJ_MIF_GATING_MASK 0x04000000L
+#define UVD_LMI_CTRL2__CRC1_SEL_MASK 0xF8000000L
+//UVD_LMI_URGENT_CTRL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL__SHIFT 0x1
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x2
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x8
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL__SHIFT 0x9
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0xa
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL__SHIFT 0x10
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL__SHIFT 0x11
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT__SHIFT 0x12
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL__SHIFT 0x18
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL__SHIFT 0x19
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT__SHIFT 0x1a
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL_MASK 0x00000002L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x0000003CL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00000100L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL_MASK 0x00000200L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00003C00L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL_MASK 0x00010000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL_MASK 0x00020000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT_MASK 0x003C0000L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL_MASK 0x01000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL_MASK 0x02000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT_MASK 0x3C000000L
+//UVD_LMI_CTRL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x0
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x8
+#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x9
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0xb
+#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0xc
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0xd
+#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0xe
+#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0xf
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x14
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x15
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x16
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x17
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x18
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x19
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x1a
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ__SHIFT 0x1b
+#define UVD_LMI_CTRL__MC_BLK_RST__SHIFT 0x1c
+#define UVD_LMI_CTRL__UMC_BLK_RST__SHIFT 0x1d
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000FFL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
+#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
+#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
+#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000F8000L
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ_MASK 0x08000000L
+#define UVD_LMI_CTRL__MC_BLK_RST_MASK 0x10000000L
+#define UVD_LMI_CTRL__UMC_BLK_RST_MASK 0x20000000L
+//UVD_LMI_STATUS
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3
+#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x4
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x5
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x7
+#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x8
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9
+#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0xa
+#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0xb
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0xc
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0xd
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN__SHIFT 0x12
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN__SHIFT 0x13
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN__SHIFT 0x14
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN__SHIFT 0x15
+#define UVD_LMI_STATUS__CENC_READ_CLEAN__SHIFT 0x16
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
+#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN_MASK 0x00040000L
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN_MASK 0x00080000L
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN_MASK 0x00100000L
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN_MASK 0x00200000L
+#define UVD_LMI_STATUS__CENC_READ_CLEAN_MASK 0x00400000L
+//UVD_LMI_PERFMON_CTRL
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_LMI_PERFMON_COUNT_LO
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_LMI_PERFMON_COUNT_HI
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_LMI_ADP_SWAP_CNTL
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x6
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x8
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP__SHIFT 0xa
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP__SHIFT 0xc
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0xe
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x10
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x12
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP__SHIFT 0x14
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x18
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x1c
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x1e
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000C0L
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000C00L
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000C000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000C0000L
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP_MASK 0x00300000L
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L
+//UVD_LMI_RBC_RB_VMID
+#define UVD_LMI_RBC_RB_VMID__RB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_RB_VMID__RB_VMID_MASK 0x0000000FL
+//UVD_LMI_RBC_IB_VMID
+#define UVD_LMI_RBC_IB_VMID__IB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_IB_VMID__IB_VMID_MASK 0x0000000FL
+//UVD_LMI_MC_CREDITS
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS__SHIFT 0x0
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS__SHIFT 0x8
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS__SHIFT 0x10
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS__SHIFT 0x18
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS_MASK 0x0000003FL
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS_MASK 0x00003F00L
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS_MASK 0x003F0000L
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS_MASK 0x3F000000L
+//UVD_LMI_ADP_IND_INDEX
+#define UVD_LMI_ADP_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_LMI_ADP_IND_INDEX__INDEX_MASK 0x00001FFFL
+//UVD_LMI_ADP_IND_DATA
+#define UVD_LMI_ADP_IND_DATA__DATA__SHIFT 0x0
+#define UVD_LMI_ADP_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_LMI_ADP_PF_EN
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN__SHIFT 0x0
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN__SHIFT 0x1
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN__SHIFT 0x2
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN_MASK 0x00000001L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN_MASK 0x00000002L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN_MASK 0x00000004L
+//UVD_LMI_PREF_CTRL
+#define UVD_LMI_PREF_CTRL__PREF_RST__SHIFT 0x0
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS__SHIFT 0x1
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB__SHIFT 0x2
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE__SHIFT 0x3
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE__SHIFT 0x4
+#define UVD_LMI_PREF_CTRL__PREF_SIZE__SHIFT 0x13
+#define UVD_LMI_PREF_CTRL__PREF_RST_MASK 0x00000001L
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS_MASK 0x00000002L
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB_MASK 0x00000004L
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE_MASK 0x00000008L
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE_MASK 0x00000070L
+#define UVD_LMI_PREF_CTRL__PREF_SIZE_MASK 0xFFF80000L
+
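+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): the *_64BIT_BAR_LOW/_HIGH pairs above split a 64-bit address
+ * across two 32-bit registers, along the lines of:
+ *
+ *   low  = lower_32_bits(addr);   // BITS_31_0
+ *   high = upper_32_bits(addr);   // BITS_63_32
+ */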
+
+// addressBlock: uvd_uvd_jpeg0_jpegnpdec
+//UVD_JPEG_CNTL
+#define UVD_JPEG_CNTL__REQUEST_EN__SHIFT 0x1
+#define UVD_JPEG_CNTL__ERR_RST_EN__SHIFT 0x2
+#define UVD_JPEG_CNTL__DBG_MUX_SEL__SHIFT 0x8
+#define UVD_JPEG_CNTL__REQUEST_EN_MASK 0x00000002L
+#define UVD_JPEG_CNTL__ERR_RST_EN_MASK 0x00000004L
+#define UVD_JPEG_CNTL__DBG_MUX_SEL_MASK 0x00007F00L
+//UVD_JPEG_RB_BASE
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF__SHIFT 0x0
+#define UVD_JPEG_RB_BASE__RB_BASE__SHIFT 0x6
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF_MASK 0x0000003FL
+#define UVD_JPEG_RB_BASE__RB_BASE_MASK 0xFFFFFFC0L
+//UVD_JPEG_RB_WPTR
+#define UVD_JPEG_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JPEG_RB_WPTR__RB_WPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_RPTR
+#define UVD_JPEG_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JPEG_RB_RPTR__RB_RPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_SIZE
+#define UVD_JPEG_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JPEG_RB_SIZE__RB_SIZE_MASK 0x3FFFFFF0L
+//UVD_JPEG_DEC_CNT
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT__SHIFT 0x0
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT_MASK 0xFFFFFFFFL
+//UVD_JPEG_SPS_INFO
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_SPS1_INFO
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC__SHIFT 0x0
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT__SHIFT 0x3
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422__SHIFT 0x4
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC_MASK 0x00000007L
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT_MASK 0x00000008L
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422_MASK 0x00000010L
+//UVD_JPEG_RE_TIMER
+#define UVD_JPEG_RE_TIMER__TIMER_OUT__SHIFT 0x0
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN__SHIFT 0x10
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_MASK 0x000000FFL
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN_MASK 0x00010000L
+//UVD_JPEG_DEC_SCRATCH0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+//UVD_JPEG_INT_EN
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN__SHIFT 0x0
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN__SHIFT 0x1
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN__SHIFT 0x2
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN__SHIFT 0x6
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN__SHIFT 0x7
+#define UVD_JPEG_INT_EN__EOI_ERR_EN__SHIFT 0x8
+#define UVD_JPEG_INT_EN__HFM_ERR_EN__SHIFT 0x9
+#define UVD_JPEG_INT_EN__RST_ERR_EN__SHIFT 0xa
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN__SHIFT 0xb
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN__SHIFT 0xc
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN__SHIFT 0xd
+#define UVD_JPEG_INT_EN__FMT_ERR_EN__SHIFT 0xe
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN__SHIFT 0xf
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN_MASK 0x00000001L
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK 0x00000002L
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN_MASK 0x00000004L
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN_MASK 0x00000040L
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN_MASK 0x00000080L
+#define UVD_JPEG_INT_EN__EOI_ERR_EN_MASK 0x00000100L
+#define UVD_JPEG_INT_EN__HFM_ERR_EN_MASK 0x00000200L
+#define UVD_JPEG_INT_EN__RST_ERR_EN_MASK 0x00000400L
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN_MASK 0x00000800L
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK 0x00001000L
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN_MASK 0x00002000L
+#define UVD_JPEG_INT_EN__FMT_ERR_EN_MASK 0x00004000L
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN_MASK 0x00008000L
+//UVD_JPEG_INT_STAT
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT__SHIFT 0x0
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT__SHIFT 0x1
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT__SHIFT 0x2
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT__SHIFT 0x6
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT__SHIFT 0x7
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT__SHIFT 0x8
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT__SHIFT 0x9
+#define UVD_JPEG_INT_STAT__RST_ERR_INT__SHIFT 0xa
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT__SHIFT 0xb
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT__SHIFT 0xc
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT__SHIFT 0xd
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT__SHIFT 0xe
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT__SHIFT 0xf
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT_MASK 0x00000001L
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT_MASK 0x00000002L
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT_MASK 0x00000004L
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT_MASK 0x00000040L
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT_MASK 0x00000080L
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT_MASK 0x00000100L
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT_MASK 0x00000200L
+#define UVD_JPEG_INT_STAT__RST_ERR_INT_MASK 0x00000400L
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT_MASK 0x00000800L
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT_MASK 0x00001000L
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT_MASK 0x00002000L
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT_MASK 0x00004000L
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT_MASK 0x00008000L
+//UVD_JPEG_TIER_CNTL0
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC__SHIFT 0x8
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC__SHIFT 0xb
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC__SHIFT 0x11
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC__SHIFT 0x14
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC__SHIFT 0x17
+#define UVD_JPEG_TIER_CNTL0__Y_TQ__SHIFT 0x1a
+#define UVD_JPEG_TIER_CNTL0__U_TQ__SHIFT 0x1c
+#define UVD_JPEG_TIER_CNTL0__V_TQ__SHIFT 0x1e
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL_MASK 0x00000003L
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID_MASK 0x000000C0L
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC_MASK 0x00000700L
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC_MASK 0x00003800L
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC_MASK 0x0001C000L
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC_MASK 0x000E0000L
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC_MASK 0x00700000L
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC_MASK 0x03800000L
+#define UVD_JPEG_TIER_CNTL0__Y_TQ_MASK 0x0C000000L
+#define UVD_JPEG_TIER_CNTL0__U_TQ_MASK 0x30000000L
+#define UVD_JPEG_TIER_CNTL0__V_TQ_MASK 0xC0000000L
+//UVD_JPEG_TIER_CNTL1
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_CNTL2
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE__SHIFT 0x1
+#define UVD_JPEG_TIER_CNTL2__TQ__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL2__TH__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL2__TC__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL2__TD__SHIFT 0x7
+#define UVD_JPEG_TIER_CNTL2__TA__SHIFT 0xa
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL_MASK 0x00000001L
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE_MASK 0x00000002L
+#define UVD_JPEG_TIER_CNTL2__TQ_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL2__TH_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL2__TC_MASK 0x00000040L
+#define UVD_JPEG_TIER_CNTL2__TD_MASK 0x00000380L
+#define UVD_JPEG_TIER_CNTL2__TA_MASK 0x00001C00L
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN_MASK 0x00004000L
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_STATUS
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE__SHIFT 0x0
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE__SHIFT 0x1
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE_MASK 0x00000001L
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE_MASK 0x00000002L
+
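+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): UVD_JPEG_INT_EN enables interrupt sources by or-ing the
+ * single-bit masks above, for example:
+ *
+ *   int_en = UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK |
+ *            UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK;
+ */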
+
+// addressBlock: uvd_uvd_jpeg_sclk0_jpegnpsclkdec
+//UVD_JPEG_OUTBUF_CNTL
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN__SHIFT 0x2
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX__SHIFT 0x6
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT__SHIFT 0x7
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER__SHIFT 0x9
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK__SHIFT 0x10
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT_MASK 0x00000003L
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN_MASK 0x00000004L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX_MASK 0x00000040L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT_MASK 0x00000180L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER_MASK 0x00001E00L
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK_MASK 0x00010000L
+//UVD_JPEG_OUTBUF_WPTR
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_OUTBUF_RPTR
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_PITCH
+#define UVD_JPEG_PITCH__PITCH__SHIFT 0x0
+#define UVD_JPEG_PITCH__PITCH_MASK 0xFFFFFFFFL
+//UVD_JPEG_UV_PITCH
+#define UVD_JPEG_UV_PITCH__UV_PITCH__SHIFT 0x0
+#define UVD_JPEG_UV_PITCH__UV_PITCH_MASK 0xFFFFFFFFL
+//JPEG_DEC_Y_GFX8_TILING_SURFACE
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_UV_GFX8_TILING_SURFACE
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_GFX8_ADDR_CONFIG
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//JPEG_DEC_Y_GFX10_TILING_SURFACE
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_UV_GFX10_TILING_SURFACE
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_GFX10_ADDR_CONFIG
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//JPEG_DEC_ADDR_MODE
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L
+//UVD_JPEG_OUTPUT_XY
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X__SHIFT 0x0
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y__SHIFT 0x10
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X_MASK 0x00003FFFL
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y_MASK 0x3FFF0000L
+//UVD_JPEG_GPCOM_CMD
+#define UVD_JPEG_GPCOM_CMD__CMD__SHIFT 0x1
+#define UVD_JPEG_GPCOM_CMD__CMD_MASK 0x0000000EL
+//UVD_JPEG_GPCOM_DATA0
+#define UVD_JPEG_GPCOM_DATA0__DATA0__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_GPCOM_DATA1
+#define UVD_JPEG_GPCOM_DATA1__DATA1__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_JPEG_SCRATCH1
+#define UVD_JPEG_SCRATCH1__SCRATCH1__SHIFT 0x0
+#define UVD_JPEG_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL
+//UVD_JPEG_DEC_SOFT_RST
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET__SHIFT 0x0
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS__SHIFT 0x10
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L
+
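+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): single-field registers such as the GFX10 tiling surfaces
+ * above are read with the usual mask-and-shift idiom:
+ *
+ *   mode = (reg & JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK) >>
+ *          JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT;
+ */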
+
+// addressBlock: uvd_uvd_jrbc0_uvd_jrbc_dec
+//UVD_JRBC_RB_WPTR
+#define UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_CNTL
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC_IB_SIZE
+#define UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_SOFT_RESET
+#define UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC_STATUS
+#define UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC_RB_RPTR
+#define UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_CMD
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_SIZE
+#define UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC_SCRATCH0
+#define UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
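+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): JRBC job completion is typically detected by testing the
+ * JOB_DONE bits in UVD_JRBC_STATUS, e.g.:
+ *
+ *   done = status & (UVD_JRBC_STATUS__RB_JOB_DONE_MASK |
+ *                    UVD_JRBC_STATUS__IB_JOB_DONE_MASK);
+ */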
+
+// addressBlock: uvd_uvd_jmi0_uvd_jmi_dec
+//UVD_JPEG_DEC_PF_CTRL
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_LMI_JRBC_CTRL
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_LMI_JPEG_CTRL
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//JPEG_LMI_DROP
+#define JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+//UVD_LMI_JRBC_IB_VMID
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JRBC_RB_VMID
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JPEG_VMID
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: uvd_uvd_jmi_common_dec
+//UVD_JADP_MCIF_URGENT_CTRL
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK__SHIFT 0x0
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK__SHIFT 0x6
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER__SHIFT 0xb
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP__SHIFT 0x11
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP__SHIFT 0x15
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN__SHIFT 0x19
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN__SHIFT 0x1a
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK_MASK 0x0000003FL
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK_MASK 0x000007C0L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER_MASK 0x0001F800L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP_MASK 0x001E0000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP_MASK 0x01E00000L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN_MASK 0x02000000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN_MASK 0x04000000L
+//UVD_JMI_URGENT_CTRL
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x4
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x10
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0x14
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x000000F0L
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00010000L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00F00000L
+//UVD_JMI_CTRL
+#define UVD_JMI_CTRL__STALL_MC_ARB__SHIFT 0x0
+#define UVD_JMI_CTRL__MASK_MC_URGENT__SHIFT 0x1
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x2
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER__SHIFT 0x8
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER__SHIFT 0x10
+#define UVD_JMI_CTRL__STALL_MC_ARB_MASK 0x00000001L
+#define UVD_JMI_CTRL__MASK_MC_URGENT_MASK 0x00000002L
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000004L
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER_MASK 0x0000FF00L
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER_MASK 0x00FF0000L
+//JPEG_MEMCHECK_CLAMPING_CNTL
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN_MASK 0x00000001L
+//JPEG_MEMCHECK_SAFE_ADDR
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR_MASK 0xFFFFFFFFL
+//JPEG_MEMCHECK_SAFE_ADDR_64BIT
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT_MASK 0xFFFFFFFFL
+//UVD_JMI_LAT_CTRL
+#define UVD_JMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_JMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_JMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_JMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_JMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_JMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_JMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_JMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_JMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_JMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_JMI_LAT_CNTR
+#define UVD_JMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_JMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_JMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_JMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_JMI_AVG_LAT_CNTR
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_JMI_PERFMON_CTRL
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_JMI_PERFMON_COUNT_LO
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_JMI_PERFMON_COUNT_HI
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_JMI_CLEAN_STATUS
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN__SHIFT 0x0
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW__SHIFT 0x1
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW__SHIFT 0x3
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING__SHIFT 0x4
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN__SHIFT 0x8
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN__SHIFT 0x10
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_MASK 0x00000001L
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW_MASK 0x00000002L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW_MASK 0x00000008L
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING_MASK 0x00000010L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN_MASK 0x00000100L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN_MASK 0x00010000L
+//UVD_JMI_CNTL
+#define UVD_JMI_CNTL__SOFT_RESET__SHIFT 0x0
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX__SHIFT 0x8
+#define UVD_JMI_CNTL__SOFT_RESET_MASK 0x00000001L
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX_MASK 0x0003FF00L
+
+
+// addressBlock: uvd_uvd_jpeg_common_dec
+//JPEG_SOFT_RESET_STATUS
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS__SHIFT 0x0
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS__SHIFT 0x8
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS__SHIFT 0x11
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS__SHIFT 0x12
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS__SHIFT 0x18
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS_MASK 0x00000001L
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS_MASK 0x00000100L
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS_MASK 0x00020000L
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS_MASK 0x00040000L
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS_MASK 0x01000000L
+//JPEG_SYS_INT_EN
+#define JPEG_SYS_INT_EN__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_EN__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_EN__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_EN__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_EN1
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_EN1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_EN1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_EN1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_EN1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_STATUS
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_STATUS1
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_ACK
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_ACK__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_ACK1
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_ACK1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_MEMCHECK_SYS_INT_EN
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_EN1
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN_MASK 0x00000020L
+//JPEG_MEMCHECK_SYS_INT_STAT
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_STAT1
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_STAT2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MEMCHECK_SYS_INT_ACK
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_ACK1
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_ACK2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MASTINT_EN
+#define JPEG_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define JPEG_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define JPEG_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define JPEG_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//JPEG_IH_CTRL
+#define JPEG_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0
+#define JPEG_IH_CTRL__IH_STALL_EN__SHIFT 0x1
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define JPEG_IH_CTRL__IH_VMID__SHIFT 0x3
+#define JPEG_IH_CTRL__IH_USER_DATA__SHIFT 0x7
+#define JPEG_IH_CTRL__IH_RINGID__SHIFT 0x13
+#define JPEG_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L
+#define JPEG_IH_CTRL__IH_STALL_EN_MASK 0x00000002L
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define JPEG_IH_CTRL__IH_VMID_MASK 0x00000078L
+#define JPEG_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L
+#define JPEG_IH_CTRL__IH_RINGID_MASK 0x07F80000L
+//JRBBM_ARB_CTRL
+#define JRBBM_ARB_CTRL__SRBM_DROP__SHIFT 0x0
+#define JRBBM_ARB_CTRL__EJRBC_DROP__SHIFT 0x1
+#define JRBBM_ARB_CTRL__DJRBC0_DROP__SHIFT 0x2
+#define JRBBM_ARB_CTRL__SRBM_DROP_MASK 0x00000001L
+#define JRBBM_ARB_CTRL__EJRBC_DROP_MASK 0x00000002L
+#define JRBBM_ARB_CTRL__DJRBC0_DROP_MASK 0x00000004L
+
+
+// addressBlock: uvd_uvd_jpeg_common_sclk_dec
+//JPEG_CGC_GATE
+#define JPEG_CGC_GATE__JPEG0_DEC__SHIFT 0x0
+#define JPEG_CGC_GATE__JPEG_ENC__SHIFT 0x8
+#define JPEG_CGC_GATE__JMCIF__SHIFT 0x9
+#define JPEG_CGC_GATE__JRBBM__SHIFT 0xa
+#define JPEG_CGC_GATE__JPEG0_DEC_MASK 0x00000001L
+#define JPEG_CGC_GATE__JPEG_ENC_MASK 0x00000100L
+#define JPEG_CGC_GATE__JMCIF_MASK 0x00000200L
+#define JPEG_CGC_GATE__JRBBM_MASK 0x00000400L
+//JPEG_CGC_CTRL
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x1
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x5
+#define JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT 0x10
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE__SHIFT 0x18
+#define JPEG_CGC_CTRL__JMCIF_MODE__SHIFT 0x19
+#define JPEG_CGC_CTRL__JRBBM_MODE__SHIFT 0x1a
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000001EL
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK 0x00001FE0L
+#define JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK 0x00010000L
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK 0x01000000L
+#define JPEG_CGC_CTRL__JMCIF_MODE_MASK 0x02000000L
+#define JPEG_CGC_CTRL__JRBBM_MODE_MASK 0x04000000L
+//JPEG_CGC_STATUS
+#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE__SHIFT 0x0
+#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE__SHIFT 0x1
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE__SHIFT 0x10
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE__SHIFT 0x11
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE__SHIFT 0x12
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE__SHIFT 0x13
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE__SHIFT 0x14
+#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE_MASK 0x00000001L
+#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE_MASK 0x00000002L
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE_MASK 0x00010000L
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE_MASK 0x00020000L
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE_MASK 0x00040000L
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE_MASK 0x00080000L
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE_MASK 0x00100000L
+//JPEG_COMN_CGC_MEM_CTRL
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN__SHIFT 0x0
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN__SHIFT 0x1
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN__SHIFT 0x2
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN__SHIFT 0x3
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN_MASK 0x00000001L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN_MASK 0x00000002L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN_MASK 0x00000004L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN_MASK 0x00000008L
+//JPEG_DEC_CGC_MEM_CTRL
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN__SHIFT 0x0
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN__SHIFT 0x1
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN__SHIFT 0x2
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN__SHIFT 0x3
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN_MASK 0x00000001L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN_MASK 0x00000002L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN_MASK 0x00000004L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN_MASK 0x00000008L
+//JPEG_ENC_CGC_MEM_CTRL
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN__SHIFT 0x0
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN__SHIFT 0x1
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN__SHIFT 0x2
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN__SHIFT 0x3
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN_MASK 0x00000001L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN_MASK 0x00000002L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN_MASK 0x00000004L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN_MASK 0x00000008L
+//JPEG_PERF_BANK_CONF
+#define JPEG_PERF_BANK_CONF__RESET__SHIFT 0x0
+#define JPEG_PERF_BANK_CONF__PEEK__SHIFT 0x8
+#define JPEG_PERF_BANK_CONF__CONCATENATE__SHIFT 0x10
+#define JPEG_PERF_BANK_CONF__CORE_SEL__SHIFT 0x15
+#define JPEG_PERF_BANK_CONF__RESET_MASK 0x0000000FL
+#define JPEG_PERF_BANK_CONF__PEEK_MASK 0x00000F00L
+#define JPEG_PERF_BANK_CONF__CONCATENATE_MASK 0x00030000L
+#define JPEG_PERF_BANK_CONF__CORE_SEL_MASK 0x00E00000L
+//JPEG_PERF_BANK_EVENT_SEL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0__SHIFT 0x0
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1__SHIFT 0x8
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2__SHIFT 0x10
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3__SHIFT 0x18
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0_MASK 0x000000FFL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1_MASK 0x0000FF00L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2_MASK 0x00FF0000L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3_MASK 0xFF000000L
+//JPEG_PERF_BANK_COUNT0
+#define JPEG_PERF_BANK_COUNT0__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT0__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT1
+#define JPEG_PERF_BANK_COUNT1__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT1__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT2
+#define JPEG_PERF_BANK_COUNT2__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT2__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT3
+#define JPEG_PERF_BANK_COUNT3__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT3__COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd_uvd_pg_dec
+//UVD_IPX_DLDO_CONFIG
+#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT 0x2
+#define UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT 0x4
+#define UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT 0x6
+#define UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT 0x8
+#define UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT 0xa
+#define UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT 0xc
+#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG_MASK 0x0000000CL
+#define UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG_MASK 0x00000030L
+#define UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG_MASK 0x000000C0L
+#define UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG_MASK 0x00000300L
+#define UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG_MASK 0x00000C00L
+#define UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG_MASK 0x00003000L
+//UVD_IPX_DLDO_STATUS
+#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT 0x1
+#define UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT 0x2
+#define UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT 0x3
+#define UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT 0x4
+#define UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT 0x5
+#define UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT 0x6
+#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK 0x00000002L
+#define UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK 0x00000004L
+#define UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK 0x00000008L
+#define UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK 0x00000010L
+#define UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK 0x00000020L
+#define UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK 0x00000040L
+//UVD_POWER_STATUS
+#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x0
+#define UVD_POWER_STATUS__UVD_PG_MODE__SHIFT 0x2
+#define UVD_POWER_STATUS__UVD_CG_MODE__SHIFT 0x4
+#define UVD_POWER_STATUS__UVD_PG_EN__SHIFT 0x8
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS__SHIFT 0x9
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS__SHIFT 0xb
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP__SHIFT 0x1f
+#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000001L
+#define UVD_POWER_STATUS__UVD_PG_MODE_MASK 0x00000004L
+#define UVD_POWER_STATUS__UVD_CG_MODE_MASK 0x00000030L
+#define UVD_POWER_STATUS__UVD_PG_EN_MASK 0x00000100L
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS_MASK 0x00000800L
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK 0x80000000L
+//UVD_JPEG_POWER_STATUS
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS__SHIFT 0x0
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE__SHIFT 0x4
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS__SHIFT 0x8
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS__SHIFT 0x9
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP__SHIFT 0x1f
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK 0x00000001L
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK 0x00000010L
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS_MASK 0x00000100L
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP_MASK 0x80000000L
+//UVD_MC_DJPEG_RD_SPACE
+#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE__SHIFT 0x0
+#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE_MASK 0x0003FFFFL
+//UVD_MC_DJPEG_WR_SPACE
+#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE__SHIFT 0x0
+#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE_MASK 0x0003FFFFL
+//UVD_PG_IND_INDEX
+#define UVD_PG_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_PG_IND_INDEX__INDEX_MASK 0x0000003FL
+//UVD_PG_IND_DATA
+#define UVD_PG_IND_DATA__DATA__SHIFT 0x0
+#define UVD_PG_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//CC_UVD_HARVESTING
+#define CC_UVD_HARVESTING__MMSCH_DISABLE__SHIFT 0x0
+#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define CC_UVD_HARVESTING__MMSCH_DISABLE_MASK 0x00000001L
+#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
+//UVD_DPG_LMA_CTL
+#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0
+#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2
+#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0xe
+#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L
+#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFFC000L
+//UVD_DPG_LMA_DATA
+#define UVD_DPG_LMA_DATA__LMA_DATA__SHIFT 0x0
+#define UVD_DPG_LMA_DATA__LMA_DATA_MASK 0xFFFFFFFFL
+//UVD_DPG_LMA_MASK
+#define UVD_DPG_LMA_MASK__LMA_MASK__SHIFT 0x0
+#define UVD_DPG_LMA_MASK__LMA_MASK_MASK 0xFFFFFFFFL
+//UVD_DPG_PAUSE
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L
+//UVD_SCRATCH1
+#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0
+#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH2
+#define UVD_SCRATCH2__SCRATCH2_DATA__SHIFT 0x0
+#define UVD_SCRATCH2__SCRATCH2_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH3
+#define UVD_SCRATCH3__SCRATCH3_DATA__SHIFT 0x0
+#define UVD_SCRATCH3__SCRATCH3_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH4
+#define UVD_SCRATCH4__SCRATCH4_DATA__SHIFT 0x0
+#define UVD_SCRATCH4__SCRATCH4_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH5
+#define UVD_SCRATCH5__SCRATCH5_DATA__SHIFT 0x0
+#define UVD_SCRATCH5__SCRATCH5_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH6
+#define UVD_SCRATCH6__SCRATCH6_DATA__SHIFT 0x0
+#define UVD_SCRATCH6__SCRATCH6_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH7
+#define UVD_SCRATCH7__SCRATCH7_DATA__SHIFT 0x0
+#define UVD_SCRATCH7__SCRATCH7_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH8
+#define UVD_SCRATCH8__SCRATCH8_DATA__SHIFT 0x0
+#define UVD_SCRATCH8__SCRATCH8_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH9
+#define UVD_SCRATCH9__SCRATCH9_DATA__SHIFT 0x0
+#define UVD_SCRATCH9__SCRATCH9_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH10
+#define UVD_SCRATCH10__SCRATCH10_DATA__SHIFT 0x0
+#define UVD_SCRATCH10__SCRATCH10_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH11
+#define UVD_SCRATCH11__SCRATCH11_DATA__SHIFT 0x0
+#define UVD_SCRATCH11__SCRATCH11_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH12
+#define UVD_SCRATCH12__SCRATCH12_DATA__SHIFT 0x0
+#define UVD_SCRATCH12__SCRATCH12_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH13
+#define UVD_SCRATCH13__SCRATCH13_DATA__SHIFT 0x0
+#define UVD_SCRATCH13__SCRATCH13_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH14
+#define UVD_SCRATCH14__SCRATCH14_DATA__SHIFT 0x0
+#define UVD_SCRATCH14__SCRATCH14_DATA_MASK 0xFFFFFFFFL
+//UVD_FREE_COUNTER_REG
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER__SHIFT 0x0
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_DPG_VCPU_CACHE_OFFSET0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_VMID
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_REG_FILTER_EN
+#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN__SHIFT 0x0
+#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV__SHIFT 0x1
+#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN__SHIFT 0x2
+#define UVD_REG_FILTER_EN__JPEG_PRIV_EN__SHIFT 0x3
+#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN_MASK 0x00000001L
+#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV_MASK 0x00000002L
+#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN_MASK 0x00000004L
+#define UVD_REG_FILTER_EN__JPEG_PRIV_EN_MASK 0x00000008L
+//UVD_SECURITY_REG_VIO_REPORT
+#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO__SHIFT 0x0
+#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO__SHIFT 0x1
+#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO__SHIFT 0x2
+#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO__SHIFT 0x3
+#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO__SHIFT 0x4
+#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO__SHIFT 0x5
+#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO_MASK 0x00000001L
+#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO_MASK 0x00000002L
+#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO_MASK 0x00000004L
+#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO_MASK 0x00000008L
+#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO_MASK 0x00000010L
+#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO_MASK 0x00000020L
+//UVD_FW_VERSION
+#define UVD_FW_VERSION__FW_VERSION__SHIFT 0x0
+#define UVD_FW_VERSION__FW_VERSION_MASK 0xFFFFFFFFL
+//UVD_PF_STATUS
+#define UVD_PF_STATUS__JPEG_PF_OCCURED__SHIFT 0x0
+#define UVD_PF_STATUS__NJ_PF_OCCURED__SHIFT 0x1
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED__SHIFT 0x2
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED__SHIFT 0x3
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED__SHIFT 0x4
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED__SHIFT 0x5
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED__SHIFT 0x6
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED__SHIFT 0x7
+#define UVD_PF_STATUS__JPEG_PF_CLEAR__SHIFT 0x8
+#define UVD_PF_STATUS__NJ_PF_CLEAR__SHIFT 0x9
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR__SHIFT 0xa
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR__SHIFT 0xb
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR__SHIFT 0xc
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR__SHIFT 0xd
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR__SHIFT 0xe
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR__SHIFT 0xf
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED__SHIFT 0x10
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED__SHIFT 0x11
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED__SHIFT 0x12
+#define UVD_PF_STATUS__JPEG2_PF_OCCURED__SHIFT 0x13
+#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED__SHIFT 0x14
+#define UVD_PF_STATUS__JPEG2_PF_CLEAR__SHIFT 0x15
+#define UVD_PF_STATUS__ENCODER5_PF_OCCURED__SHIFT 0x16
+#define UVD_PF_STATUS__ENCODER5_PF_CLEAR__SHIFT 0x17
+#define UVD_PF_STATUS__JPEG_PF_OCCURED_MASK 0x00000001L
+#define UVD_PF_STATUS__NJ_PF_OCCURED_MASK 0x00000002L
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED_MASK 0x00000004L
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED_MASK 0x00000008L
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED_MASK 0x00000010L
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED_MASK 0x00000020L
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED_MASK 0x00000040L
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED_MASK 0x00000080L
+#define UVD_PF_STATUS__JPEG_PF_CLEAR_MASK 0x00000100L
+#define UVD_PF_STATUS__NJ_PF_CLEAR_MASK 0x00000200L
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR_MASK 0x00000400L
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR_MASK 0x00000800L
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR_MASK 0x00001000L
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR_MASK 0x00002000L
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR_MASK 0x00004000L
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR_MASK 0x00008000L
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED_MASK 0x00010000L
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED_MASK 0x00020000L
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED_MASK 0x00040000L
+#define UVD_PF_STATUS__JPEG2_PF_OCCURED_MASK 0x00080000L
+#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED_MASK 0x00100000L
+#define UVD_PF_STATUS__JPEG2_PF_CLEAR_MASK 0x00200000L
+#define UVD_PF_STATUS__ENCODER5_PF_OCCURED_MASK 0x00400000L
+#define UVD_PF_STATUS__ENCODER5_PF_CLEAR_MASK 0x00800000L
+//UVD_DPG_CLK_EN_VCPU_REPORT
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN__SHIFT 0x0
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT__SHIFT 0x1
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN_MASK 0x00000001L
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT_MASK 0x000000FEL
+//CC_UVD_VCPU_ERR_DETECT_BOT_LO
+#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO__SHIFT 0xc
+#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO_MASK 0xFFFFF000L
+//CC_UVD_VCPU_ERR_DETECT_BOT_HI
+#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI_MASK 0x0000FFFFL
+//CC_UVD_VCPU_ERR_DETECT_TOP_LO
+#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO__SHIFT 0xc
+#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO_MASK 0xFFFFF000L
+//CC_UVD_VCPU_ERR_DETECT_TOP_HI
+#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI_MASK 0x0000FFFFL
+//CC_UVD_VCPU_ERR
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS__SHIFT 0x0
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR__SHIFT 0x1
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN__SHIFT 0x2
+#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS__SHIFT 0x3
+#define CC_UVD_VCPU_ERR__RESET_ON_FAULT__SHIFT 0x4
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS_MASK 0x00000001L
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR_MASK 0x00000002L
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN_MASK 0x00000004L
+#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS_MASK 0x00000008L
+#define CC_UVD_VCPU_ERR__RESET_ON_FAULT_MASK 0x00000010L
+//CC_UVD_VCPU_ERR_INST_ADDR_LO
+#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO_MASK 0xFFFFFFFFL
+//CC_UVD_VCPU_ERR_INST_ADDR_HI
+#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI_MASK 0x0000FFFFL
+//UVD_LMI_MMSCH_NC_SPACE
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE__SHIFT 0x3
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE__SHIFT 0x6
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE__SHIFT 0x9
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE__SHIFT 0xc
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE__SHIFT 0xf
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE__SHIFT 0x12
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE__SHIFT 0x15
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE_MASK 0x00000007L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE_MASK 0x00000038L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE_MASK 0x000001C0L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE_MASK 0x00000E00L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE_MASK 0x00007000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE_MASK 0x00038000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE_MASK 0x001C0000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE_MASK 0x00E00000L
+//UVD_LMI_ATOMIC_SPACE
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE__SHIFT 0x0
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE__SHIFT 0x3
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE__SHIFT 0x6
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE__SHIFT 0x9
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE_MASK 0x00000007L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE_MASK 0x00000038L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE_MASK 0x000001C0L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE_MASK 0x00000E00L
+//UVD_GFX8_ADDR_CONFIG
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//UVD_GFX10_ADDR_CONFIG
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//UVD_GPCNT2_CNTL
+#define UVD_GPCNT2_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT2_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT2_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT2_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT2_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT2_CNTL__COUNTUP_MASK 0x00000004L
+//UVD_GPCNT2_TARGET_LOWER
+#define UVD_GPCNT2_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_STATUS_LOWER
+#define UVD_GPCNT2_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_TARGET_UPPER
+#define UVD_GPCNT2_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT2_STATUS_UPPER
+#define UVD_GPCNT2_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+//UVD_GPCNT3_CNTL
+#define UVD_GPCNT3_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT3_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT3_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT3_CNTL__FREQ__SHIFT 0x3
+#define UVD_GPCNT3_CNTL__DIV__SHIFT 0xa
+#define UVD_GPCNT3_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT3_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT3_CNTL__COUNTUP_MASK 0x00000004L
+#define UVD_GPCNT3_CNTL__FREQ_MASK 0x000003F8L
+#define UVD_GPCNT3_CNTL__DIV_MASK 0x0001FC00L
+//UVD_GPCNT3_TARGET_LOWER
+#define UVD_GPCNT3_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_STATUS_LOWER
+#define UVD_GPCNT3_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_TARGET_UPPER
+#define UVD_GPCNT3_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT3_STATUS_UPPER
+#define UVD_GPCNT3_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+//UVD_VCLK_DS_CNTL
+#define UVD_VCLK_DS_CNTL__VCLK_DS_EN__SHIFT 0x0
+#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS__SHIFT 0x4
+#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT__SHIFT 0x10
+#define UVD_VCLK_DS_CNTL__VCLK_DS_EN_MASK 0x00000001L
+#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS_MASK 0x00000010L
+#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L
+//UVD_DCLK_DS_CNTL
+#define UVD_DCLK_DS_CNTL__DCLK_DS_EN__SHIFT 0x0
+#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS__SHIFT 0x4
+#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT__SHIFT 0x10
+#define UVD_DCLK_DS_CNTL__DCLK_DS_EN_MASK 0x00000001L
+#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS_MASK 0x00000010L
+#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L
+//UVD_TSC_LOWER
+#define UVD_TSC_LOWER__COUNT__SHIFT 0x0
+#define UVD_TSC_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_TSC_UPPER
+#define UVD_TSC_UPPER__COUNT__SHIFT 0x0
+#define UVD_TSC_UPPER__COUNT_MASK 0x00FFFFFFL
+//VCN_FEATURES
+#define VCN_FEATURES__HAS_VIDEO_DEC__SHIFT 0x0
+#define VCN_FEATURES__HAS_VIDEO_ENC__SHIFT 0x1
+#define VCN_FEATURES__HAS_MJPEG_DEC__SHIFT 0x2
+#define VCN_FEATURES__HAS_MJPEG_ENC__SHIFT 0x3
+#define VCN_FEATURES__HAS_VIDEO_VIRT__SHIFT 0x4
+#define VCN_FEATURES__HAS_H264_LEGACY_DEC__SHIFT 0x5
+#define VCN_FEATURES__HAS_UDEC_DEC__SHIFT 0x6
+#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT 0x7
+#define VCN_FEATURES__HAS_SCLR_DEC__SHIFT 0x8
+#define VCN_FEATURES__HAS_VP9_DEC__SHIFT 0x9
+#define VCN_FEATURES__HAS_AV1_DEC__SHIFT 0xa
+#define VCN_FEATURES__HAS_EFC_ENC__SHIFT 0xb
+#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT 0xc
+#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT 0xd
+#define VCN_FEATURES__HAS_AV1_ENC__SHIFT 0xe
+#define VCN_FEATURES__INSTANCE_ID__SHIFT 0x1c
+#define VCN_FEATURES__HAS_VIDEO_DEC_MASK 0x00000001L
+#define VCN_FEATURES__HAS_VIDEO_ENC_MASK 0x00000002L
+#define VCN_FEATURES__HAS_MJPEG_DEC_MASK 0x00000004L
+#define VCN_FEATURES__HAS_MJPEG_ENC_MASK 0x00000008L
+#define VCN_FEATURES__HAS_VIDEO_VIRT_MASK 0x00000010L
+#define VCN_FEATURES__HAS_H264_LEGACY_DEC_MASK 0x00000020L
+#define VCN_FEATURES__HAS_UDEC_DEC_MASK 0x00000040L
+#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK 0x00000080L
+#define VCN_FEATURES__HAS_SCLR_DEC_MASK 0x00000100L
+#define VCN_FEATURES__HAS_VP9_DEC_MASK 0x00000200L
+#define VCN_FEATURES__HAS_AV1_DEC_MASK 0x00000400L
+#define VCN_FEATURES__HAS_EFC_ENC_MASK 0x00000800L
+#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK 0x00001000L
+#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK 0x00002000L
+#define VCN_FEATURES__HAS_AV1_ENC_MASK 0x00004000L
+#define VCN_FEATURES__INSTANCE_ID_MASK 0xF0000000L
+//UVD_GPUIOV_STATUS
+#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0
+#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L
+//UVD_SCRATCH15
+#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0
+#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
+//UVD_VERSION
+#define UVD_VERSION__VARIANT_TYPE__SHIFT 0x0
+#define UVD_VERSION__MINOR_VERSION__SHIFT 0x8
+#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10
+#define UVD_VERSION__INSTANCE_ID__SHIFT 0x1c
+#define UVD_VERSION__VARIANT_TYPE_MASK 0x000000FFL
+#define UVD_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+#define UVD_VERSION__MAJOR_VERSION_MASK 0x0FFF0000L
+#define UVD_VERSION__INSTANCE_ID_MASK 0xF0000000L
+//VCN_UMSCH_CNTL
+#define VCN_UMSCH_CNTL__umsch_fw_en__SHIFT 0x0
+#define VCN_UMSCH_CNTL__umsch_fw_en_MASK 0x00000001L
+//VCN_JPEG_DB_CTRL
+#define VCN_JPEG_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB1_DB_CTRL
+#define VCN_RB1_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB1_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB1_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB1_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB1_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB1_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB2_DB_CTRL
+#define VCN_RB2_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB2_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB2_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB2_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB2_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB2_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB3_DB_CTRL
+#define VCN_RB3_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB3_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB3_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB3_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB3_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB3_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB4_DB_CTRL
+#define VCN_RB4_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB4_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB4_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB4_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB4_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB4_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_UMSCH_RB_DB_CTRL
+#define VCN_UMSCH_RB_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_UMSCH_RB_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_UMSCH_RB_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_UMSCH_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_UMSCH_RB_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_UMSCH_RB_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB_DB_CTRL
+#define VCN_RB_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL0
+#define VCN_AGDB_CTRL0__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL0__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL0__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL0__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL0__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL0__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL1
+#define VCN_AGDB_CTRL1__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL1__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL1__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL1__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL1__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL1__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL2
+#define VCN_AGDB_CTRL2__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL2__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL2__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL2__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL2__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL2__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL3
+#define VCN_AGDB_CTRL3__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL3__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL3__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL3__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL3__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL3__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL4
+#define VCN_AGDB_CTRL4__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL4__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL4__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL4__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL4__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL4__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL5
+#define VCN_AGDB_CTRL5__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL5__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL5__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL5__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL5__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL5__HIT_MASK 0x80000000L
+//VCN_AGDB_MASK0
+#define VCN_AGDB_MASK0__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK0__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK1
+#define VCN_AGDB_MASK1__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK1__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK2
+#define VCN_AGDB_MASK2__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK2__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK3
+#define VCN_AGDB_MASK3__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK3__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK4
+#define VCN_AGDB_MASK4__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK4__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK5
+#define VCN_AGDB_MASK5__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK5__MASK_MASK 0x0FFFFFFCL
+//VCN_RB_ENABLE
+#define VCN_RB_ENABLE__RB_EN__SHIFT 0x0
+#define VCN_RB_ENABLE__JPEG_RB_EN__SHIFT 0x1
+#define VCN_RB_ENABLE__RB1_EN__SHIFT 0x2
+#define VCN_RB_ENABLE__RB2_EN__SHIFT 0x3
+#define VCN_RB_ENABLE__RB3_EN__SHIFT 0x4
+#define VCN_RB_ENABLE__RB4_EN__SHIFT 0x5
+#define VCN_RB_ENABLE__UMSCH_RB_EN__SHIFT 0x6
+#define VCN_RB_ENABLE__EJPEG_RB_EN__SHIFT 0x7
+#define VCN_RB_ENABLE__AUDIO_RB_EN__SHIFT 0x8
+#define VCN_RB_ENABLE__RB_EN_MASK 0x00000001L
+#define VCN_RB_ENABLE__JPEG_RB_EN_MASK 0x00000002L
+#define VCN_RB_ENABLE__RB1_EN_MASK 0x00000004L
+#define VCN_RB_ENABLE__RB2_EN_MASK 0x00000008L
+#define VCN_RB_ENABLE__RB3_EN_MASK 0x00000010L
+#define VCN_RB_ENABLE__RB4_EN_MASK 0x00000020L
+#define VCN_RB_ENABLE__UMSCH_RB_EN_MASK 0x00000040L
+#define VCN_RB_ENABLE__EJPEG_RB_EN_MASK 0x00000080L
+#define VCN_RB_ENABLE__AUDIO_RB_EN_MASK 0x00000100L
+//VCN_RB_WPTR_CTRL
+#define VCN_RB_WPTR_CTRL__RB_CS_EN__SHIFT 0x0
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN__SHIFT 0x1
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN__SHIFT 0x2
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN__SHIFT 0x3
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN__SHIFT 0x4
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN__SHIFT 0x5
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN__SHIFT 0x6
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN__SHIFT 0x7
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN__SHIFT 0x8
+#define VCN_RB_WPTR_CTRL__RB_CS_EN_MASK 0x00000001L
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN_MASK 0x00000002L
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN_MASK 0x00000004L
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN_MASK 0x00000008L
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN_MASK 0x00000010L
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN_MASK 0x00000020L
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN_MASK 0x00000040L
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN_MASK 0x00000080L
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN_MASK 0x00000100L
+//UVD_RB_RPTR
+#define UVD_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR
+#define UVD_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR2
+#define UVD_RB_RPTR2__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR2__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR2
+#define UVD_RB_WPTR2__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR2__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR3
+#define UVD_RB_RPTR3__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR3__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR3
+#define UVD_RB_WPTR3__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR3__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR4
+#define UVD_RB_RPTR4__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR4__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR4
+#define UVD_RB_WPTR4__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR4__RB_WPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_RPTR
+#define UVD_OUT_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_OUT_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_WPTR
+#define UVD_OUT_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_OUT_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_RPTR
+#define UVD_AUDIO_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_WPTR
+#define UVD_AUDIO_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_RPTR
+#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_WPTR
+#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_DPG_LMA_CTL2
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL__SHIFT 0x0
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR__SHIFT 0x2
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR__SHIFT 0x9
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR_MASK 0x000001FCL
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+
+
+// addressBlock: uvd_vcn_umsch_dec
+//VCN_UMSCH_MES_CNTL
+#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
+#define VCN_UMSCH_MES_CNTL__PerfPipeSel__SHIFT 0x2
+#define VCN_UMSCH_MES_CNTL__RamClkGatingDisable__SHIFT 0x4
+#define VCN_UMSCH_MES_CNTL__InterruptChickenBit__SHIFT 0x5
+#define VCN_UMSCH_MES_CNTL__CpTcOneCycleWrDis__SHIFT 0x6
+#define VCN_UMSCH_MES_CNTL__PIPE_ID_MASK 0x00000003L
+#define VCN_UMSCH_MES_CNTL__PerfPipeSel_MASK 0x0000000CL
+#define VCN_UMSCH_MES_CNTL__RamClkGatingDisable_MASK 0x00000010L
+#define VCN_UMSCH_MES_CNTL__InterruptChickenBit_MASK 0x00000020L
+#define VCN_UMSCH_MES_CNTL__CpTcOneCycleWrDis_MASK 0x00000040L
+//UMSCH_CTL
+#define UMSCH_CTL__P_RESET__SHIFT 0x0
+#define UMSCH_CTL__UTCL2_CLIENT_ID__SHIFT 0x1
+#define UMSCH_CTL__UMSCH_BUSY__SHIFT 0xa
+#define UMSCH_CTL__IllegalRegReadAckLatency__SHIFT 0xd
+#define UMSCH_CTL__P_RESET_MASK 0x00000001L
+#define UMSCH_CTL__UTCL2_CLIENT_ID_MASK 0x000003FEL
+#define UMSCH_CTL__UMSCH_BUSY_MASK 0x00000400L
+#define UMSCH_CTL__IllegalRegReadAckLatency_MASK 0x0000E000L
+//UMSCH_CTL2
+#define UMSCH_CTL2__Spare__SHIFT 0x0
+#define UMSCH_CTL2__Spare_MASK 0xFFFFFFFFL
+//VCN_UMSCH_AGDB_WPTR0
+#define VCN_UMSCH_AGDB_WPTR0__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR0__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR1
+#define VCN_UMSCH_AGDB_WPTR1__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR1__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR2
+#define VCN_UMSCH_AGDB_WPTR2__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR2__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR3
+#define VCN_UMSCH_AGDB_WPTR3__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR3__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR4
+#define VCN_UMSCH_AGDB_WPTR4__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR4__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR5
+#define VCN_UMSCH_AGDB_WPTR5__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR5__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_MAILBOX0
+#define VCN_UMSCH_MAILBOX0__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP0
+#define VCN_UMSCH_MAILBOX_RESP0__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX1
+#define VCN_UMSCH_MAILBOX1__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP1
+#define VCN_UMSCH_MAILBOX_RESP1__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX2
+#define VCN_UMSCH_MAILBOX2__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP2
+#define VCN_UMSCH_MAILBOX_RESP2__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX3
+#define VCN_UMSCH_MAILBOX3__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP3
+#define VCN_UMSCH_MAILBOX_RESP3__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER0
+#define VCN_UMSCH_SPARE_REGISTER0__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER1
+#define VCN_UMSCH_SPARE_REGISTER1__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER2
+#define VCN_UMSCH_SPARE_REGISTER2__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER3
+#define VCN_UMSCH_SPARE_REGISTER3__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER4
+#define VCN_UMSCH_SPARE_REGISTER4__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER4__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER5
+#define VCN_UMSCH_SPARE_REGISTER5__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER5__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER6
+#define VCN_UMSCH_SPARE_REGISTER6__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER6__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER7
+#define VCN_UMSCH_SPARE_REGISTER7__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER7__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MES_UTCL1_CNTL
+#define VCN_UMSCH_MES_UTCL1_CNTL__REDO_LATENCY__SHIFT 0x0
+#define VCN_UMSCH_MES_UTCL1_CNTL__ForceSnoop__SHIFT 0x14
+#define VCN_UMSCH_MES_UTCL1_CNTL__FragLimitMode__SHIFT 0x15
+#define VCN_UMSCH_MES_UTCL1_CNTL__DropMode__SHIFT 0x16
+#define VCN_UMSCH_MES_UTCL1_CNTL__Invalidate__SHIFT 0x17
+#define VCN_UMSCH_MES_UTCL1_CNTL__REDO_LATENCY_MASK 0x000FFFFFL
+#define VCN_UMSCH_MES_UTCL1_CNTL__ForceSnoop_MASK 0x00100000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__FragLimitMode_MASK 0x00200000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__DropMode_MASK 0x00400000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__Invalidate_MASK 0x00800000L
+//VCN_UMSCH_MES_BUSY
+#define VCN_UMSCH_MES_BUSY__MesScratchRamBusy__SHIFT 0x0
+#define VCN_UMSCH_MES_BUSY__MesInstrCacheBusy__SHIFT 0x1
+#define VCN_UMSCH_MES_BUSY__MesDataCacheBusy__SHIFT 0x2
+#define VCN_UMSCH_MES_BUSY__MesBusy__SHIFT 0x3
+#define VCN_UMSCH_MES_BUSY__MesLoadBusy__SHIFT 0x4
+#define VCN_UMSCH_MES_BUSY__MesMutexBusy__SHIFT 0x5
+#define VCN_UMSCH_MES_BUSY__MesThreadBusy__SHIFT 0x6
+#define VCN_UMSCH_MES_BUSY__MesMessageBusy__SHIFT 0x8
+#define VCN_UMSCH_MES_BUSY__MesTcBusy__SHIFT 0xa
+#define VCN_UMSCH_MES_BUSY__MesDmaPending__SHIFT 0xc
+#define VCN_UMSCH_MES_BUSY__MesScratchRamBusy_MASK 0x00000001L
+#define VCN_UMSCH_MES_BUSY__MesInstrCacheBusy_MASK 0x00000002L
+#define VCN_UMSCH_MES_BUSY__MesDataCacheBusy_MASK 0x00000004L
+#define VCN_UMSCH_MES_BUSY__MesBusy_MASK 0x00000008L
+#define VCN_UMSCH_MES_BUSY__MesLoadBusy_MASK 0x00000010L
+#define VCN_UMSCH_MES_BUSY__MesMutexBusy_MASK 0x00000020L
+#define VCN_UMSCH_MES_BUSY__MesThreadBusy_MASK 0x000000C0L
+#define VCN_UMSCH_MES_BUSY__MesMessageBusy_MASK 0x00000300L
+#define VCN_UMSCH_MES_BUSY__MesTcBusy_MASK 0x00000C00L
+#define VCN_UMSCH_MES_BUSY__MesDmaPending_MASK 0x00003000L
+//VCN_UMSCH_RB_BASE_LO
+#define VCN_UMSCH_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define VCN_UMSCH_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//VCN_UMSCH_RB_BASE_HI
+#define VCN_UMSCH_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define VCN_UMSCH_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//VCN_UMSCH_RB_SIZE
+#define VCN_UMSCH_RB_SIZE__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_SIZE__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_RB_RPTR
+#define VCN_UMSCH_RB_RPTR__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_RPTR__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_RB_WPTR
+#define VCN_UMSCH_RB_WPTR__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_WPTR__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_MASTINT_EN
+#define VCN_UMSCH_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define VCN_UMSCH_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define VCN_UMSCH_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define VCN_UMSCH_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define VCN_UMSCH_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define VCN_UMSCH_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//VCN_UMSCH_IH_CTRL
+#define VCN_UMSCH_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0
+#define VCN_UMSCH_IH_CTRL__IH_STALL_EN__SHIFT 0x1
+#define VCN_UMSCH_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define VCN_UMSCH_IH_CTRL__IH_VMID__SHIFT 0x3
+#define VCN_UMSCH_IH_CTRL__IH_USER_DATA__SHIFT 0x7
+#define VCN_UMSCH_IH_CTRL__IH_RINGID__SHIFT 0x13
+#define VCN_UMSCH_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L
+#define VCN_UMSCH_IH_CTRL__IH_STALL_EN_MASK 0x00000002L
+#define VCN_UMSCH_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define VCN_UMSCH_IH_CTRL__IH_VMID_MASK 0x00000078L
+#define VCN_UMSCH_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L
+#define VCN_UMSCH_IH_CTRL__IH_RINGID_MASK 0x07F80000L
+//VCN_UMSCH_SYS_INT_EN
+#define VCN_UMSCH_SYS_INT_EN__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_EN__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_EN__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_EN__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_EN__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_EN__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_EN__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_EN__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_EN__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_EN__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_EN__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_EN__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_EN__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_EN__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_EN__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_EN__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_STATUS
+#define VCN_UMSCH_SYS_INT_STATUS__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_STATUS__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_STATUS__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_STATUS__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_STATUS__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_STATUS__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_STATUS__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_STATUS__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_STATUS__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_STATUS__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_STATUS__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_STATUS__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_STATUS__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_STATUS__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_STATUS__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_STATUS__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_ACK
+#define VCN_UMSCH_SYS_INT_ACK__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_ACK__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_ACK__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_ACK__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_ACK__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_ACK__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_ACK__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_ACK__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_ACK__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_ACK__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_ACK__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_ACK__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_ACK__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_ACK__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_ACK__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_ACK__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_SRC
+#define VCN_UMSCH_SYS_INT_SRC__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_SRC__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_SRC__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_SRC__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_SRC__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_SRC__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_SRC__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_SRC__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_SRC__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_SRC__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_SRC__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_SRC__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_SRC__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_SRC__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_SRC__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_SRC__INT7_MASK 0x00000080L
+//VCN_UMSCH_IH_CTX_CTRL
+#define VCN_UMSCH_IH_CTX_CTRL__IH_CTX_ID__SHIFT 0x0
+#define VCN_UMSCH_IH_CTX_CTRL__IH_CTX_ID_MASK 0x0FFFFFFFL
+//UVD_UMSCH_FORCE
+#define UVD_UMSCH_FORCE__IC_FORCE_GPUVM__SHIFT 0x0
+#define UVD_UMSCH_FORCE__DC_FORCE_GPUVM__SHIFT 0x1
+#define UVD_UMSCH_FORCE__FORCE_DROP_DISABLE__SHIFT 0x2
+#define UVD_UMSCH_FORCE__FORCE_DROP_INT_DISABLE__SHIFT 0x3
+#define UVD_UMSCH_FORCE__BYPASS_UTCL2_ATC_AUTO_RESP__SHIFT 0x4
+#define UVD_UMSCH_FORCE__IC_FORCE_GPUVM_MASK 0x00000001L
+#define UVD_UMSCH_FORCE__DC_FORCE_GPUVM_MASK 0x00000002L
+#define UVD_UMSCH_FORCE__FORCE_DROP_DISABLE_MASK 0x00000004L
+#define UVD_UMSCH_FORCE__FORCE_DROP_INT_DISABLE_MASK 0x00000008L
+#define UVD_UMSCH_FORCE__BYPASS_UTCL2_ATC_AUTO_RESP_MASK 0x00000010L
+//UMSCH_MES_RESET_CTRL
+#define UMSCH_MES_RESET_CTRL__MES_CORE_SOFT_RESET__SHIFT 0x0
+#define UMSCH_MES_RESET_CTRL__MES_CORE_SOFT_RESET_MASK 0x00000001L
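+
+/* Illustrative sketch (editorial): the VCN_UMSCH_MES_BUSY status bits
+ * defined above are plain flags, so an idle check on a raw readback value
+ * is a mask test; the helper name here is hypothetical:
+ *
+ *   static inline int vcn_umsch_mes_idle(uint32_t busy)
+ *   {
+ *           return (busy & (VCN_UMSCH_MES_BUSY__MesBusy_MASK |
+ *                           VCN_UMSCH_MES_BUSY__MesThreadBusy_MASK |
+ *                           VCN_UMSCH_MES_BUSY__MesDmaPending_MASK)) == 0;
+ *   }
+ */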
+
+
+// addressBlock: uvd_vcn_cprs64dec
+//VCN_MES_PRGRM_CNTR_START
+#define VCN_MES_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define VCN_MES_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//VCN_MES_INTR_ROUTINE_START
+#define VCN_MES_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define VCN_MES_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//VCN_MES_MTVEC_LO
+#define VCN_MES_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_INTR_ROUTINE_START_HI
+#define VCN_MES_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define VCN_MES_INTR_ROUTINE_START_HI__IR_START_MASK 0xFFFFFFFFL
+//VCN_MES_MTVEC_HI
+#define VCN_MES_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_CNTL
+#define VCN_MES_CNTL__MES_INVALIDATE_ICACHE__SHIFT 0x4
+#define VCN_MES_CNTL__MES_PIPE0_RESET__SHIFT 0x10
+#define VCN_MES_CNTL__MES_PIPE1_RESET__SHIFT 0x11
+#define VCN_MES_CNTL__MES_PIPE2_RESET__SHIFT 0x12
+#define VCN_MES_CNTL__MES_PIPE3_RESET__SHIFT 0x13
+#define VCN_MES_CNTL__MES_PIPE0_ACTIVE__SHIFT 0x1a
+#define VCN_MES_CNTL__MES_PIPE1_ACTIVE__SHIFT 0x1b
+#define VCN_MES_CNTL__MES_PIPE2_ACTIVE__SHIFT 0x1c
+#define VCN_MES_CNTL__MES_PIPE3_ACTIVE__SHIFT 0x1d
+#define VCN_MES_CNTL__MES_HALT__SHIFT 0x1e
+#define VCN_MES_CNTL__MES_STEP__SHIFT 0x1f
+#define VCN_MES_CNTL__MES_INVALIDATE_ICACHE_MASK 0x00000010L
+#define VCN_MES_CNTL__MES_PIPE0_RESET_MASK 0x00010000L
+#define VCN_MES_CNTL__MES_PIPE1_RESET_MASK 0x00020000L
+#define VCN_MES_CNTL__MES_PIPE2_RESET_MASK 0x00040000L
+#define VCN_MES_CNTL__MES_PIPE3_RESET_MASK 0x00080000L
+#define VCN_MES_CNTL__MES_PIPE0_ACTIVE_MASK 0x04000000L
+#define VCN_MES_CNTL__MES_PIPE1_ACTIVE_MASK 0x08000000L
+#define VCN_MES_CNTL__MES_PIPE2_ACTIVE_MASK 0x10000000L
+#define VCN_MES_CNTL__MES_PIPE3_ACTIVE_MASK 0x20000000L
+#define VCN_MES_CNTL__MES_HALT_MASK 0x40000000L
+#define VCN_MES_CNTL__MES_STEP_MASK 0x80000000L
+//VCN_MES_PIPE_PRIORITY_CNTS
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//VCN_MES_PIPE0_PRIORITY
+#define VCN_MES_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE1_PRIORITY
+#define VCN_MES_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE2_PRIORITY
+#define VCN_MES_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE3_PRIORITY
+#define VCN_MES_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_HEADER_DUMP
+#define VCN_MES_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define VCN_MES_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//VCN_MES_MIE_LO
+#define VCN_MES_MIE_LO__MES_INT__SHIFT 0x0
+#define VCN_MES_MIE_LO__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_MIE_HI
+#define VCN_MES_MIE_HI__MES_INT__SHIFT 0x0
+#define VCN_MES_MIE_HI__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT
+#define VCN_MES_INTERRUPT__MES_INT__SHIFT 0x0
+#define VCN_MES_INTERRUPT__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_SCRATCH_INDEX
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//VCN_MES_SCRATCH_DATA
+#define VCN_MES_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define VCN_MES_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INSTR_PNTR
+#define VCN_MES_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define VCN_MES_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//VCN_MES_MSCRATCH_HI
+#define VCN_MES_MSCRATCH_HI__DATA__SHIFT 0x0
+#define VCN_MES_MSCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_MSCRATCH_LO
+#define VCN_MES_MSCRATCH_LO__DATA__SHIFT 0x0
+#define VCN_MES_MSCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_MSTATUS_LO
+#define VCN_MES_MSTATUS_LO__STATUS_LO__SHIFT 0x0
+#define VCN_MES_MSTATUS_LO__STATUS_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MSTATUS_HI
+#define VCN_MES_MSTATUS_HI__STATUS_HI__SHIFT 0x0
+#define VCN_MES_MSTATUS_HI__STATUS_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MEPC_LO
+#define VCN_MES_MEPC_LO__MEPC_LO__SHIFT 0x0
+#define VCN_MES_MEPC_LO__MEPC_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MEPC_HI
+#define VCN_MES_MEPC_HI__MEPC_HI__SHIFT 0x0
+#define VCN_MES_MEPC_HI__MEPC_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MCAUSE_LO
+#define VCN_MES_MCAUSE_LO__CAUSE_LO__SHIFT 0x0
+#define VCN_MES_MCAUSE_LO__CAUSE_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MCAUSE_HI
+#define VCN_MES_MCAUSE_HI__CAUSE_HI__SHIFT 0x0
+#define VCN_MES_MCAUSE_HI__CAUSE_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MBADADDR_LO
+#define VCN_MES_MBADADDR_LO__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MBADADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MBADADDR_HI
+#define VCN_MES_MBADADDR_HI__ADDR_HI__SHIFT 0x0
+#define VCN_MES_MBADADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MIP_LO
+#define VCN_MES_MIP_LO__MIP_LO__SHIFT 0x0
+#define VCN_MES_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIP_HI
+#define VCN_MES_MIP_HI__MIP_HI__SHIFT 0x0
+#define VCN_MES_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//VCN_MES_IC_OP_CNTL
+#define VCN_MES_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define VCN_MES_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define VCN_MES_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define VCN_MES_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define VCN_MES_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define VCN_MES_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//VCN_MES_MCYCLE_LO
+#define VCN_MES_MCYCLE_LO__CYCLE_LO__SHIFT 0x0
+#define VCN_MES_MCYCLE_LO__CYCLE_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MCYCLE_HI
+#define VCN_MES_MCYCLE_HI__CYCLE_HI__SHIFT 0x0
+#define VCN_MES_MCYCLE_HI__CYCLE_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MTIME_LO
+#define VCN_MES_MTIME_LO__TIME_LO__SHIFT 0x0
+#define VCN_MES_MTIME_LO__TIME_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MTIME_HI
+#define VCN_MES_MTIME_HI__TIME_HI__SHIFT 0x0
+#define VCN_MES_MTIME_HI__TIME_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MINSTRET_LO
+#define VCN_MES_MINSTRET_LO__INSTRET_LO__SHIFT 0x0
+#define VCN_MES_MINSTRET_LO__INSTRET_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MINSTRET_HI
+#define VCN_MES_MINSTRET_HI__INSTRET_HI__SHIFT 0x0
+#define VCN_MES_MINSTRET_HI__INSTRET_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MISA_LO
+#define VCN_MES_MISA_LO__MISA_LO__SHIFT 0x0
+#define VCN_MES_MISA_LO__MISA_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MISA_HI
+#define VCN_MES_MISA_HI__MISA_HI__SHIFT 0x0
+#define VCN_MES_MISA_HI__MISA_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MVENDORID_LO
+#define VCN_MES_MVENDORID_LO__MVENDORID_LO__SHIFT 0x0
+#define VCN_MES_MVENDORID_LO__MVENDORID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MVENDORID_HI
+#define VCN_MES_MVENDORID_HI__MVENDORID_HI__SHIFT 0x0
+#define VCN_MES_MVENDORID_HI__MVENDORID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MARCHID_LO
+#define VCN_MES_MARCHID_LO__MARCHID_LO__SHIFT 0x0
+#define VCN_MES_MARCHID_LO__MARCHID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MARCHID_HI
+#define VCN_MES_MARCHID_HI__MARCHID_HI__SHIFT 0x0
+#define VCN_MES_MARCHID_HI__MARCHID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MIMPID_LO
+#define VCN_MES_MIMPID_LO__MIMPID_LO__SHIFT 0x0
+#define VCN_MES_MIMPID_LO__MIMPID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIMPID_HI
+#define VCN_MES_MIMPID_HI__MIMPID_HI__SHIFT 0x0
+#define VCN_MES_MIMPID_HI__MIMPID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MHARTID_LO
+#define VCN_MES_MHARTID_LO__MHARTID_LO__SHIFT 0x0
+#define VCN_MES_MHARTID_LO__MHARTID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MHARTID_HI
+#define VCN_MES_MHARTID_HI__MHARTID_HI__SHIFT 0x0
+#define VCN_MES_MHARTID_HI__MHARTID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_DC_BASE_CNTL
+#define VCN_MES_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define VCN_MES_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//VCN_MES_DC_OP_CNTL
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define VCN_MES_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define VCN_MES_DC_OP_CNTL__DEPRECATED__SHIFT 0x3
+#define VCN_MES_DC_OP_CNTL__DEPRACATED__SHIFT 0x4
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define VCN_MES_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+#define VCN_MES_DC_OP_CNTL__DEPRECATED_MASK 0x00000008L
+#define VCN_MES_DC_OP_CNTL__DEPRACATED_MASK 0x00000010L
+//VCN_MES_MTIMECMP_LO
+#define VCN_MES_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define VCN_MES_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MTIMECMP_HI
+#define VCN_MES_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define VCN_MES_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP0_LO
+#define VCN_MES_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define VCN_MES_GP0_LO__DATA__SHIFT 0x1
+#define VCN_MES_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define VCN_MES_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//VCN_MES_GP0_HI
+#define VCN_MES_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define VCN_MES_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//VCN_MES_GP1_LO
+#define VCN_MES_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define VCN_MES_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP1_HI
+#define VCN_MES_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define VCN_MES_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP2_LO
+#define VCN_MES_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define VCN_MES_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP2_HI
+#define VCN_MES_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define VCN_MES_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP3_LO
+#define VCN_MES_GP3_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP3_HI
+#define VCN_MES_GP3_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP4_LO
+#define VCN_MES_GP4_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP4_HI
+#define VCN_MES_GP4_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP5_LO
+#define VCN_MES_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define VCN_MES_GP5_LO__DATA__SHIFT 0x1
+#define VCN_MES_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define VCN_MES_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//VCN_MES_GP5_HI
+#define VCN_MES_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define VCN_MES_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//VCN_MES_GP6_LO
+#define VCN_MES_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define VCN_MES_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP6_HI
+#define VCN_MES_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define VCN_MES_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP7_LO
+#define VCN_MES_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define VCN_MES_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP7_HI
+#define VCN_MES_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define VCN_MES_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP8_LO
+#define VCN_MES_GP8_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP8_HI
+#define VCN_MES_GP8_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP9_LO
+#define VCN_MES_GP9_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP9_HI
+#define VCN_MES_GP9_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_DM_INDEX_ADDR
+#define VCN_MES_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define VCN_MES_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//VCN_MES_DM_INDEX_DATA
+#define VCN_MES_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define VCN_MES_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_LOCAL_BASE0_LO
+#define VCN_MES_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_BASE0_HI
+#define VCN_MES_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_MASK0_LO
+#define VCN_MES_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_MASK0_HI
+#define VCN_MES_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_APERTURE
+#define VCN_MES_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define VCN_MES_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//VCN_MES_LOCAL_INSTR_BASE_LO
+#define VCN_MES_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_INSTR_BASE_HI
+#define VCN_MES_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_INSTR_MASK_LO
+#define VCN_MES_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_INSTR_MASK_HI
+#define VCN_MES_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_INSTR_APERTURE
+#define VCN_MES_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define VCN_MES_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//VCN_MES_LOCAL_SCRATCH_APERTURE
+#define VCN_MES_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define VCN_MES_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//VCN_MES_LOCAL_SCRATCH_BASE_LO
+#define VCN_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_SCRATCH_BASE_HI
+#define VCN_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_PERFCOUNT_CNTL
+#define VCN_MES_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define VCN_MES_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//VCN_MES_PENDING_INTERRUPT
+#define VCN_MES_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define VCN_MES_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//VCN_MES_PRGRM_CNTR_START_HI
+#define VCN_MES_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define VCN_MES_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//VCN_MES_INTERRUPT_DATA_16
+#define VCN_MES_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_17
+#define VCN_MES_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_18
+#define VCN_MES_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_19
+#define VCN_MES_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_20
+#define VCN_MES_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_21
+#define VCN_MES_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_22
+#define VCN_MES_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_23
+#define VCN_MES_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_24
+#define VCN_MES_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_25
+#define VCN_MES_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_26
+#define VCN_MES_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_27
+#define VCN_MES_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_28
+#define VCN_MES_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_29
+#define VCN_MES_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_30
+#define VCN_MES_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_31
+#define VCN_MES_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE0_BASE
+#define VCN_MES_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE0_MASK
+#define VCN_MES_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE0_CNTL
+#define VCN_MES_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE1_BASE
+#define VCN_MES_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE1_MASK
+#define VCN_MES_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE1_CNTL
+#define VCN_MES_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE2_BASE
+#define VCN_MES_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE2_MASK
+#define VCN_MES_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE2_CNTL
+#define VCN_MES_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE3_BASE
+#define VCN_MES_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE3_MASK
+#define VCN_MES_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE3_CNTL
+#define VCN_MES_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE4_BASE
+#define VCN_MES_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE4_MASK
+#define VCN_MES_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE4_CNTL
+#define VCN_MES_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE5_BASE
+#define VCN_MES_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE5_MASK
+#define VCN_MES_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE5_CNTL
+#define VCN_MES_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE6_BASE
+#define VCN_MES_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE6_MASK
+#define VCN_MES_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE6_CNTL
+#define VCN_MES_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE7_BASE
+#define VCN_MES_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE7_MASK
+#define VCN_MES_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE7_CNTL
+#define VCN_MES_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE8_BASE
+#define VCN_MES_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE8_MASK
+#define VCN_MES_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE8_CNTL
+#define VCN_MES_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE9_BASE
+#define VCN_MES_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE9_MASK
+#define VCN_MES_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE9_CNTL
+#define VCN_MES_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE10_BASE
+#define VCN_MES_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE10_MASK
+#define VCN_MES_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE10_CNTL
+#define VCN_MES_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE11_BASE
+#define VCN_MES_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE11_MASK
+#define VCN_MES_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE11_CNTL
+#define VCN_MES_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE12_BASE
+#define VCN_MES_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE12_MASK
+#define VCN_MES_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE12_CNTL
+#define VCN_MES_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE13_BASE
+#define VCN_MES_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE13_MASK
+#define VCN_MES_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE13_CNTL
+#define VCN_MES_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE14_BASE
+#define VCN_MES_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE14_MASK
+#define VCN_MES_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE14_CNTL
+#define VCN_MES_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE15_BASE
+#define VCN_MES_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE15_MASK
+#define VCN_MES_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE15_CNTL
+#define VCN_MES_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+
+
+// addressBlock: uvd_vcn_hypdec
+//VCN_MES_IC_BASE_LO
+#define VCN_MES_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define VCN_MES_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//VCN_MES_MIBASE_LO
+#define VCN_MES_MIBASE_LO__IC_BASE_LO__SHIFT 0xc
+#define VCN_MES_MIBASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//VCN_MES_IC_BASE_HI
+#define VCN_MES_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define VCN_MES_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_MIBASE_HI
+#define VCN_MES_MIBASE_HI__IC_BASE_HI__SHIFT 0x0
+#define VCN_MES_MIBASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_IC_BASE_CNTL
+#define VCN_MES_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define VCN_MES_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define VCN_MES_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define VCN_MES_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//VCN_MES_DC_BASE_LO
+#define VCN_MES_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define VCN_MES_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_MDBASE_LO
+#define VCN_MES_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define VCN_MES_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_DC_BASE_HI
+#define VCN_MES_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define VCN_MES_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_MDBASE_HI
+#define VCN_MES_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define VCN_MES_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_MIBOUND_LO
+#define VCN_MES_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define VCN_MES_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIBOUND_HI
+#define VCN_MES_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define VCN_MES_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MDBOUND_LO
+#define VCN_MES_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define VCN_MES_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MDBOUND_HI
+#define VCN_MES_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define VCN_MES_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd_slmi_adpdec
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
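+
+/* Illustrative sketch (editorial): each MMSCH NC BAR above is a 64-bit
+ * address split across a _LOW/_HIGH register pair, both full 32-bit
+ * fields at shift 0, matching the BITS_31_0/BITS_63_32 field names:
+ *
+ *   uint32_t lo = (uint32_t)(addr & 0xFFFFFFFFULL);   // BITS_31_0
+ *   uint32_t hi = (uint32_t)(addr >> 32);             // BITS_63_32
+ */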
+//UVD_LMI_MMSCH_NC_VMID
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID__SHIFT 0x4
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID__SHIFT 0x8
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID__SHIFT 0xc
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID__SHIFT 0x10
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID__SHIFT 0x14
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID__SHIFT 0x18
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID__SHIFT 0x1c
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID_MASK 0x0000000FL
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID_MASK 0x000000F0L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID_MASK 0x00000F00L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID_MASK 0x0000F000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID_MASK 0x000F0000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID_MASK 0x00F00000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID_MASK 0x0F000000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID_MASK 0xF0000000L
+//UVD_LMI_MMSCH_CTRL
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN__SHIFT 0x0
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM__SHIFT 0x1
+#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH__SHIFT 0x2
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP__SHIFT 0x3
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP__SHIFT 0x5
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD__SHIFT 0x7
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR__SHIFT 0x9
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP__SHIFT 0xb
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP__SHIFT 0xc
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN_MASK 0x00000001L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM_MASK 0x00000002L
+#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH_MASK 0x00000004L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP_MASK 0x00000018L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP_MASK 0x00000060L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_MASK 0x00000180L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_MASK 0x00000600L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP_MASK 0x00000800L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP_MASK 0x00001000L
+//UVD_MMSCH_LMI_STATUS
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT__SHIFT 0x0
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0x1
+#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN__SHIFT 0x4
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS__SHIFT 0x8
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE__SHIFT 0xc
+#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN__SHIFT 0xd
+#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN__SHIFT 0xe
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT_MASK 0x00000001L
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00000002L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN_MASK 0x000000F0L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS_MASK 0x00000700L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE_MASK 0x00001000L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN_MASK 0x00002000L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN_MASK 0x00004000L
+//UMSCH_IOV_ACTIVE_FCN_ID
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID__SHIFT 0x0
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF__SHIFT 0x1f
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID_MASK 0x0000003FL
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF_MASK 0x80000000L
+//UVD_UMSCH_LMI_STATUS
+#define UVD_UMSCH_LMI_STATUS__UMSCHIC_RD_CLEAN__SHIFT 0x0
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_RD_CLEAN__SHIFT 0x1
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_WR_CLEAN__SHIFT 0x2
+#define UVD_UMSCH_LMI_STATUS__UMSCHIC_RD_CLEAN_MASK 0x00000001L
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_RD_CLEAN_MASK 0x00000002L
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_WR_CLEAN_MASK 0x00000004L
+
+
+// addressBlock: uvdctxind
+//UVD_CGC_MEM_CTRL
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x0
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x1
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x2
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x3
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x4
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x5
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x6
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x7
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x8
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x9
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0xa
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0xc
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0xd
+#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN__SHIFT 0xe
+#define UVD_CGC_MEM_CTRL__MPC1_LS_EN__SHIFT 0xf
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x10
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x14
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_CTRL__MPC1_LS_EN_MASK 0x00008000L
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000F0000L
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00F00000L
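+
+/* Illustrative sketch (editorial): the per-block light-sleep enables in
+ * UVD_CGC_MEM_CTRL above are single bits, so several domains can be
+ * enabled in one value by OR-ing their masks, with the 4-bit delay
+ * fields packed through their shift/mask pairs:
+ *
+ *   uint32_t val = UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK |
+ *                  UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK |
+ *                  ((delay << UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT) &
+ *                   UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK);
+ */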
+//UVD_CGC_CTRL2
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x0
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x1
+#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x2
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L
+#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001CL
+//UVD_CGC_MEM_DS_CTRL
+#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN__SHIFT 0x0
+#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN__SHIFT 0x1
+#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN__SHIFT 0x2
+#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN__SHIFT 0x3
+#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN__SHIFT 0x4
+#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN__SHIFT 0x5
+#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN__SHIFT 0x6
+#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN__SHIFT 0x7
+#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN__SHIFT 0x8
+#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN__SHIFT 0x9
+#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN__SHIFT 0xa
+#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN__SHIFT 0xc
+#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN__SHIFT 0xd
+#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN__SHIFT 0xe
+#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN__SHIFT 0xf
+#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN_MASK 0x00008000L
+//UVD_CGC_MEM_SD_CTRL
+#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN__SHIFT 0x0
+#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN__SHIFT 0x1
+#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN__SHIFT 0x2
+#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN__SHIFT 0x3
+#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN__SHIFT 0x4
+#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN__SHIFT 0x5
+#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN__SHIFT 0x6
+#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN__SHIFT 0x7
+#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN__SHIFT 0x8
+#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN__SHIFT 0x9
+#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN__SHIFT 0xa
+#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN__SHIFT 0xc
+#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN__SHIFT 0xd
+#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN__SHIFT 0xe
+#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN__SHIFT 0xf
+#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN_MASK 0x00008000L
+//UVD_SW_SCRATCH_00
+#define UVD_SW_SCRATCH_00__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_00__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_01
+#define UVD_SW_SCRATCH_01__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_01__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_02
+#define UVD_SW_SCRATCH_02__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_02__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_03
+#define UVD_SW_SCRATCH_03__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_03__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_04
+#define UVD_SW_SCRATCH_04__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_04__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_05
+#define UVD_SW_SCRATCH_05__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_05__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_06
+#define UVD_SW_SCRATCH_06__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_06__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_07
+#define UVD_SW_SCRATCH_07__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_07__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_08
+#define UVD_SW_SCRATCH_08__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_08__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_09
+#define UVD_SW_SCRATCH_09__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_09__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_10
+#define UVD_SW_SCRATCH_10__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_10__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_11
+#define UVD_SW_SCRATCH_11__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_11__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_12
+#define UVD_SW_SCRATCH_12__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_12__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_13
+#define UVD_SW_SCRATCH_13__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_13__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_14
+#define UVD_SW_SCRATCH_14__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_14__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_15
+#define UVD_SW_SCRATCH_15__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_15__DATA_MASK 0xFFFFFFFFL
+//UVD_IH_SEM_CTRL
+#define UVD_IH_SEM_CTRL__IH_STALL_EN__SHIFT 0x0
+#define UVD_IH_SEM_CTRL__SEM_STALL_EN__SHIFT 0x1
+#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN__SHIFT 0x3
+#define UVD_IH_SEM_CTRL__IH_VMID__SHIFT 0x4
+#define UVD_IH_SEM_CTRL__IH_USER_DATA__SHIFT 0x8
+#define UVD_IH_SEM_CTRL__IH_RINGID__SHIFT 0x14
+#define UVD_IH_SEM_CTRL__IH_STALL_EN_MASK 0x00000001L
+#define UVD_IH_SEM_CTRL__SEM_STALL_EN_MASK 0x00000002L
+#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN_MASK 0x00000008L
+#define UVD_IH_SEM_CTRL__IH_VMID_MASK 0x000000F0L
+#define UVD_IH_SEM_CTRL__IH_USER_DATA_MASK 0x000FFF00L
+#define UVD_IH_SEM_CTRL__IH_RINGID_MASK 0x0FF00000L
+//UVD_MISC_FEATURE_CTL
+#define UVD_MISC_FEATURE_CTL__ROW_PREEMPT_EN__SHIFT 0x0
+#define UVD_MISC_FEATURE_CTL__PREEMPT_BLOCKIF_DIS_EN__SHIFT 0x1
+#define UVD_MISC_FEATURE_CTL__ROW_PREEMPT_EN_MASK 0x00000001L
+#define UVD_MISC_FEATURE_CTL__PREEMPT_BLOCKIF_DIS_EN_MASK 0x00000002L
+
+
+// addressBlock: lmi_adp_indirect
+//UVD_LMI_CRC0
+#define UVD_LMI_CRC0__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC0__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC1
+#define UVD_LMI_CRC1__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC1__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC2
+#define UVD_LMI_CRC2__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC2__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC3
+#define UVD_LMI_CRC3__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC10
+#define UVD_LMI_CRC10__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC10__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC11
+#define UVD_LMI_CRC11__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC11__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC12
+#define UVD_LMI_CRC12__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC12__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC13
+#define UVD_LMI_CRC13__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC13__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC14
+#define UVD_LMI_CRC14__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC14__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC15
+#define UVD_LMI_CRC15__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC15__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_SWAP_CNTL2
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x0
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x2
+#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x4
+#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP__SHIFT 0xc
+#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP__SHIFT 0xe
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000CL
+#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP_MASK 0x00000FF0L
+#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP_MASK 0x0000C000L
+//UVD_MEMCHECK_SYS_INT_EN
+#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x1b
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1c
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1d
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN_MASK 0x08000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN_MASK 0x10000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN_MASK 0x20000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN_MASK 0x80000000L
+//UVD_MEMCHECK_SYS_INT_STAT
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR__SHIFT 0xe
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK_SYS_INT_ACK
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK__SHIFT 0xe
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK_VCPU_INT_EN
+#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1a
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1b
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1c
+#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN__SHIFT 0x1d
+#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN_MASK 0x04000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN_MASK 0x08000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x10000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN_MASK 0x20000000L
+//UVD_MEMCHECK_VCPU_INT_STAT
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR__SHIFT 0xe
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK_VCPU_INT_ACK
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK__SHIFT 0xe
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK2_SYS_INT_STAT
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x1a
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x1b
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x1c
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x1d
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x04000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x08000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x10000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x20000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK2_SYS_INT_ACK
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x1a
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x1b
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x1c
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x1d
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x04000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x08000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x10000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x20000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK2_VCPU_INT_STAT
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR__SHIFT 0x1a
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR__SHIFT 0x1b
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR_MASK 0x04000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR_MASK 0x08000000L
+//UVD_MEMCHECK2_VCPU_INT_ACK
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK__SHIFT 0x1a
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK__SHIFT 0x1b
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK_MASK 0x04000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK_MASK 0x08000000L
+
+
+#endif
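
The SHIFT/MASK pairs above follow the standard register-field convention: read a field with (reg & MASK) >> SHIFT, write one with (reg & ~MASK) | ((val << SHIFT) & MASK). A minimal sketch of that access pattern, assuming hypothetical RREG32/WREG32 helpers and a register offset name that this header does not define:

/* Hedged sketch: field access using the SHIFT/MASK pairs defined above.
 * RREG32/WREG32 and regUVD_MEMCHECK_SYS_INT_EN are stand-ins, not part
 * of this header. */
#include <stdint.h>

static inline uint32_t uvd_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t uvd_field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: enable the VCPU memcheck error interrupt.
 *
 *   uint32_t en = RREG32(regUVD_MEMCHECK_SYS_INT_EN);
 *   en = uvd_field_set(en, UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN_MASK,
 *                      UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN__SHIFT, 1);
 *   WREG32(regUVD_MEMCHECK_SYS_INT_EN, en);
 */

The naming also suggests the usual enable/status/acknowledge triplet: a driver enables a source in _INT_EN, observes it in _INT_STAT, and clears it by writing the matching bit in _INT_ACK.
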
diff --git a/drivers/gpu/drm/amd/include/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
index e8fae5c77514..2bfd6d0ff050 100644
--- a/drivers/gpu/drm/amd/include/atom-bits.h
+++ b/drivers/gpu/drm/amd/include/atom-bits.h
@@ -33,7 +33,7 @@ static inline uint8_t get_u8(void *bios, int ptr)
#define CU8(ptr) get_u8(ctx->bios, (ptr))
static inline uint16_t get_u16(void *bios, int ptr)
{
- return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+ return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
}
#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
#define CU16(ptr) get_u16(ctx->bios, (ptr))
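
get_u16 assembles a little-endian 16-bit value from two consecutive BIOS-image bytes, and the U16/CU16 wrappers merely fix the bios pointer from the surrounding context. A standalone worked example (not kernel code) confirming the byte order:

/* Sketch: bios[0] = 0x34, bios[1] = 0x12 yields 0x1234 (little-endian). */
#include <assert.h>
#include <stdint.h>

static inline uint8_t get_u8(void *bios, int ptr)
{
	return ((unsigned char *)bios)[ptr];
}

static inline uint16_t get_u16(void *bios, int ptr)
{
	return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
}

int main(void)
{
	uint8_t bios[] = { 0x34, 0x12 };

	assert(get_u16(bios, 0) == 0x1234);
	return 0;
}
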
diff --git a/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h b/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h
index 26044cb285d2..48542ea6882a 100644
--- a/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h
@@ -26,13 +26,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
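
The brace style is the only change here; the structures themselves encode the IP register apertures: each IP_BASE holds one IP_BASE_INSTANCE per hardware instance, and each instance holds up to MAX_SEGMENT aperture segments. A self-contained sketch of how such a table is indexed (MAX_INSTANCE and the sample base value are illustrative, not taken from this header):

/* Sketch of how the IP base tables are indexed; values are illustrative. */
#define MAX_INSTANCE 7
#define MAX_SEGMENT  6

struct IP_BASE_INSTANCE {
	unsigned int segment[MAX_SEGMENT];
};

struct IP_BASE {
	struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};

static const struct IP_BASE EXAMPLE_BASE = { { { { 0x00000C20, 0, 0, 0, 0, 0 } } } };

static inline unsigned int reg_addr(const struct IP_BASE *base,
				    int inst, int seg, unsigned int offset)
{
	return base->instance[inst].segment[seg] + offset;
}
/* reg_addr(&EXAMPLE_BASE, 0, 0, 0x10) == 0x00000C30 */
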
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 60a6536ff656..f40b6a03fe63 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -149,27 +149,26 @@ struct cgs_ops {
struct cgs_os_ops; /* To be defined in OS-specific CGS header */
-struct cgs_device
-{
+struct cgs_device {
const struct cgs_ops *ops;
/* to be embedded at the start of driver private structure */
};
/* Convenience macros that make CGS indirect function calls look like
* normal function calls */
-#define CGS_CALL(func,dev,...) \
+#define CGS_CALL(func, dev, ...) \
(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
-#define CGS_OS_CALL(func,dev,...) \
+#define CGS_OS_CALL(func, dev, ...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
-#define cgs_read_register(dev,offset) \
- CGS_CALL(read_register,dev,offset)
-#define cgs_write_register(dev,offset,value) \
- CGS_CALL(write_register,dev,offset,value)
-#define cgs_read_ind_register(dev,space,index) \
- CGS_CALL(read_ind_register,dev,space,index)
-#define cgs_write_ind_register(dev,space,index,value) \
- CGS_CALL(write_ind_register,dev,space,index,value)
+#define cgs_read_register(dev, offset) \
+ CGS_CALL(read_register, dev, offset)
+#define cgs_write_register(dev, offset, value) \
+ CGS_CALL(write_register, dev, offset, value)
+#define cgs_read_ind_register(dev, space, index) \
+ CGS_CALL(read_ind_register, dev, space, index)
+#define cgs_write_ind_register(dev, space, index, value) \
+ CGS_CALL(write_ind_register, dev, space, index, value)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
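
CGS_CALL dispatches through the ops table embedded at the start of the device structure, so cgs_read_register(dev, offset) expands to ((struct cgs_device *)dev)->ops->read_register(dev, offset). A self-contained sketch of the same pattern with a stub backend (the stub is hypothetical, not a real CGS implementation):

/* Sketch: the CGS indirect-dispatch pattern, reduced to one operation. */
#include <stdint.h>
#include <stdio.h>

struct cgs_device;

struct cgs_ops {
	uint32_t (*read_register)(struct cgs_device *dev, unsigned int offset);
};

struct cgs_device {
	const struct cgs_ops *ops;	/* embedded at the start of the private struct */
};

#define CGS_CALL(func, dev, ...) \
	(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
#define cgs_read_register(dev, offset) \
	CGS_CALL(read_register, dev, offset)

static uint32_t stub_read_register(struct cgs_device *dev, unsigned int offset)
{
	(void)dev;
	return offset ^ 0xdeadbeef;	/* placeholder, not real hardware access */
}

int main(void)
{
	static const struct cgs_ops ops = { .read_register = stub_read_register };
	struct cgs_device dev = { .ops = &ops };

	printf("0x%08x\n", cgs_read_register(&dev, 0x10));
	return 0;
}
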
diff --git a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
index ce79e5de8ce3..1a73296a9a74 100644
--- a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
} __maybe_unused;
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h b/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h
index f84996a73de9..53cb4296df88 100644
--- a/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 1d93a0c574c9..acd1cef61b7c 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -27,7 +27,7 @@
#define PP_MAX_CLOCK_LEVELS 16
-enum amd_pp_display_config_type{
+enum amd_pp_display_config_type {
AMD_PP_DisplayConfigType_None = 0,
AMD_PP_DisplayConfigType_DP54 ,
AMD_PP_DisplayConfigType_DP432 ,
@@ -36,8 +36,8 @@ enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_DP243,
AMD_PP_DisplayConfigType_DP216,
AMD_PP_DisplayConfigType_DP162,
- AMD_PP_DisplayConfigType_HDMI6G ,
- AMD_PP_DisplayConfigType_HDMI297 ,
+ AMD_PP_DisplayConfigType_HDMI6G,
+ AMD_PP_DisplayConfigType_HDMI297,
AMD_PP_DisplayConfigType_HDMI162,
AMD_PP_DisplayConfigType_LVDS,
AMD_PP_DisplayConfigType_DVI,
@@ -45,8 +45,7 @@ enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_VGA
};
-struct single_display_configuration
-{
+struct single_display_configuration {
uint32_t controller_index;
uint32_t controller_id;
uint32_t signal_type;
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index edcb85560ced..32054ecf0b87 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -244,8 +244,7 @@ enum pp_df_cstate {
* @PP_PWR_LIMIT_DEFAULT: Default Power Limit
* @PP_PWR_LIMIT_MAX: Maximum Power Limit
*/
-enum pp_power_limit_level
-{
+enum pp_power_limit_level {
PP_PWR_LIMIT_MIN = -1,
PP_PWR_LIMIT_CURRENT,
PP_PWR_LIMIT_DEFAULT,
@@ -260,8 +259,7 @@ enum pp_power_limit_level
* @PP_PWR_TYPE_FAST: manages the ~10 ms moving average of APU power,
* where supported.
*/
-enum pp_power_type
-{
+enum pp_power_type {
PP_PWR_TYPE_SUSTAINED,
PP_PWR_TYPE_FAST,
};
diff --git a/drivers/gpu/drm/amd/include/navi12_ip_offset.h b/drivers/gpu/drm/amd/include/navi12_ip_offset.h
index d8fc00478b6a..e94d80ec8d92 100644
--- a/drivers/gpu/drm/amd/include/navi12_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/navi12_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/navi14_ip_offset.h b/drivers/gpu/drm/amd/include/navi14_ip_offset.h
index c39ef651adc6..508011288dea 100644
--- a/drivers/gpu/drm/amd/include/navi14_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/navi14_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 5aac8d545bdc..2e8e6c9875f6 100644
--- a/drivers/gpu/drm/amd/include/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -491,7 +491,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[1];
+ UCHAR clockInfo[];
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@@ -501,7 +501,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -658,7 +658,7 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_SAMU_Table
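
Converting the trailing one-element arrays to C99 flexible array members changes the sizing arithmetic: sizeof the struct now covers only the header, so allocations add the element storage explicitly, and fortified routines can see the true bounds of the trailing array. A simplified sketch of that arithmetic (types invented for illustration):

/* Sketch: sizing a flexible-array table. With "entries[1]" the header
 * sizeof already included one element; with "entries[]" it does not. */
#include <stdlib.h>

struct limit_record {
	unsigned short clk;
	unsigned short voltage;
};

struct limit_table {
	unsigned char numEntries;
	struct limit_record entries[];	/* flexible array member */
};

static struct limit_table *alloc_table(unsigned char n)
{
	struct limit_table *t = malloc(sizeof(*t) + n * sizeof(t->entries[0]));

	if (t)
		t->numEntries = n;
	return t;
}

Kernel code would normally express the same computation as struct_size(t, entries, n) from <linux/overflow.h>, which adds overflow checking.
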
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 7dff85c81e5a..fa023cfdf72d 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h b/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h
index b07bc2dd895d..054790470800 100644
--- a/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/v10_structs.h b/drivers/gpu/drm/amd/include/v10_structs.h
index c0e98a98a641..58002a83d1df 100644
--- a/drivers/gpu/drm/amd/include/v10_structs.h
+++ b/drivers/gpu/drm/amd/include/v10_structs.h
@@ -24,8 +24,7 @@
#ifndef V10_STRUCTS_H_
#define V10_STRUCTS_H_
-struct v10_gfx_mqd
-{
+struct v10_gfx_mqd {
uint32_t reserved_0; // offset: 0 (0x0)
uint32_t reserved_1; // offset: 1 (0x1)
uint32_t reserved_2; // offset: 2 (0x2)
diff --git a/drivers/gpu/drm/amd/include/vangogh_ip_offset.h b/drivers/gpu/drm/amd/include/vangogh_ip_offset.h
index 691073ed780e..695d7d04dfa6 100644
--- a/drivers/gpu/drm/amd/include/vangogh_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vangogh_ip_offset.h
@@ -28,13 +28,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/vega10_ip_offset.h b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
index 3a22a5d16919..1e1ca69f21f7 100644
--- a/drivers/gpu/drm/amd/include/vega10_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
@@ -24,13 +24,11 @@
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
index 1deb68f3d334..92cf2d9e767f 100644
--- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
@@ -25,139 +25,137 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
-static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
+static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DCE_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
+static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
+static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
+static const struct IP_BASE SDMA1_BASE = { { { { 0x00001860, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA1_BASE ={ { { { 0x00001860, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
+static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
{ { 0, 0x00009000, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
/* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files */
-static const struct IP_BASE VCE_BASE ={ { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE VCE_BASE = { { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE XDMA_BASE ={ { { { 0x00003400, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
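
Beyond the whitespace fixes, note that UVD_BASE is the one table with a second populated instance ({ 0, 0x00009000, ... }), which gives Vega20's two UVD instances distinct apertures. Lookups are typically wrapped in a macro of roughly this shape (the name is illustrative, not the kernel's):

/* Sketch: instance-aware addressing against the base tables above. */
#define EXAMPLE_REG_ADDR(ip_base, inst, seg, offset) \
	((ip_base).instance[inst].segment[seg] + (offset))

/* UVD instance 0, segment 0: 0x00007800 + offset
 * UVD instance 1, segment 1: 0x00009000 + offset */
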
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6627ee07d52d..f84bfed50681 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -693,6 +693,21 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
return ret;
}
+int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_send_rma_reason(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
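
amdgpu_dpm_send_rma_reason follows the pattern of the neighbouring helpers: return -EOPNOTSUPP when no software SMU is present, and serialize the SMU message under adev->pm.mutex like every other dpm entry point. A hedged sketch of a call site (the surrounding condition is invented; the actual caller is not shown in this hunk):

/* Sketch: reporting an RMA condition to the SMU. The surrounding
 * error-handling policy is hypothetical. */
static void example_report_rma(struct amdgpu_device *adev)
{
	int ret = amdgpu_dpm_send_rma_reason(adev);

	if (ret == -EOPNOTSUPP)
		return;		/* no software SMU on this ASIC */
	if (ret)
		dev_warn(adev->dev, "failed to send RMA reason (%d)\n", ret);
}
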
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 3047ffe7f244..621200e0823f 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -450,6 +450,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size);
+int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev);
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index f503e61faa60..b1b4c09c3467 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -226,7 +226,7 @@ int atomctrl_set_engine_dram_timings_rv770(
return amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
- (uint32_t *)&engine_clock_parameters);
+ (uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters));
}
/*
@@ -297,7 +297,7 @@ int atomctrl_get_memory_pll_dividers_si(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- (uint32_t *)&mpll_parameters);
+ (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
if (0 == result) {
mpll_param->mpll_fb_divider.clk_frac =
@@ -345,7 +345,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- (uint32_t *)&mpll_parameters);
+ (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
if (!result)
mpll_param->mpll_post_divider =
@@ -366,7 +366,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- (uint32_t *)&mpll_parameters);
+ (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
/* VEGAM's mpll takes some time to finish computing */
udelay(10);
@@ -396,7 +396,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_parameters);
+ (uint32_t *)&pll_parameters, sizeof(pll_parameters));
if (0 == result) {
dividers->pll_post_divider = pll_parameters.ucPostDiv;
@@ -420,7 +420,7 @@ int atomctrl_get_engine_pll_dividers_vi(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_patameters);
+ (uint32_t *)&pll_patameters, sizeof(pll_patameters));
if (0 == result) {
dividers->pll_post_divider =
@@ -457,7 +457,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_patameters);
+ (uint32_t *)&pll_patameters, sizeof(pll_patameters));
if (0 == result) {
dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
@@ -490,7 +490,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_patameters);
+ (uint32_t *)&pll_patameters, sizeof(pll_patameters));
if (0 == result) {
dividers->pll_post_divider =
@@ -773,7 +773,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -794,7 +794,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -814,7 +814,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -835,7 +835,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -857,7 +857,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -878,7 +878,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -909,7 +909,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -1134,7 +1134,7 @@ int atomctrl_get_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- (uint32_t *)&get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
*voltage = result ? 0 :
le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
@@ -1179,7 +1179,7 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- (uint32_t *)&get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
if (0 != result)
return result;
@@ -1359,7 +1359,7 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&efuse_param);
+ (uint32_t *)&efuse_param, sizeof(efuse_param));
*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
return result;
@@ -1380,7 +1380,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
- (uint32_t *)&memory_clock_parameters);
+ (uint32_t *)&memory_clock_parameters, sizeof(memory_clock_parameters));
return result;
}
@@ -1399,7 +1399,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- (uint32_t *)&get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
*voltage = result ? 0 :
le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
@@ -1526,7 +1526,7 @@ int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, SetVoltage),
- (uint32_t *)voltage_parameters);
+ (uint32_t *)voltage_parameters, sizeof(*voltage_parameters));
*virtual_voltage_id = voltage_parameters->usVoltageLevel;
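
The ppatomctrl.c hunks above all make the same change: amdgpu_atom_execute_table() now receives the size of the caller's parameter buffer, so the AtomBIOS interpreter can bounds-check its accesses instead of trusting the table blindly. A minimal sketch of the resulting calling convention, using the ComputeMemoryEnginePLL table from the hunks above (the parameter struct name, field, and clock value are assumptions for illustration only):

    COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_params = {0};
    int ret;

    pll_params.ulClock = cpu_to_le32(clock_in_10khz);  /* illustrative input */

    /* the added sizeof() lets the interpreter reject out-of-bounds access */
    ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                                    GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
                                    (uint32_t *)&pll_params, sizeof(pll_params));
    if (ret)
        return ret;
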
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
index a47a47238e2b..82d540334318 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
@@ -258,7 +258,7 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
if (amdgpu_atom_execute_table(
- adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
+ adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters, sizeof(pll_parameters)))
return -EINVAL;
pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
@@ -505,7 +505,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
if (amdgpu_atom_execute_table(
- adev->mode_info.atom_context, ix, (uint32_t *)&parameters))
+ adev->mode_info.atom_context, ix, (uint32_t *)&parameters, sizeof(parameters)))
return -EINVAL;
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 0ad947df777a..eedb9a4f7e2d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -751,6 +751,7 @@ static int smu_early_init(void *handle)
static int smu_set_default_dpm_table(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
int vcn_gate, jpeg_gate;
@@ -759,25 +760,34 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- vcn_gate = atomic_read(&power_gate->vcn_gated);
- jpeg_gate = atomic_read(&power_gate->jpeg_gated);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+ vcn_gate = atomic_read(&power_gate->vcn_gated);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+ jpeg_gate = atomic_read(&power_gate->jpeg_gated);
- ret = smu_dpm_set_vcn_enable(smu, true);
- if (ret)
- return ret;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ ret = smu_dpm_set_vcn_enable(smu, true);
+ if (ret)
+ return ret;
+ }
- ret = smu_dpm_set_jpeg_enable(smu, true);
- if (ret)
- goto err_out;
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ ret = smu_dpm_set_jpeg_enable(smu, true);
+ if (ret)
+ goto err_out;
+ }
ret = smu->ppt_funcs->set_default_dpm_table(smu);
if (ret)
dev_err(smu->adev->dev,
"Failed to setup default dpm clock tables!\n");
- smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+ smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
- smu_dpm_set_vcn_enable(smu, !vcn_gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+ smu_dpm_set_vcn_enable(smu, !vcn_gate);
+
return ret;
}
@@ -3669,3 +3679,13 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
return ret;
}
+
+int smu_send_rma_reason(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
+ ret = smu->ppt_funcs->send_rma_reason(smu);
+
+ return ret;
+}
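
smu_send_rma_reason() above follows the file's optional-callback convention: when a ppt backend does not implement ->send_rma_reason, the wrapper silently returns 0. A hedged caller sketch (the function below is hypothetical; the real call site is not part of this diff):

    /* Hypothetical RAS-side caller, for illustration only */
    static void example_notify_rma(struct amdgpu_device *adev)
    {
        struct smu_context *smu = adev->powerplay.pp_handle;

        /* harmless no-op (returns 0) on ASICs without a ->send_rma_reason hook */
        if (smu_send_rma_reason(smu))
            dev_warn(adev->dev, "failed to send RMA reason to SMU\n");
    }
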
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 66e84defd0b6..a870bdd49a4e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1342,6 +1342,11 @@ struct pptable_funcs {
int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
/**
+ * @send_rma_reason: message rma reason event to SMU.
+ */
+ int (*send_rma_reason)(struct smu_context *smu);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1588,5 +1593,6 @@ int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
+int smu_send_rma_reason(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 509e3cd483fb..86758051cb93 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -91,7 +91,8 @@
#define PPSMC_MSG_QueryValidMcaCeCount 0x3A
#define PPSMC_MSG_McaBankCeDumpDW 0x3B
#define PPSMC_MSG_SelectPLPDMode 0x40
-#define PPSMC_Message_Count 0x41
+#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
+#define PPSMC_Message_Count 0x44
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 953a767613b1..a941fdbf78b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -261,7 +261,8 @@
__SMU_DUMMY_MAP(SetSoftMaxVpe), \
__SMU_DUMMY_MAP(SetSoftMinVpe), \
__SMU_DUMMY_MAP(GetMetricsVersion), \
- __SMU_DUMMY_MAP(EnableUCLKShadow),
+ __SMU_DUMMY_MAP(EnableUCLKShadow), \
+ __SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
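
The smu_types.h change relies on the header's X-macro scheme: the __SMU_DUMMY_MAP list is expanded more than once, under different macro definitions, to generate both the enumerators and the message-name strings. Under the definition shown directly above, the new entry expands as follows (expansion spelled out for illustration):

    /* list entry */   __SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
    /* expands to */   SMU_MSG_RmaDueToBadPageThreshold,
    /* which smu_v13_0_6_message_map[] then binds to PPSMC_MSG_RmaDueToBadPageThreshold (0x43) */
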
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index c7bfa68bf00f..f6545093bfc1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -514,7 +514,7 @@ static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
getsmuclockinfo);
ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
+ (uint32_t *)&input, sizeof(input));
if (ret)
return -EINVAL;
@@ -1432,24 +1432,24 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
- if (src_id == 0xfe) {
+ if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
/* ACK SMUToHost interrupt */
data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
switch (ctxid) {
- case 0x3:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
- case 0x4:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
- case 0x7:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
/*
* Increment the throttle interrupt counter
*/
@@ -1462,6 +1462,10 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
schedule_work(&smu->throttling_logging_work);
break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
}
}
}
@@ -1504,7 +1508,7 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
- 0xfe,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 5e408a195860..ed15f5a0fd11 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -301,7 +301,7 @@ static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
getsmuclockinfo);
ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
+ (uint32_t *)&input, sizeof(input));
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index c486182ff275..48170bb5112e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1369,24 +1369,24 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
- if (src_id == 0xfe) {
+ if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
/* ACK SMUToHost interrupt */
data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
switch (ctxid) {
- case 0x3:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
adev->pm.ac_power = true;
break;
- case 0x4:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
adev->pm.ac_power = false;
break;
- case 0x7:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
/*
* Increment the throttle interrupt counter
*/
@@ -1399,7 +1399,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
schedule_work(&smu->throttling_logging_work);
break;
- case 0x8:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
high = smu->thermal_range.software_shutdown_temp +
smu->thermal_range.software_shutdown_temp_offset;
high = min_t(typeof(high),
@@ -1416,7 +1416,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
break;
- case 0x9:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
high = min_t(typeof(high),
SMU_THERMAL_MAXIMUM_ALERT_TEMP,
smu->thermal_range.software_shutdown_temp);
@@ -1429,6 +1429,10 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
}
}
}
@@ -1473,7 +1477,7 @@ int smu_v13_0_register_irq_handler(struct smu_context *smu)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
- 0xfe,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 7e1941cf1796..45a84fd5dc04 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -45,6 +45,7 @@
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
@@ -171,6 +172,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0),
MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
+ MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
};
// clang-format on
@@ -1438,7 +1440,10 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
entry->src_data[1]);
schedule_work(&smu->throttling_logging_work);
}
-
+ break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
break;
}
}
@@ -1574,6 +1579,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
+ struct smu_13_0_dpm_table *uclk_table =
+ &dpm_context->dpm_tables.uclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int ret;
@@ -1589,17 +1596,27 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return 0;
case AMD_DPM_FORCED_LEVEL_AUTO:
- if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
- (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
- return 0;
+ if ((gfx_table->min != pstate_table->gfxclk_pstate.curr.min) ||
+ (gfx_table->max != pstate_table->gfxclk_pstate.curr.max)) {
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+ smu, gfx_table->min, gfx_table->max);
+ if (ret)
+ return ret;
- ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
- smu, gfx_table->min, gfx_table->max);
- if (ret)
- return ret;
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ }
+
+ if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
+ /* Min UCLK is not expected to be changed */
+ ret = smu_v13_0_set_soft_freq_limited_range(
+ smu, SMU_UCLK, 0, uclk_table->max);
+ if (ret)
+ return ret;
+ pstate_table->uclk_pstate.curr.max = uclk_table->max;
+ }
+ pstate_table->uclk_pstate.custom.max = 0;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
return 0;
@@ -1622,7 +1639,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t max_clk;
int ret = 0;
- if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
+ clk_type != SMU_UCLK)
return -EINVAL;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
@@ -1632,18 +1650,31 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
if (min >= max) {
dev_err(smu->adev->dev,
- "Minimum GFX clk should be less than the maximum allowed clock\n");
+ "Minimum clk should be less than the maximum allowed clock\n");
return -EINVAL;
}
- if ((min == pstate_table->gfxclk_pstate.curr.min) &&
- (max == pstate_table->gfxclk_pstate.curr.max))
- return 0;
+ if (clk_type == SMU_GFXCLK) {
+ if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+ (max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
- ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
- if (!ret) {
- pstate_table->gfxclk_pstate.curr.min = min;
- pstate_table->gfxclk_pstate.curr.max = max;
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+ smu, min, max);
+ if (!ret) {
+ pstate_table->gfxclk_pstate.curr.min = min;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+ }
+
+ if (clk_type == SMU_UCLK) {
+ if (max == pstate_table->uclk_pstate.curr.max)
+ return 0;
+ /* Only max clock limiting is allowed for UCLK */
+ ret = smu_v13_0_set_soft_freq_limited_range(
+ smu, SMU_UCLK, 0, max);
+ if (!ret)
+ pstate_table->uclk_pstate.curr.max = max;
}
return ret;
@@ -1736,6 +1767,40 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu,
+ SMU_FEATURE_DPM_UCLK_BIT)) {
+ dev_warn(smu->adev->dev,
+ "UCLK_LIMITS setting not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (input[0] == 0) {
+ dev_info(smu->adev->dev,
+ "Setting min UCLK level is not supported");
+ return -EINVAL;
+ } else if (input[0] == 1) {
+ if (input[1] > dpm_context->dpm_tables.uclk_table.max) {
+ dev_warn(
+ smu->adev->dev,
+ "Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.uclk_table.max);
+ pstate_table->uclk_pstate.custom.max =
+ pstate_table->uclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->uclk_pstate.custom.max = input[1];
+ }
+ break;
+
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
dev_err(smu->adev->dev,
@@ -1746,8 +1811,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return smu_v13_0_6_set_soft_freq_limited_range(
+ ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk);
+
+ if (ret)
+ return ret;
+
+ min_clk = dpm_context->dpm_tables.uclk_table.min;
+ max_clk = dpm_context->dpm_tables.uclk_table.max;
+ ret = smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_UCLK, min_clk, max_clk);
+ if (ret)
+ return ret;
+ pstate_table->uclk_pstate.custom.max = 0;
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1767,8 +1843,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = pstate_table->gfxclk_pstate.custom.min;
max_clk = pstate_table->gfxclk_pstate.custom.max;
- return smu_v13_0_6_set_soft_freq_limited_range(
+ ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk);
+
+ if (ret)
+ return ret;
+
+ if (!pstate_table->uclk_pstate.custom.max)
+ return 0;
+
+ min_clk = pstate_table->uclk_pstate.curr.min;
+ max_clk = pstate_table->uclk_pstate.custom.max;
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_UCLK, min_clk, max_clk);
}
break;
default:
@@ -2376,6 +2463,24 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ /* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
+ if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to send BadPageThreshold event to SMU\n",
+ __func__);
+
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -2547,18 +2652,22 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
uint64_t status0;
+ uint32_t ext_error_code;
+ uint32_t odecc_err_cnt;
status0 = entry->regs[MCA_REG_IDX_STATUS];
+ ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status0);
+ odecc_err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
*count = 0;
return 0;
}
- if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
- *count = 1;
- else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
- *count = 1;
+ if (umc_v12_0_is_deferred_error(adev, status0) ||
+ umc_v12_0_is_uncorrectable_error(adev, status0) ||
+ umc_v12_0_is_correctable_error(adev, status0))
+ *count = (ext_error_code == 0) ? odecc_err_cnt : 1;
return 0;
}
@@ -2857,6 +2966,143 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
.mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
};
+static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_v13_0_6_mca_set_debug_mode(smu, enable);
+}
+
+static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count)
+{
+ uint32_t msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ msg = SMU_MSG_QueryValidMcaCount;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ msg = SMU_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, msg, count);
+ if (ret) {
+ *count = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
+ enum aca_error_type type, u32 *count)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ case ACA_ERROR_TYPE_CE:
+ ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+ int idx, int offset, u32 *val)
+{
+ uint32_t msg, param;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ msg = SMU_MSG_McaBankDumpDW;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ msg = SMU_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
+
+ return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
+}
+
+static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+ int idx, int offset, u32 *val, int count)
+{
+ int ret, i;
+
+ if (!val)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ ret = __smu_v13_0_6_aca_bank_dump(smu, type, idx, offset + (i << 2), &val[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type,
+ int idx, int reg_idx, u64 *val)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ u32 data[2] = {0, 0};
+ int ret;
+
+ if (!val || reg_idx >= ACA_REG_IDX_COUNT)
+ return -EINVAL;
+
+ ret = smu_v13_0_6_aca_bank_dump(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ *val = (u64)data[1] << 32 | data[0];
+
+ dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
+ type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
+
+ return 0;
+}
+
+static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
+ enum aca_error_type type, int idx, struct aca_bank *bank)
+{
+ int i, ret, count;
+
+ count = min_t(int, 16, ARRAY_SIZE(bank->regs));
+ for (i = 0; i < count; i++) {
+ ret = aca_bank_read_reg(adev, type, idx, i, &bank->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
+ .max_ue_bank_count = 12,
+ .max_ce_bank_count = 12,
+ .set_debug_mode = aca_smu_set_debug_mode,
+ .get_valid_aca_count = aca_smu_get_valid_aca_count,
+ .get_valid_aca_bank = aca_smu_get_valid_aca_bank,
+};
+
static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
enum pp_xgmi_plpd_mode mode)
{
@@ -2895,13 +3141,6 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
return ret;
}
-static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
- void *table)
-{
- /* Support ecc info by default */
- return 0;
-}
-
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2956,7 +3195,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
- .get_ecc_info = smu_v13_0_6_get_ecc_info,
+ .send_rma_reason = smu_v13_0_6_send_rma_reason,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -2969,4 +3208,5 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
+ amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}
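
A note on the ACA bank plumbing added above: each 64-bit bank register is fetched as two dword dumps, with the SMU message parameter packing the bank index into bits 31:16 and the dword-aligned byte offset into bits 15:0. A worked expansion for bank 3, register index 1 (the concrete index-to-register mapping is assumed here for illustration):

    int idx = 3, reg_idx = 1;   /* byte offset = reg_idx * 8 = 8 */
    u32 data[2] = { 0, 0 };     /* filled by two McaBankDumpDW replies */

    u32 lo = ((idx & 0xffff) << 16) | ((reg_idx * 8)     & 0xfffc);  /* 0x00030008 */
    u32 hi = ((idx & 0xffff) << 16) | ((reg_idx * 8 + 4) & 0xfffc);  /* 0x0003000c */

    u64 val = (u64)data[1] << 32 | data[0];  /* merged as in aca_bank_read_reg() */
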
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 4894f7ee737b..2aa7e9945a0b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -892,7 +892,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu)
// TODO: THM related
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
- 0xfe,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 00cd615bbcdc..b8dbd4e25348 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -378,8 +378,15 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
res = __smu_cmn_reg2errno(smu, reg);
if (res != 0)
__smu_cmn_reg_print_error(smu, reg, index, param, msg);
- if (read_arg)
+ if (read_arg) {
smu_cmn_read_arg(smu, read_arg);
+ dev_dbg(adev->dev,
+ "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",

+ smu_get_message_name(smu, msg), index, param, reg, *read_arg);
+ } else {
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
+ smu_get_message_name(smu, msg), index, param, reg);
+ }
Out:
if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
amdgpu_device_halt(adev);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index cc590e27d88a..81bfce1406e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -30,6 +30,16 @@
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
+#define SMU_IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO 0x2
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AC 0x3
+#define SMU_IH_INTERRUPT_CONTEXT_ID_DC 0x4
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
+#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8be235144f6d..b5518ff97165 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -604,10 +604,10 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
* ADV75xx helpers
*/
-static struct edid *adv7511_get_edid(struct adv7511 *adv7511,
- struct drm_connector *connector)
+static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511,
+ struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
@@ -621,31 +621,44 @@ static struct edid *adv7511_get_edid(struct adv7511 *adv7511,
edid_i2c_addr);
}
- edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+ drm_edid = drm_edid_read_custom(connector, adv7511_get_edid_block, adv7511);
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
- drm_detect_hdmi_monitor(edid));
+ if (drm_edid) {
+ /*
+ * FIXME: The CEC physical address should be set using
+ * cec_s_phys_addr(adap,
+ * connector->display_info.source_physical_address, false) from
+ * a path that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
- cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
+ drm_detect_hdmi_monitor(edid));
- return edid;
+ cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
+ } else {
+ cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL);
+ }
+
+ return drm_edid;
}
static int adv7511_get_modes(struct adv7511 *adv7511,
struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
unsigned int count;
- edid = adv7511_get_edid(adv7511, connector);
+ drm_edid = adv7511_edid_read(adv7511, connector);
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
- kfree(edid);
+ drm_edid_free(drm_edid);
return count;
}
@@ -953,12 +966,12 @@ static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge
return adv7511_detect(adv, NULL);
}
-static struct edid *adv7511_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- return adv7511_get_edid(adv, connector);
+ return adv7511_edid_read(adv, connector);
}
static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
@@ -977,7 +990,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = {
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
- .get_edid = adv7511_bridge_get_edid,
+ .edid_read = adv7511_bridge_edid_read,
.hpd_notify = adv7511_bridge_hpd_notify,
};
@@ -1277,17 +1290,6 @@ static int adv7511_probe(struct i2c_client *i2c)
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
- if (i2c->irq) {
- init_waitqueue_head(&adv7511->wq);
-
- ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
- adv7511_irq_handler,
- IRQF_ONESHOT, dev_name(dev),
- adv7511);
- if (ret)
- goto err_unregister_cec;
- }
-
adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
@@ -1311,6 +1313,17 @@ static int adv7511_probe(struct i2c_client *i2c)
adv7511_audio_init(dev, adv7511);
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_unregister_audio;
+ }
+
if (adv7511->info->has_dsi) {
ret = adv7533_attach_dsi(adv7511);
if (ret)
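
The adv7511 conversion above is the template for the bridge drivers that follow: drm_do_get_edid()/drm_add_edid_modes() on a raw struct edid become drm_edid_read_custom()/drm_edid_connector_update()/drm_edid_connector_add_modes() on a const struct drm_edid. The canonical shape of a converted .get_modes(), sketched with placeholder names (example_read_block and ctx stand in for each driver's EDID block reader and its context):

    static int example_get_modes(struct drm_connector *connector)
    {
        const struct drm_edid *drm_edid;
        int count;

        drm_edid = drm_edid_read_custom(connector, example_read_block, ctx);
        /* update the EDID property even when the read failed (NULL is valid) */
        drm_edid_connector_update(connector, drm_edid);
        count = drm_edid_connector_add_modes(connector);
        drm_edid_free(drm_edid);

        return count;
    }
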
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 29d91493b101..9d96d28d6fe8 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1784,24 +1784,14 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
return ret;
}
-static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
+static const struct drm_edid *anx7625_edid_read(struct anx7625_data *ctx)
{
struct device *dev = ctx->dev;
struct s_edid_data *p_edid = &ctx->slimport_edid_p;
int edid_num;
- u8 *edid;
- edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL);
- if (!edid) {
- DRM_DEV_ERROR(dev, "Fail to allocate buffer\n");
- return NULL;
- }
-
- if (ctx->slimport_edid_p.edid_block_num > 0) {
- memcpy(edid, ctx->slimport_edid_p.edid_raw_data,
- FOUR_BLOCK_SIZE);
- return (struct edid *)edid;
- }
+ if (ctx->slimport_edid_p.edid_block_num > 0)
+ goto out;
pm_runtime_get_sync(dev);
_anx7625_hpd_polling(ctx, 5000 * 100);
@@ -1810,14 +1800,14 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
if (edid_num < 1) {
DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
- kfree(edid);
return NULL;
}
p_edid->edid_block_num = edid_num;
- memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE);
- return (struct edid *)edid;
+out:
+ return drm_edid_alloc(ctx->slimport_edid_p.edid_raw_data,
+ FOUR_BLOCK_SIZE);
}
static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
@@ -2492,15 +2482,15 @@ anx7625_bridge_detect(struct drm_bridge *bridge)
return anx7625_sink_detect(ctx);
}
-static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
- return anx7625_get_edid(ctx);
+ return anx7625_edid_read(ctx);
}
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
@@ -2515,7 +2505,7 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
- .get_edid = anx7625_bridge_get_edid,
+ .edid_read = anx7625_bridge_edid_read,
};
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 7d470527455b..e226acc5c15e 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1505,33 +1505,35 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
mhdp->link_up = false;
}
-static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
- struct drm_connector *connector)
+static const struct drm_edid *cdns_mhdp_edid_read(struct cdns_mhdp_device *mhdp,
+ struct drm_connector *connector)
{
if (!mhdp->plugged)
return NULL;
- return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
+ return drm_edid_read_custom(connector, cdns_mhdp_get_edid_block, mhdp);
}
static int cdns_mhdp_get_modes(struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num_modes;
if (!mhdp->plugged)
return 0;
- edid = cdns_mhdp_get_edid(mhdp, connector);
- if (!edid) {
+ drm_edid = cdns_mhdp_edid_read(mhdp, connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid) {
dev_err(mhdp->dev, "Failed to read EDID\n");
return 0;
}
- drm_connector_update_edid_property(connector, edid);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
/*
* HACK: Warn about unsupported display formats until we deal
@@ -2220,12 +2222,12 @@ static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *brid
return cdns_mhdp_detect(mhdp);
}
-static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *cdns_mhdp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
- return cdns_mhdp_get_edid(mhdp, connector);
+ return cdns_mhdp_edid_read(mhdp, connector);
}
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
@@ -2239,7 +2241,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
.atomic_reset = cdns_mhdp_bridge_atomic_reset,
.atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
.detect = cdns_mhdp_bridge_detect,
- .get_edid = cdns_mhdp_bridge_get_edid,
+ .edid_read = cdns_mhdp_bridge_edid_read,
.hpd_enable = cdns_mhdp_bridge_hpd_enable,
.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index 483c28c7fc99..c83486cf6b15 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -230,14 +230,14 @@ static const struct drm_connector_funcs ch7033_connector_funcs = {
static int ch7033_connector_get_modes(struct drm_connector *connector)
{
struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
- edid = drm_bridge_get_edid(priv->next_bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- if (edid) {
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = drm_bridge_edid_read(priv->next_bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (drm_edid) {
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
} else {
ret = drm_add_modes_noedid(connector, 1920, 1080);
drm_set_preferred_mode(connector, 1024, 768);
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 08bd5695ddae..ab8e00baf3f1 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -81,12 +81,12 @@ display_connector_detect(struct drm_bridge *bridge)
}
}
-static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct display_connector *conn = to_display_connector(bridge);
- return drm_get_edid(connector, conn->bridge.ddc);
+ return drm_edid_read_ddc(connector, conn->bridge.ddc);
}
/*
@@ -172,7 +172,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs display_connector_bridge_funcs = {
.attach = display_connector_attach,
.detect = display_connector_detect,
- .get_edid = display_connector_get_edid,
+ .edid_read = display_connector_edid_read,
.atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
.atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 5a4f3d58501e..5965e8027529 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -3,6 +3,24 @@ if ARCH_MXC || COMPILE_TEST
config DRM_IMX_LDB_HELPER
tristate
+config DRM_IMX8MP_DW_HDMI_BRIDGE
+ tristate "Freescale i.MX8MP HDMI-TX bridge support"
+ depends on OF
+ depends on COMMON_CLK
+ select DRM_DW_HDMI
+ select DRM_IMX8MP_HDMI_PVI
+ select PHY_FSL_SAMSUNG_HDMI_PHY
+ help
+ Choose this to enable support for the internal HDMI encoder found
+ on the i.MX8MP SoC.
+
+config DRM_IMX8MP_HDMI_PVI
+ tristate "Freescale i.MX8MP HDMI PVI bridge support"
+ depends on OF
+ help
+ Choose this to enable support for the internal HDMI TX Parallel
+ Video Interface found on the Freescale i.MX8MP SoC.
+
config DRM_IMX8QM_LDB
tristate "Freescale i.MX8QM LVDS display bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile
index 2b0c2e44aa1b..edb0a7b71b30 100644
--- a/drivers/gpu/drm/bridge/imx/Makefile
+++ b/drivers/gpu/drm/bridge/imx/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
+obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o
+obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_COMBINER) += imx8qxp-pixel-combiner.o
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
new file mode 100644
index 000000000000..f2a09c879e3d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright (C) 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define HTX_PVI_CTRL 0x0
+#define PVI_CTRL_OP_VSYNC_POL BIT(18)
+#define PVI_CTRL_OP_HSYNC_POL BIT(17)
+#define PVI_CTRL_OP_DE_POL BIT(16)
+#define PVI_CTRL_INP_VSYNC_POL BIT(14)
+#define PVI_CTRL_INP_HSYNC_POL BIT(13)
+#define PVI_CTRL_INP_DE_POL BIT(12)
+#define PVI_CTRL_MODE_MASK GENMASK(2, 1)
+#define PVI_CTRL_MODE_LCDIF 2
+#define PVI_CTRL_EN BIT(0)
+
+struct imx8mp_hdmi_pvi {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct drm_bridge *next_bridge;
+ void __iomem *regs;
+};
+
+static inline struct imx8mp_hdmi_pvi *
+to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct imx8mp_hdmi_pvi, bridge);
+}
+
+static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ bridge, flags);
+}
+
+static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+ struct drm_connector_state *conn_state;
+ const struct drm_display_mode *mode;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ u32 bus_flags = 0, val;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ if (WARN_ON(pm_runtime_resume_and_get(pvi->dev)))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ val = FIELD_PREP(PVI_CTRL_MODE_MASK, PVI_CTRL_MODE_LCDIF) | PVI_CTRL_EN;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= PVI_CTRL_OP_VSYNC_POL | PVI_CTRL_INP_VSYNC_POL;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= PVI_CTRL_OP_HSYNC_POL | PVI_CTRL_INP_HSYNC_POL;
+
+ if (pvi->next_bridge->timings)
+ bus_flags = pvi->next_bridge->timings->input_bus_flags;
+ else if (bridge_state)
+ bus_flags = bridge_state->input_bus_cfg.flags;
+
+ if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ val |= PVI_CTRL_OP_DE_POL | PVI_CTRL_INP_DE_POL;
+
+ writel(val, pvi->regs + HTX_PVI_CTRL);
+}
+
+static void imx8mp_hdmi_pvi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+
+ writel(0x0, pvi->regs + HTX_PVI_CTRL);
+
+ pm_runtime_put(pvi->dev);
+}
+
+static u32 *
+imx8mp_hdmi_pvi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+ struct drm_bridge *next_bridge = pvi->next_bridge;
+ struct drm_bridge_state *next_state;
+
+ if (!next_bridge->funcs->atomic_get_input_bus_fmts)
+ return NULL;
+
+ next_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ next_bridge);
+
+ return next_bridge->funcs->atomic_get_input_bus_fmts(next_bridge,
+ next_state,
+ crtc_state,
+ conn_state,
+ output_fmt,
+ num_input_fmts);
+}
+
+static const struct drm_bridge_funcs imx_hdmi_pvi_bridge_funcs = {
+ .attach = imx8mp_hdmi_pvi_bridge_attach,
+ .atomic_enable = imx8mp_hdmi_pvi_bridge_enable,
+ .atomic_disable = imx8mp_hdmi_pvi_bridge_disable,
+ .atomic_get_input_bus_fmts = imx8mp_hdmi_pvi_bridge_get_input_bus_fmts,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
+{
+ struct device_node *remote;
+ struct imx8mp_hdmi_pvi *pvi;
+
+ pvi = devm_kzalloc(&pdev->dev, sizeof(*pvi), GFP_KERNEL);
+ if (!pvi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pvi);
+ pvi->dev = &pdev->dev;
+
+ pvi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pvi->regs))
+ return PTR_ERR(pvi->regs);
+
+ /* Get the next bridge in the pipeline. */
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
+ if (!remote)
+ return -EINVAL;
+
+ pvi->next_bridge = of_drm_find_bridge(remote);
+ of_node_put(remote);
+
+ if (!pvi->next_bridge)
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "could not find next bridge\n");
+
+ pm_runtime_enable(&pdev->dev);
+
+ /* Register the bridge. */
+ pvi->bridge.funcs = &imx_hdmi_pvi_bridge_funcs;
+ pvi->bridge.of_node = pdev->dev.of_node;
+ pvi->bridge.timings = pvi->next_bridge->timings;
+
+ drm_bridge_add(&pvi->bridge);
+
+ return 0;
+}
+
+static int imx8mp_hdmi_pvi_remove(struct platform_device *pdev)
+{
+ struct imx8mp_hdmi_pvi *pvi = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&pvi->bridge);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx8mp_hdmi_pvi_match[] = {
+ {
+ .compatible = "fsl,imx8mp-hdmi-pvi",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pvi_match);
+
+static struct platform_driver imx8mp_hdmi_pvi_driver = {
+ .probe = imx8mp_hdmi_pvi_probe,
+ .remove = imx8mp_hdmi_pvi_remove,
+ .driver = {
+ .name = "imx-hdmi-pvi",
+ .of_match_table = imx8mp_hdmi_pvi_match,
+ },
+};
+module_platform_driver(imx8mp_hdmi_pvi_driver);
+
+MODULE_DESCRIPTION("i.MX8MP HDMI TX Parallel Video Interface bridge driver");
+MODULE_LICENSE("GPL");
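
For reference, the enable path of the new PVI driver reduces to a single HTX_PVI_CTRL write. For a mode with positive hsync/vsync polarities and an active-high data enable, the value assembles as below (a worked expansion of imx8mp_hdmi_pvi_bridge_enable(), not additional driver code):

    u32 val;

    val  = FIELD_PREP(PVI_CTRL_MODE_MASK, PVI_CTRL_MODE_LCDIF) | PVI_CTRL_EN;  /* 0x00000005 */
    val |= PVI_CTRL_OP_VSYNC_POL | PVI_CTRL_INP_VSYNC_POL;   /* BIT(18) | BIT(14) */
    val |= PVI_CTRL_OP_HSYNC_POL | PVI_CTRL_INP_HSYNC_POL;   /* BIT(17) | BIT(13) */
    val |= PVI_CTRL_OP_DE_POL    | PVI_CTRL_INP_DE_POL;      /* BIT(16) | BIT(12) */
    /* final value: val == 0x00077005 */
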
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
new file mode 100644
index 000000000000..89fc432ac611
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright (C) 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_modes.h>
+
+struct imx8mp_hdmi {
+ struct dw_hdmi_plat_data plat_data;
+ struct dw_hdmi *dw_hdmi;
+ struct clk *pixclk;
+};
+
+static enum drm_mode_status
+imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct imx8mp_hdmi *hdmi = (struct imx8mp_hdmi *)data;
+
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > 297000)
+ return MODE_CLOCK_HIGH;
+
+ if (clk_round_rate(hdmi->pixclk, mode->clock * 1000) !=
+ mode->clock * 1000)
+ return MODE_CLOCK_RANGE;
+
+ /* We don't support double-clocked and interlaced modes */
+ if ((mode->flags & DRM_MODE_FLAG_DBLCLK) ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static int imx8mp_hdmi_phy_init(struct dw_hdmi *dw_hdmi, void *data,
+ const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ return 0;
+}
+
+static void imx8mp_hdmi_phy_disable(struct dw_hdmi *dw_hdmi, void *data)
+{
+}
+
+static void im8mp_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
+{
+ /*
+ * Just release PHY core from reset, all other power management is done
+ * by the PHY driver.
+ */
+ dw_hdmi_phy_gen1_reset(hdmi);
+
+ dw_hdmi_phy_setup_hpd(hdmi, data);
+}
+
+static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = {
+ .init = imx8mp_hdmi_phy_init,
+ .disable = imx8mp_hdmi_phy_disable,
+ .setup_hpd = im8mp_hdmi_phy_setup_hpd,
+ .read_hpd = dw_hdmi_phy_read_hpd,
+ .update_hpd = dw_hdmi_phy_update_hpd,
+};
+
+static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_hdmi_plat_data *plat_data;
+ struct imx8mp_hdmi *hdmi;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ plat_data = &hdmi->plat_data;
+
+ hdmi->pixclk = devm_clk_get(dev, "pix");
+ if (IS_ERR(hdmi->pixclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->pixclk),
+ "Unable to get pixel clock\n");
+
+ plat_data->mode_valid = imx8mp_hdmi_mode_valid;
+ plat_data->phy_ops = &imx8mp_hdmi_phy_ops;
+ plat_data->phy_name = "SAMSUNG HDMI TX PHY";
+ plat_data->priv_data = hdmi;
+ plat_data->phy_force_vendor = true;
+
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
+ if (IS_ERR(hdmi->dw_hdmi))
+ return PTR_ERR(hdmi->dw_hdmi);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+}
+
+static int imx8mp_dw_hdmi_remove(struct platform_device *pdev)
+{
+ struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ dw_hdmi_remove(hdmi->dw_hdmi);
+
+ return 0;
+}
+
+static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev)
+{
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_resume(hdmi->dw_hdmi);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx8mp_dw_hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend,
+ imx8mp_dw_hdmi_pm_resume)
+};
+
+static const struct of_device_id imx8mp_dw_hdmi_of_table[] = {
+ { .compatible = "fsl,imx8mp-hdmi-tx" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_dw_hdmi_of_table);
+
+static struct platform_driver imx8mp_dw_hdmi_platform_driver = {
+ .probe = imx8mp_dw_hdmi_probe,
+ .remove = imx8mp_dw_hdmi_remove,
+ .driver = {
+ .name = "imx8mp-dw-hdmi-tx",
+ .of_match_table = imx8mp_dw_hdmi_of_table,
+ .pm = &imx8mp_dw_hdmi_pm_ops,
+ },
+};
+
+module_platform_driver(imx8mp_dw_hdmi_platform_driver);
+
+MODULE_DESCRIPTION("i.MX8MP HDMI encoder driver");
+MODULE_LICENSE("GPL");
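
The mode filter in the new TX driver is strict: the pixel clock must sit inside the 13.5-297 MHz window, and the "pix" clock must be able to synthesize it exactly. Walking 1080p60 through imx8mp_hdmi_mode_valid() (the clock value is the standard CEA timing, shown for illustration):

    /* 1080p60: mode->clock == 148500 kHz, inside [13500, 297000] */
    long rate = clk_round_rate(hdmi->pixclk, 148500 * 1000);

    if (rate != 148500 * 1000)
            return MODE_CLOCK_RANGE;   /* the PHY must hit the rate exactly */

    return MODE_OK;  /* assuming no DBLCLK/INTERLACE flags are set */
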
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 2f300f5ca051..27334173e911 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -458,7 +458,7 @@ struct it6505 {
/* it6505 driver hold option */
bool enable_drv_hold;
- struct edid *cached_edid;
+ const struct drm_edid *cached_edid;
};
struct it6505_step_train_para {
@@ -2240,11 +2240,13 @@ static void it6505_link_training_work(struct work_struct *work)
ret = it6505_link_start_auto_train(it6505);
DRM_DEV_DEBUG_DRIVER(dev, "auto train %s, auto_train_retry: %d",
ret ? "pass" : "failed", it6505->auto_train_retry);
- it6505->auto_train_retry--;
if (ret) {
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
it6505_link_train_ok(it6505);
return;
+ } else {
+ it6505->auto_train_retry--;
}
it6505_dump(it6505);
@@ -2261,7 +2263,7 @@ static void it6505_plugged_status_to_codec(struct it6505 *it6505)
static void it6505_remove_edid(struct it6505 *it6505)
{
- kfree(it6505->cached_edid);
+ drm_edid_free(it6505->cached_edid);
it6505->cached_edid = NULL;
}
@@ -3032,15 +3034,16 @@ it6505_bridge_detect(struct drm_bridge *bridge)
return it6505_detect(it6505);
}
-static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *it6505_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
if (!it6505->cached_edid) {
- it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block,
- it6505);
+ it6505->cached_edid = drm_edid_read_custom(connector,
+ it6505_get_edid_block,
+ it6505);
if (!it6505->cached_edid) {
DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
@@ -3048,7 +3051,7 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
}
}
- return drm_edid_duplicate(it6505->cached_edid);
+ return drm_edid_dup(it6505->cached_edid);
}
static const struct drm_bridge_funcs it6505_bridge_funcs = {
@@ -3063,7 +3066,7 @@ static const struct drm_bridge_funcs it6505_bridge_funcs = {
.atomic_pre_enable = it6505_bridge_atomic_pre_enable,
.atomic_post_disable = it6505_bridge_atomic_post_disable,
.detect = it6505_bridge_detect,
- .get_edid = it6505_bridge_get_edid,
+ .edid_read = it6505_bridge_edid_read,
};
static __maybe_unused int it6505_bridge_resume(struct device *dev)
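
Because it6505_bridge_edid_read() above hands out drm_edid_dup() copies, callers may free what they receive without invalidating the cache. In caller terms (a usage sketch, not code from this series):

    const struct drm_edid *drm_edid = drm_bridge_edid_read(bridge, connector);

    /* ... drm_edid_connector_update(connector, drm_edid); etc. ... */

    drm_edid_free(drm_edid);  /* frees the duplicate; it6505->cached_edid stays valid */
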
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 1cf3fb1f13dc..1c3433b5e366 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -874,33 +874,33 @@ static void it66121_bridge_hpd_disable(struct drm_bridge *bridge)
dev_err(ctx->dev, "failed to disable HPD IRQ\n");
}
-static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *it66121_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
mutex_lock(&ctx->lock);
ret = it66121_preamble_ddc(ctx);
if (ret) {
- edid = NULL;
+ drm_edid = NULL;
goto out_unlock;
}
ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
IT66121_DDC_HEADER_EDID);
if (ret) {
- edid = NULL;
+ drm_edid = NULL;
goto out_unlock;
}
- edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx);
+ drm_edid = drm_edid_read_custom(connector, it66121_get_edid_block, ctx);
out_unlock:
mutex_unlock(&ctx->lock);
- return edid;
+ return drm_edid;
}
static const struct drm_bridge_funcs it66121_bridge_funcs = {
@@ -916,7 +916,7 @@ static const struct drm_bridge_funcs it66121_bridge_funcs = {
.mode_set = it66121_bridge_mode_set,
.mode_valid = it66121_bridge_mode_valid,
.detect = it66121_bridge_detect,
- .get_edid = it66121_bridge_get_edid,
+ .edid_read = it66121_bridge_edid_read,
.hpd_enable = it66121_bridge_hpd_enable,
.hpd_disable = it66121_bridge_hpd_disable,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 273157428c82..e7c4bef74aa4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -440,16 +440,16 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret = -1;
int num = 0;
struct lt8912 *lt = connector_to_lt8912(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- edid = drm_bridge_get_edid(lt->hdmi_port, connector);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- num = drm_add_edid_modes(connector, edid);
+ drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (drm_edid) {
+ num = drm_edid_connector_add_modes(connector);
} else {
return ret;
}
@@ -459,7 +459,7 @@ static int lt8912_connector_get_modes(struct drm_connector *connector)
if (ret)
num = ret;
- kfree(edid);
+ drm_edid_free(drm_edid);
return num;
}
@@ -620,8 +620,8 @@ lt8912_bridge_detect(struct drm_bridge *bridge)
return lt8912_check_cable_status(lt);
}
-static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *lt8912_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
@@ -630,7 +630,7 @@ static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge,
* given to the hdmi connector node.
*/
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_EDID)
- return drm_bridge_get_edid(lt->hdmi_port, connector);
+ return drm_bridge_edid_read(lt->hdmi_port, connector);
dev_warn(lt->dev, "The connected bridge does not support DRM_BRIDGE_OP_EDID\n");
return NULL;
@@ -642,7 +642,7 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
.mode_set = lt8912_bridge_mode_set,
.enable = lt8912_bridge_enable,
.detect = lt8912_bridge_detect,
- .get_edid = lt8912_bridge_get_edid,
+ .edid_read = lt8912_bridge_edid_read,
};
static int lt8912_bridge_resume(struct device *dev)
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 9663601ce098..a9c7e2b07ea1 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -18,6 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -846,13 +847,13 @@ lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
lt9611_sleep_setup(lt9611);
}
-static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *lt9611_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
lt9611_power_on(lt9611);
- return drm_do_get_edid(connector, lt9611_get_edid_block, lt9611);
+ return drm_edid_read_custom(connector, lt9611_get_edid_block, lt9611);
}
static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge)
@@ -892,7 +893,7 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
.detect = lt9611_bridge_detect,
- .get_edid = lt9611_bridge_get_edid,
+ .edid_read = lt9611_bridge_edid_read,
.hpd_enable = lt9611_bridge_hpd_enable,
.atomic_pre_enable = lt9611_bridge_atomic_pre_enable,
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index e971b75e90ad..bcf8bccd86d6 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -294,12 +295,12 @@ static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
unsigned int count;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
- edid = drm_bridge_get_edid(&lt9611uxc->bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return count;
}
@@ -494,8 +495,8 @@ static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, siz
return 0;
};
-static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *lt9611uxc_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
int ret;
@@ -509,7 +510,7 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
return NULL;
}
- return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
+ return drm_edid_read_custom(connector, lt9611uxc_get_edid_block, lt9611uxc);
}
static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = {
@@ -517,7 +518,7 @@ static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = {
.mode_valid = lt9611uxc_bridge_mode_valid,
.mode_set = lt9611uxc_bridge_mode_set,
.detect = lt9611uxc_bridge_detect,
- .get_edid = lt9611uxc_bridge_get_edid,
+ .edid_read = lt9611uxc_bridge_edid_read,
};
static int lt9611uxc_parse_dt(struct device *dev,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index e93083bbec9d..4480523244e4 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -91,26 +91,26 @@ static int stdp2690_read_block(void *context, u8 *buf, unsigned int block, size_
return 0;
}
-static struct edid *ge_b850v3_lvds_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *ge_b850v3_lvds_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct i2c_client *client;
client = ge_b850v3_lvds_ptr->stdp2690_i2c;
- return drm_do_get_edid(connector, stdp2690_read_block, client);
+ return drm_edid_read_custom(connector, stdp2690_read_block, client);
}
static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num_modes;
- edid = ge_b850v3_lvds_get_edid(&ge_b850v3_lvds_ptr->bridge, connector);
+ drm_edid = ge_b850v3_lvds_edid_read(&ge_b850v3_lvds_ptr->bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid_connector_update(connector, drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return num_modes;
}
@@ -226,7 +226,7 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
static const struct drm_bridge_funcs ge_b850v3_lvds_funcs = {
.attach = ge_b850v3_lvds_attach,
.detect = ge_b850v3_lvds_bridge_detect,
- .get_edid = ge_b850v3_lvds_get_edid,
+ .edid_read = ge_b850v3_lvds_edid_read,
};
static int ge_b850v3_lvds_init(struct device *dev)
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 7c0076e49953..ed93fd4c3265 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -154,10 +154,11 @@ static void ptn3460_disable(struct drm_bridge *bridge)
}
-static struct edid *ptn3460_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *ptn3460_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
+ const struct drm_edid *drm_edid = NULL;
bool power_off;
u8 *edid;
int ret;
@@ -175,27 +176,28 @@ static struct edid *ptn3460_get_edid(struct drm_bridge *bridge,
EDID_LENGTH);
if (ret) {
kfree(edid);
- edid = NULL;
goto out;
}
+ drm_edid = drm_edid_alloc(edid, EDID_LENGTH);
+
out:
if (power_off)
ptn3460_disable(&ptn_bridge->bridge);
- return (struct edid *)edid;
+ return drm_edid;
}
static int ptn3460_connector_get_modes(struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num_modes;
- edid = ptn3460_get_edid(&ptn_bridge->bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = ptn3460_edid_read(&ptn_bridge->bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return num_modes;
}
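
ptn3460 reads the EDID through the chip's own register interface rather than a DDC adapter, so it wraps the raw bytes with drm_edid_alloc(). The helper copies the buffer into a new drm_edid container, so the temporary buffer remains owned by the caller. The core of such a hook, with my_hw_read_edid() standing in for the device-specific read-out:

	u8 buf[EDID_LENGTH];
	const struct drm_edid *drm_edid;

	if (my_hw_read_edid(dev, buf, sizeof(buf)))	/* hypothetical */
		return NULL;

	/* drm_edid_alloc() duplicates the bytes; buf may be temporary. */
	drm_edid = drm_edid_alloc(buf, sizeof(buf));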
@@ -254,7 +256,7 @@ static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
.pre_enable = ptn3460_pre_enable,
.disable = ptn3460_disable,
.attach = ptn3460_bridge_attach,
- .get_edid = ptn3460_get_edid,
+ .edid_read = ptn3460_edid_read,
};
static int ptn3460_probe(struct i2c_client *client)
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 63a1a0c88be4..95fedc68b0ae 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -96,6 +96,7 @@
#define DSIM_MFLUSH_VS BIT(29)
/* This flag is valid only for exynos3250/3472/5260/5430 */
#define DSIM_CLKLANE_STOP BIT(30)
+#define DSIM_NON_CONTINUOUS_CLKLANE BIT(31)
/* DSIM_ESCMODE */
#define DSIM_TX_TRIGGER_RST BIT(4)
@@ -945,8 +946,12 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* power consumption.
*/
if (driver_data->has_clklane_stop &&
- dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+ reg |= DSIM_NON_CONTINUOUS_CLKLANE;
+
reg |= DSIM_CLKLANE_STOP;
+ }
samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
lanes_mask = BIT(dsi->lanes) - 1;
@@ -1498,6 +1503,7 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
+ samsung_dsim_set_display_enable(dsi, false);
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1506,8 +1512,6 @@ static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- samsung_dsim_set_display_enable(dsi, false);
-
dsi->state &= ~DSIM_STATE_ENABLED;
pm_runtime_put_sync(dsi->dev);
}
@@ -1992,11 +1996,11 @@ int samsung_dsim_probe(struct platform_device *pdev)
else
dsi->bridge.timings = &samsung_dsim_bridge_timings_de_high;
- if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host)
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host) {
ret = dsi->plat_data->host_ops->register_host(dsi);
-
- if (ret)
- goto err_disable_runtime;
+ if (ret)
+ goto err_disable_runtime;
+ }
return 0;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 4560ae9cbce1..8f84e98249c7 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -278,39 +278,35 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static struct edid *sii902x_get_edid(struct sii902x *sii902x,
- struct drm_connector *connector)
+static const struct drm_edid *sii902x_edid_read(struct sii902x *sii902x,
+ struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
mutex_lock(&sii902x->mutex);
- edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
- if (edid) {
- if (drm_detect_hdmi_monitor(edid))
- sii902x->sink_is_hdmi = true;
- else
- sii902x->sink_is_hdmi = false;
- }
+ drm_edid = drm_edid_read_ddc(connector, sii902x->i2cmux->adapter[0]);
mutex_unlock(&sii902x->mutex);
- return edid;
+ return drm_edid;
}
static int sii902x_get_modes(struct drm_connector *connector)
{
struct sii902x *sii902x = connector_to_sii902x(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num = 0;
- edid = sii902x_get_edid(sii902x, connector);
- drm_connector_update_edid_property(connector, edid);
- if (edid) {
- num = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = sii902x_edid_read(sii902x, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (drm_edid) {
+ num = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
}
+ sii902x->sink_is_hdmi = connector->display_info.is_hdmi;
+
return num;
}
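
With the EDID parsed by drm_edid_connector_update(), HDMI-sink detection no longer needs drm_detect_hdmi_monitor() on the raw blob; the parsed result is cached in connector->display_info, as the sii902x hunk above shows. In short:

	/* Valid only after drm_edid_connector_update() on this connector. */
	sii902x->sink_is_hdmi = connector->display_info.is_hdmi;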
@@ -465,12 +461,12 @@ static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge
return sii902x_detect(sii902x);
}
-static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *sii902x_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
- return sii902x_get_edid(sii902x, connector);
+ return sii902x_edid_read(sii902x, connector);
}
static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
@@ -514,7 +510,7 @@ static const struct drm_bridge_funcs sii902x_bridge_funcs = {
.disable = sii902x_bridge_disable,
.enable = sii902x_bridge_enable,
.detect = sii902x_bridge_detect,
- .get_edid = sii902x_bridge_get_edid,
+ .edid_read = sii902x_bridge_edid_read,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index cbe8e778d7c7..5813a2c4fc5e 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -51,18 +51,20 @@ drm_connector_to_simple_bridge(struct drm_connector *connector)
static int simple_bridge_get_modes(struct drm_connector *connector)
{
struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
if (sbridge->next_bridge->ops & DRM_BRIDGE_OP_EDID) {
- edid = drm_bridge_get_edid(sbridge->next_bridge, connector);
- if (!edid)
+ drm_edid = drm_bridge_edid_read(sbridge->next_bridge, connector);
+ if (!drm_edid)
DRM_INFO("EDID read failed. Fallback to standard modes\n");
} else {
- edid = NULL;
+ drm_edid = NULL;
}
- if (!edid) {
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid) {
/*
* In case we cannot retrieve the EDIDs (missing or broken DDC
* bus from the next bridge), fallback on the XGA standards and
@@ -73,9 +75,8 @@ static int simple_bridge_get_modes(struct drm_connector *connector)
return ret;
}
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index aca5bb0866f8..cceb5aab6c83 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -31,6 +31,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -2453,27 +2454,35 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
return result;
}
-static struct edid *dw_hdmi_get_edid(struct dw_hdmi *hdmi,
- struct drm_connector *connector)
+static const struct drm_edid *dw_hdmi_edid_read(struct dw_hdmi *hdmi,
+ struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
if (!hdmi->ddc)
return NULL;
- edid = drm_get_edid(connector, hdmi->ddc);
- if (!edid) {
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
+ if (!drm_edid) {
dev_dbg(hdmi->dev, "failed to get edid\n");
return NULL;
}
+ /*
+ * FIXME: This should use connector->display_info.is_hdmi and
+ * connector->display_info.has_audio from a path that has read the EDID
+ * and called drm_edid_connector_update().
+ */
+ edid = drm_edid_raw(drm_edid);
+
dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
edid->width_cm, edid->height_cm);
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- return edid;
+ return drm_edid;
}
/* -----------------------------------------------------------------------------
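
drm_edid_raw() is the transitional escape hatch used above: it exposes the raw struct edid embedded in a drm_edid so legacy parsers such as drm_detect_hdmi_monitor() keep working until the FIXME is resolved. The returned pointer borrows from the drm_edid and must not be freed or used after drm_edid_free(). Sketch:

	const struct edid *raw = drm_edid_raw(drm_edid);

	/* Borrowed pointer: no kfree(), invalid after drm_edid_free(). */
	if (raw)
		sink_is_hdmi = drm_detect_hdmi_monitor(raw);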
@@ -2492,17 +2501,16 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
- edid = dw_hdmi_get_edid(hdmi, connector);
- if (!edid)
- return 0;
+ drm_edid = dw_hdmi_edid_read(hdmi, connector);
- drm_connector_update_edid_property(connector, edid);
- cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid_connector_update(connector, drm_edid);
+ cec_notifier_set_phys_addr(hdmi->cec_notifier,
+ connector->display_info.source_physical_address);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
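
The CEC physical address likewise now comes from the parsed display_info rather than from cec_notifier_set_phys_addr_from_edid(). Assuming the usual reset behaviour of drm_edid_connector_update(), source_physical_address reads as CEC_PHYS_ADDR_INVALID when no EDID (or no CEA extension) is present, so the call is safe even on a failed read:

	/* After drm_edid_connector_update(); INVALID when unset. */
	cec_notifier_set_phys_addr(hdmi->cec_notifier,
				   connector->display_info.source_physical_address);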
@@ -2979,12 +2987,12 @@ static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge
return dw_hdmi_detect(hdmi);
}
-static struct edid *dw_hdmi_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *dw_hdmi_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi *hdmi = bridge->driver_private;
- return dw_hdmi_get_edid(hdmi, connector);
+ return dw_hdmi_edid_read(hdmi, connector);
}
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
@@ -3001,7 +3009,7 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.mode_set = dw_hdmi_bridge_mode_set,
.mode_valid = dw_hdmi_bridge_mode_valid,
.detect = dw_hdmi_bridge_detect,
- .get_edid = dw_hdmi_bridge_get_edid,
+ .edid_read = dw_hdmi_bridge_edid_read,
};
/* -----------------------------------------------------------------------------
@@ -3541,6 +3549,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.interlace_allowed = true;
hdmi->bridge.ddc = hdmi->ddc;
hdmi->bridge.of_node = pdev->dev.of_node;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 615cc8f950d7..166f9a3e9622 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -41,8 +41,24 @@
/* Registers */
+/* DSI D-PHY Layer registers */
+#define D0W_DPHYCONTTX 0x0004
+#define CLW_DPHYCONTTX 0x0020
+#define D0W_DPHYCONTRX 0x0024
+#define D1W_DPHYCONTRX 0x0028
+#define D2W_DPHYCONTRX 0x002c
+#define D3W_DPHYCONTRX 0x0030
+#define COM_DPHYCONTRX 0x0038
+#define CLW_CNTRL 0x0040
+#define D0W_CNTRL 0x0044
+#define D1W_CNTRL 0x0048
+#define D2W_CNTRL 0x004c
+#define D3W_CNTRL 0x0050
+#define TESTMODE_CNTRL 0x0054
+
/* PPI layer registers */
#define PPI_STARTPPI 0x0104 /* START control bit */
+#define PPI_BUSYPPI 0x0108 /* PPI busy status */
#define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */
#define LPX_PERIOD 3
#define PPI_LANEENABLE 0x0134
@@ -59,6 +75,7 @@
/* DSI layer registers */
#define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */
+#define DSI_BUSYDSI 0x0208 /* DSI busy status */
#define DSI_LANEENABLE 0x0210 /* Enables each lane */
#define DSI_RX_START BIT(0)
@@ -69,6 +86,20 @@
#define LANEENABLE_L2EN BIT(1)
#define LANEENABLE_L3EN BIT(2)
+#define DSI_LANESTATUS0 0x0214 /* DSI lane status 0 */
+#define DSI_LANESTATUS1 0x0218 /* DSI lane status 1 */
+#define DSI_INTSTATUS 0x0220 /* Interrupt Status */
+#define DSI_INTMASK 0x0224 /* Interrupt Mask */
+#define DSI_INTCLR 0x0228 /* Interrupt Clear */
+#define DSI_LPTXTO 0x0230 /* LPTX Time Out Counter */
+
+/* DSI General Registers */
+#define DSIERRCNT 0x0300 /* DSI Error Count Register */
+
+/* DSI Application Layer Registers */
+#define APLCTRL 0x0400 /* Application layer Control Register */
+#define RDPKTLN 0x0404 /* DSI Read packet Length Register */
+
/* Display Parallel Input Interface */
#define DPIPXLFMT 0x0440
#define VS_POL_ACTIVE_LOW (1 << 10)
@@ -114,35 +145,39 @@
#define VFUEN BIT(0) /* Video Frame Timing Upload */
/* System */
-#define TC_IDREG 0x0500
-#define SYSSTAT 0x0508
-#define SYSCTRL 0x0510
-#define DP0_AUDSRC_NO_INPUT (0 << 3)
-#define DP0_AUDSRC_I2S_RX (1 << 3)
-#define DP0_VIDSRC_NO_INPUT (0 << 0)
-#define DP0_VIDSRC_DSI_RX (1 << 0)
-#define DP0_VIDSRC_DPI_RX (2 << 0)
-#define DP0_VIDSRC_COLOR_BAR (3 << 0)
-#define SYSRSTENB 0x050c
+#define TC_IDREG 0x0500 /* Chip ID and Revision ID */
+#define SYSBOOT 0x0504 /* System BootStrap Status Register */
+#define SYSSTAT 0x0508 /* System Status Register */
+#define SYSRSTENB 0x050c /* System Reset/Enable Register */
#define ENBI2C (1 << 0)
#define ENBLCD0 (1 << 2)
#define ENBBM (1 << 3)
#define ENBDSIRX (1 << 4)
#define ENBREG (1 << 5)
#define ENBHDCP (1 << 8)
-#define GPIOM 0x0540
-#define GPIOC 0x0544
-#define GPIOO 0x0548
-#define GPIOI 0x054c
-#define INTCTL_G 0x0560
-#define INTSTS_G 0x0564
+#define SYSCTRL 0x0510 /* System Control Register */
+#define DP0_AUDSRC_NO_INPUT (0 << 3)
+#define DP0_AUDSRC_I2S_RX (1 << 3)
+#define DP0_VIDSRC_NO_INPUT (0 << 0)
+#define DP0_VIDSRC_DSI_RX (1 << 0)
+#define DP0_VIDSRC_DPI_RX (2 << 0)
+#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+#define GPIOM 0x0540 /* GPIO Mode Control Register */
+#define GPIOC 0x0544 /* GPIO Direction Control Register */
+#define GPIOO 0x0548 /* GPIO Output Register */
+#define GPIOI 0x054c /* GPIO Input Register */
+#define INTCTL_G 0x0560 /* General Interrupts Control Register */
+#define INTSTS_G 0x0564 /* General Interrupts Status Register */
#define INT_SYSERR BIT(16)
#define INT_GPIO_H(x) (1 << (x == 0 ? 2 : 10))
#define INT_GPIO_LC(x) (1 << (x == 0 ? 3 : 11))
-#define INT_GP0_LCNT 0x0584
-#define INT_GP1_LCNT 0x0588
+#define TEST_INT_C 0x0570 /* Test Interrupts Control Register */
+#define TEST_INT_S 0x0574 /* Test Interrupts Status Register */
+
+#define INT_GP0_LCNT 0x0584 /* Interrupt GPIO0 Low Count Value Register */
+#define INT_GP1_LCNT 0x0588 /* Interrupt GPIO1 Low Count Value Register */
/* Control */
#define DP0CTL 0x0600
@@ -152,9 +187,12 @@
#define DP_EN BIT(0) /* Enable DPTX function */
/* Clocks */
-#define DP0_VIDMNGEN0 0x0610
-#define DP0_VIDMNGEN1 0x0614
-#define DP0_VMNGENSTATUS 0x0618
+#define DP0_VIDMNGEN0 0x0610 /* DP0 Video Force M Value Register */
+#define DP0_VIDMNGEN1 0x0614 /* DP0 Video Force N Value Register */
+#define DP0_VMNGENSTATUS 0x0618 /* DP0 Video Current M Value Register */
+#define DP0_AUDMNGEN0 0x0628 /* DP0 Audio Force M Value Register */
+#define DP0_AUDMNGEN1 0x062c /* DP0 Audio Force N Value Register */
+#define DP0_AMNGENSTATUS 0x0630 /* DP0 Audio Current M Value Register */
/* Main Channel */
#define DP0_SECSAMPLE 0x0640
@@ -224,8 +262,22 @@
#define DP0_SNKLTCHGREQ 0x06d4
#define DP0_LTLOOPCTRL 0x06d8
#define DP0_SNKLTCTRL 0x06e4
-
-#define DP1_SRCCTRL 0x07a0
+#define DP0_TPATDAT0 0x06e8 /* DP0 Test Pattern bits 29 to 0 */
+#define DP0_TPATDAT1 0x06ec /* DP0 Test Pattern bits 59 to 30 */
+#define DP0_TPATDAT2 0x06f0 /* DP0 Test Pattern bits 89 to 60 */
+#define DP0_TPATDAT3 0x06f4 /* DP0 Test Pattern bits 119 to 90 */
+
+#define AUDCFG0 0x0700 /* DP0 Audio Config0 Register */
+#define AUDCFG1 0x0704 /* DP0 Audio Config1 Register */
+#define AUDIFDATA0 0x0708 /* DP0 Audio Info Frame Bytes 3 to 0 */
+#define AUDIFDATA1 0x070c /* DP0 Audio Info Frame Bytes 7 to 4 */
+#define AUDIFDATA2 0x0710 /* DP0 Audio Info Frame Bytes 11 to 8 */
+#define AUDIFDATA3 0x0714 /* DP0 Audio Info Frame Bytes 15 to 12 */
+#define AUDIFDATA4 0x0718 /* DP0 Audio Info Frame Bytes 19 to 16 */
+#define AUDIFDATA5 0x071c /* DP0 Audio Info Frame Bytes 23 to 20 */
+#define AUDIFDATA6 0x0720 /* DP0 Audio Info Frame Bytes 27 to 24 */
+
+#define DP1_SRCCTRL 0x07a0 /* DP1 Control Register */
/* PHY */
#define DP_PHY_CTRL 0x0800
@@ -238,6 +290,25 @@
#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
+#define DP_PHY_CFG_WR 0x0810 /* DP PHY Configuration Test Write Register */
+#define DP_PHY_CFG_RD 0x0814 /* DP PHY Configuration Test Read Register */
+#define DP0_AUX_PHY_CTRL 0x0820 /* DP0 AUX PHY Control Register */
+#define DP0_MAIN_PHY_DBG 0x0840 /* DP0 Main PHY Test Debug Register */
+
+/* I2S */
+#define I2SCFG 0x0880 /* I2S Audio Config 0 Register */
+#define I2SCH0STAT0 0x0888 /* I2S Audio Channel 0 Status Bytes 3 to 0 */
+#define I2SCH0STAT1 0x088c /* I2S Audio Channel 0 Status Bytes 7 to 4 */
+#define I2SCH0STAT2 0x0890 /* I2S Audio Channel 0 Status Bytes 11 to 8 */
+#define I2SCH0STAT3 0x0894 /* I2S Audio Channel 0 Status Bytes 15 to 12 */
+#define I2SCH0STAT4 0x0898 /* I2S Audio Channel 0 Status Bytes 19 to 16 */
+#define I2SCH0STAT5 0x089c /* I2S Audio Channel 0 Status Bytes 23 to 20 */
+#define I2SCH1STAT0 0x08a0 /* I2S Audio Channel 1 Status Bytes 3 to 0 */
+#define I2SCH1STAT1 0x08a4 /* I2S Audio Channel 1 Status Bytes 7 to 4 */
+#define I2SCH1STAT2 0x08a8 /* I2S Audio Channel 1 Status Bytes 11 to 8 */
+#define I2SCH1STAT3 0x08ac /* I2S Audio Channel 1 Status Bytes 15 to 12 */
+#define I2SCH1STAT4 0x08b0 /* I2S Audio Channel 1 Status Bytes 19 to 16 */
+#define I2SCH1STAT5 0x08b4 /* I2S Audio Channel 1 Status Bytes 23 to 20 */
/* PLL */
#define DP0_PLLCTRL 0x0900
@@ -546,9 +617,14 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
continue;
for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
for (div = 1; div <= 16; div++) {
- u32 clk;
+ u32 clk, iclk;
u64 tmp;
+ /* PCLK PLL input unit clock ... 6..40 MHz */
+ iclk = refclk / (div * ext_div[i_pre]);
+ if (iclk < 6000000 || iclk > 40000000)
+ continue;
+
tmp = pixelclock * ext_div[i_pre] *
ext_div[i_post] * div;
do_div(tmp, refclk);
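
The guard added to tc_pxl_pll_en() prunes divider combinations whose pre-divided reference clock falls outside the 6..40 MHz PLL input window noted in the patch's comment, before any M/N arithmetic is done. The same check as a standalone helper (a sketch, not part of the patch):

static bool tc_pll_input_ok(u32 refclk, u32 pre_div, u32 div)
{
	/* PCLK PLL input unit clock must stay within 6..40 MHz. */
	u32 iclk = refclk / (div * pre_div);

	return iclk >= 6000000 && iclk <= 40000000;
}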
@@ -1575,19 +1651,19 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&tc->mode, mode);
}
-static struct edid *tc_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct tc_data *tc = bridge_to_tc(bridge);
- return drm_get_edid(connector, &tc->aux.ddc);
+ return drm_edid_read_ddc(connector, &tc->aux.ddc);
}
static int tc_connector_get_modes(struct drm_connector *connector)
{
struct tc_data *tc = connector_to_tc(connector);
int num_modes;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
ret = tc_get_display_props(tc);
@@ -1602,9 +1678,10 @@ static int tc_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
- edid = tc_get_edid(&tc->bridge, connector);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = tc_edid_read(&tc->bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return num_modes;
}
@@ -1773,7 +1850,7 @@ static const struct drm_bridge_funcs tc_edp_bridge_funcs = {
.atomic_enable = tc_edp_bridge_atomic_enable,
.atomic_disable = tc_edp_bridge_atomic_disable,
.detect = tc_bridge_detect,
- .get_edid = tc_get_edid,
+ .edid_read = tc_edid_read,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
@@ -1833,16 +1910,16 @@ static bool tc_readable_reg(struct device *dev, unsigned int reg)
case 0x1f4:
/* DSI Protocol Layer */
case DSI_STARTDSI:
- case 0x208:
+ case DSI_BUSYDSI:
case DSI_LANEENABLE:
- case 0x214:
- case 0x218:
- case 0x220:
+ case DSI_LANESTATUS0:
+ case DSI_LANESTATUS1:
+ case DSI_INTSTATUS:
case 0x224:
case 0x228:
case 0x230:
/* DSI General */
- case 0x300:
+ case DSIERRCNT:
/* DSI Application Layer */
case 0x400:
case 0x404:
@@ -1978,13 +2055,20 @@ static bool tc_readable_reg(struct device *dev, unsigned int reg)
}
static const struct regmap_range tc_volatile_ranges[] = {
+ regmap_reg_range(PPI_BUSYPPI, PPI_BUSYPPI),
+ regmap_reg_range(DSI_BUSYDSI, DSI_BUSYDSI),
+ regmap_reg_range(DSI_LANESTATUS0, DSI_INTSTATUS),
+ regmap_reg_range(DSIERRCNT, DSIERRCNT),
+ regmap_reg_range(VFUEN0, VFUEN0),
+ regmap_reg_range(SYSSTAT, SYSSTAT),
+ regmap_reg_range(GPIOI, GPIOI),
+ regmap_reg_range(INTSTS_G, INTSTS_G),
+ regmap_reg_range(DP0_VMNGENSTATUS, DP0_VMNGENSTATUS),
+ regmap_reg_range(DP0_AMNGENSTATUS, DP0_AMNGENSTATUS),
regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS),
regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
- regmap_reg_range(VFUEN0, VFUEN0),
- regmap_reg_range(INTSTS_G, INTSTS_G),
- regmap_reg_range(GPIOI, GPIOI),
};
static const struct regmap_access_table tc_volatile_table = {
@@ -1992,12 +2076,28 @@ static const struct regmap_access_table tc_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges),
};
-static bool tc_writeable_reg(struct device *dev, unsigned int reg)
-{
- return (reg != TC_IDREG) &&
- (reg != DP0_LTSTAT) &&
- (reg != DP0_SNKLTCHGREQ);
-}
+static const struct regmap_range tc_precious_ranges[] = {
+ regmap_reg_range(SYSSTAT, SYSSTAT),
+};
+
+static const struct regmap_access_table tc_precious_table = {
+ .yes_ranges = tc_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tc_precious_ranges),
+};
+
+static const struct regmap_range tc_non_writeable_ranges[] = {
+ regmap_reg_range(PPI_BUSYPPI, PPI_BUSYPPI),
+ regmap_reg_range(DSI_BUSYDSI, DSI_BUSYDSI),
+ regmap_reg_range(DSI_LANESTATUS0, DSI_INTSTATUS),
+ regmap_reg_range(TC_IDREG, SYSSTAT),
+ regmap_reg_range(GPIOI, GPIOI),
+ regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
+};
+
+static const struct regmap_access_table tc_writeable_table = {
+ .no_ranges = tc_non_writeable_ranges,
+ .n_no_ranges = ARRAY_SIZE(tc_non_writeable_ranges),
+};
static const struct regmap_config tc_regmap_config = {
.name = "tc358767",
@@ -2008,7 +2108,8 @@ static const struct regmap_config tc_regmap_config = {
.cache_type = REGCACHE_MAPLE,
.readable_reg = tc_readable_reg,
.volatile_table = &tc_volatile_table,
- .writeable_reg = tc_writeable_reg,
+ .precious_table = &tc_precious_table,
+ .wr_table = &tc_writeable_table,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
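
The regmap rework above trades the writeable_reg() callback for declarative access tables: volatile ranges bypass the register cache, the precious range (SYSSTAT) marks a register whose reads have side effects so regmap never reads it behind the driver's back (e.g. via debugfs), and wr_table blacklists read-only registers via no_ranges. The same idiom for a hypothetical device:

static const struct regmap_range my_volatile_ranges[] = {
	regmap_reg_range(0x08, 0x08),		/* live status */
};

static const struct regmap_access_table my_volatile_table = {
	.yes_ranges = my_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(my_volatile_ranges),
};

static const struct regmap_range my_ro_ranges[] = {
	regmap_reg_range(0x00, 0x04),		/* ID registers */
};

static const struct regmap_access_table my_wr_table = {
	.no_ranges = my_ro_ranges,		/* all else writeable */
	.n_no_ranges = ARRAY_SIZE(my_ro_ranges),
};

static const struct regmap_config my_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.cache_type = REGCACHE_MAPLE,
	.volatile_table = &my_volatile_table,
	.wr_table = &my_wr_table,
};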
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 62cc3893dca5..61dc6f063fb4 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1207,19 +1207,19 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
: connector_status_disconnected;
}
-static struct edid *ti_sn_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
- return drm_get_edid(connector, &pdata->aux.ddc);
+ return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
.mode_valid = ti_sn_bridge_mode_valid,
- .get_edid = ti_sn_bridge_get_edid,
+ .edid_read = ti_sn_bridge_edid_read,
.detect = ti_sn_bridge_detect,
.atomic_pre_enable = ti_sn_bridge_atomic_pre_enable,
.atomic_enable = ti_sn_bridge_atomic_enable,
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 28848a8eb42e..c7bef5c23927 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -50,18 +50,20 @@ drm_connector_to_tfp410(struct drm_connector *connector)
static int tfp410_get_modes(struct drm_connector *connector)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
if (dvi->next_bridge->ops & DRM_BRIDGE_OP_EDID) {
- edid = drm_bridge_get_edid(dvi->next_bridge, connector);
- if (!edid)
+ drm_edid = drm_bridge_edid_read(dvi->next_bridge, connector);
+ if (!drm_edid)
DRM_INFO("EDID read failed. Fallback to standard modes\n");
} else {
- edid = NULL;
+ drm_edid = NULL;
}
- if (!edid) {
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid) {
/*
* No EDID, fallback on the XGA standard modes and prefer a mode
* pretty much anything can handle.
@@ -71,11 +73,9 @@ static int tfp410_get_modes(struct drm_connector *connector)
return ret;
}
- drm_connector_update_edid_property(connector, edid);
-
- ret = drm_add_edid_modes(connector, edid);
+ ret = drm_edid_connector_add_modes(connector);
- kfree(edid);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index f73f3471e94e..106f2d40d222 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -26,6 +26,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb"
elif [[ "$KERNEL_ARCH" = "arm" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
DEBIAN_ARCH="armhf"
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index dac92cc2777c..084e3ff8e3f4 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,6 +1,6 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha edfbf74df1d4d6ce54ffe24566108be0e1a98c3d
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 9d162de9a05155e1c4041857a5848842749164cf
UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
TARGET_BRANCH: drm-next
@@ -25,7 +25,9 @@ variables:
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
# default kernel for rootfs before injecting the current kernel tree
- KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/v6.4.12-for-mesa-ci-f6b4ad45f48d
+ KERNEL_REPO: "gfx-ci/linux"
+ KERNEL_TAG: "v6.6.4-for-mesa-ci-e4f4c500f7fb"
+ KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
LAVA_TAGS: subset-1-gfx
LAVA_JOB_PRIORITY: 30
@@ -133,6 +135,11 @@ stages:
- if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
when: on_success
+.never-post-merge-rules:
+ rules:
+ - if: *is-post-merge
+ when: never
+
# Rule to filter for only scheduled pipelines.
.scheduled_pipeline-rules:
rules:
@@ -150,6 +157,7 @@ stages:
.build-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
# Run automatically once all dependency jobs have passed
- when: on_success
@@ -157,6 +165,7 @@ stages:
.container+build-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
- when: manual
.ci-deqp-artifacts:
@@ -175,6 +184,7 @@ stages:
.container-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
# Run pipeline by default in the main project if any CI pipeline
# configuration files were changed, to ensure docker images are up to date
- if: *is-post-merge
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 2c9a1838e728..0857773e5c5f 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -82,20 +82,35 @@
tags:
- $RUNNER_TAG
-msm:sc7180:
+.msm-sc7180:
extends:
- .lava-igt:arm64
stage: msm
- parallel: 4
variables:
DRIVER_NAME: msm
- DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
- DTB: sc7180-trogdor-lazor-limozeen-nots-r5
BOOT_METHOD: depthcharge
KERNEL_IMAGE_TYPE: ""
- GPU_VERSION: sc7180
+
+msm:sc7180-trogdor-lazor-limozeen:
+ extends:
+ - .msm-sc7180
+ parallel: 4
+ variables:
+ DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
+ DTB: sc7180-trogdor-lazor-limozeen-nots-r5
+ GPU_VERSION: ${DEVICE_TYPE}
RUNNER_TAG: mesa-ci-x86-64-lava-sc7180-trogdor-lazor-limozeen
+msm:sc7180-trogdor-kingoftown:
+ extends:
+ - .msm-sc7180
+ parallel: 6
+ variables:
+ DEVICE_TYPE: sc7180-trogdor-kingoftown
+ DTB: sc7180-trogdor-kingoftown
+ GPU_VERSION: ${DEVICE_TYPE}
+ RUNNER_TAG: mesa-ci-x86-64-lava-sc7180-trogdor-kingoftown
+
msm:apq8016:
extends:
- .baremetal-igt-arm64
@@ -104,7 +119,10 @@ msm:apq8016:
DRIVER_NAME: msm
BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc-usb-host.dtb
GPU_VERSION: apq8016
- BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
+ # disabling unused clocks conflicts with the MDSS runtime PM trying to
+ # disable those clocks and causes boot to fail.
+ # Reproducer: DRM_MSM=y, DRM_I2C_ADV7511=m
+ BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- ./install/bare-metal/fastboot.sh
@@ -324,6 +342,7 @@ virtio_gpu:none:
GPU_VERSION: none
extends:
- .test-gl
+ - .test-rules
tags:
- kvm
script:
diff --git a/drivers/gpu/drm/ci/testlist.txt b/drivers/gpu/drm/ci/testlist.txt
index f82cd90372f4..3377f002f8c5 100644
--- a/drivers/gpu/drm/ci/testlist.txt
+++ b/drivers/gpu/drm/ci/testlist.txt
@@ -100,7 +100,7 @@ kms_atomic@plane-invalid-params-fence
kms_atomic@crtc-invalid-params
kms_atomic@crtc-invalid-params-fence
kms_atomic@atomic-invalid-params
-kms_atomic@atomic_plane_damage
+kms_atomic@atomic-plane-damage
kms_atomic_interruptible@legacy-setmode
kms_atomic_interruptible@atomic-setmode
kms_atomic_interruptible@legacy-dpms
@@ -321,726 +321,726 @@ kms_bw@linear-tiling-7-displays-3840x2160p
kms_bw@linear-tiling-8-displays-1920x1080p
kms_bw@linear-tiling-8-displays-2560x1440p
kms_bw@linear-tiling-8-displays-3840x2160p
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-A-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-A-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-A-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-A-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-A-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-B-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-B-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-B-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-B-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-B-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-C-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-C-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-C-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-C-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-C-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-D-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-D-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-D-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-D-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-D-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-E-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-E-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-E-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-E-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-E-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-F-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-F-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-F-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-F-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-F-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-G-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-G-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-G-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-G-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-G-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-H-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-H-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-H-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-H-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-H-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-A-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-A-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-A-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-A-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-A-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-B-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-B-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-B-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-B-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-B-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-C-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-C-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-C-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-C-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-C-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-D-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-D-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-D-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-D-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-D-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-E-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-E-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-E-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-E-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-E-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-F-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-F-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-F-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-F-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-F-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-G-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-G-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-G-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-G-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-G-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-H-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-H-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-H-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-H-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-H-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
kms_cdclk@plane-scaling
kms_cdclk@mode-transition
kms_cdclk@mode-transition-all-outputs
@@ -1061,21 +1061,14 @@ kms_color@deep-color
kms_color@invalid-gamma-lut-sizes
kms_color@invalid-degamma-lut-sizes
kms_color@invalid-ctm-matrix-sizes
-kms_concurrent@pipe-A
-kms_concurrent@pipe-B
-kms_concurrent@pipe-C
-kms_concurrent@pipe-D
-kms_concurrent@pipe-E
-kms_concurrent@pipe-F
-kms_concurrent@pipe-G
-kms_concurrent@pipe-H
+kms_concurrent@multi-plane-atomic-lowres
kms_content_protection@legacy
kms_content_protection@atomic
kms_content_protection@atomic-dpms
-kms_content_protection@LIC
+kms_content_protection@lic
kms_content_protection@type1
-kms_content_protection@mei_interface
-kms_content_protection@content_type_change
+kms_content_protection@mei-interface
+kms_content_protection@content-type-change
kms_content_protection@uevent
kms_content_protection@srm
kms_content_protection@dp-mst-type-0
@@ -1218,8 +1211,8 @@ kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions-varying-size
kms_cursor_legacy@cursorA-vs-flipB-atomic-transitions-varying-size
kms_cursor_legacy@cursorB-vs-flipA-atomic-transitions-varying-size
kms_cursor_legacy@cursorB-vs-flipB-atomic-transitions-varying-size
-kms_dither@FB-8BPC-Vs-Panel-6BPC
-kms_dither@FB-8BPC-Vs-Panel-8BPC
+kms_dither@fb-8bpc-vs-panel-6bpc
+kms_dither@fb-8bpc-vs-panel-8bpc
kms_dp_aux_dev
kms_tiled_display@basic-test-pattern
kms_tiled_display@basic-test-pattern-with-chamelium
@@ -2351,7 +2344,6 @@ kms_frontbuffer_tracking@psrdrrs-shrfb-scaledprimary
kms_frontbuffer_tracking@fbcpsrdrrs-indfb-scaledprimary
kms_frontbuffer_tracking@fbcpsrdrrs-shrfb-scaledprimary
kms_frontbuffer_tracking@fbc-modesetfrombusy
-kms_frontbuffer_tracking@fbc-badstride
kms_frontbuffer_tracking@fbc-stridechange
kms_frontbuffer_tracking@fbc-tiling-linear
kms_frontbuffer_tracking@fbc-tiling-y
@@ -2361,7 +2353,6 @@ kms_frontbuffer_tracking@psr-modesetfrombusy
kms_frontbuffer_tracking@psr-slowdraw
kms_frontbuffer_tracking@psr-suspend
kms_frontbuffer_tracking@fbcpsr-modesetfrombusy
-kms_frontbuffer_tracking@fbcpsr-badstride
kms_frontbuffer_tracking@fbcpsr-stridechange
kms_frontbuffer_tracking@fbcpsr-tiling-linear
kms_frontbuffer_tracking@fbcpsr-tiling-y
@@ -2372,7 +2363,6 @@ kms_frontbuffer_tracking@drrs-modesetfrombusy
kms_frontbuffer_tracking@drrs-slowdraw
kms_frontbuffer_tracking@drrs-suspend
kms_frontbuffer_tracking@fbcdrrs-modesetfrombusy
-kms_frontbuffer_tracking@fbcdrrs-badstride
kms_frontbuffer_tracking@fbcdrrs-stridechange
kms_frontbuffer_tracking@fbcdrrs-tiling-linear
kms_frontbuffer_tracking@fbcdrrs-tiling-y
@@ -2383,7 +2373,6 @@ kms_frontbuffer_tracking@psrdrrs-modesetfrombusy
kms_frontbuffer_tracking@psrdrrs-slowdraw
kms_frontbuffer_tracking@psrdrrs-suspend
kms_frontbuffer_tracking@fbcpsrdrrs-modesetfrombusy
-kms_frontbuffer_tracking@fbcpsrdrrs-badstride
kms_frontbuffer_tracking@fbcpsrdrrs-stridechange
kms_frontbuffer_tracking@fbcpsrdrrs-tiling-linear
kms_frontbuffer_tracking@fbcpsrdrrs-tiling-y
@@ -2456,7 +2445,7 @@ kms_plane@plane-position-hole-dpms
kms_plane@plane-panning-top-left
kms_plane@plane-panning-bottom-right
kms_plane@plane-panning-bottom-right-suspend
-kms_plane@invalid-pixel-format-settings
+kms_plane@planar-pixel-format-settings
kms_plane_alpha_blend@alpha-basic
kms_plane_alpha_blend@alpha-7efc
kms_plane_alpha_blend@coverage-7efc
@@ -2479,24 +2468,24 @@ kms_plane_multiple@tiling-x
kms_plane_multiple@tiling-y
kms_plane_multiple@tiling-yf
kms_plane_multiple@tiling-4
-kms_plane_scaling@plane-upscale-with-pixel-format-20x20
-kms_plane_scaling@plane-upscale-with-pixel-format-factor-0-25
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-5
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-75
-kms_plane_scaling@plane-scaler-with-pixel-format-unity-scaling
-kms_plane_scaling@plane-upscale-with-rotation-20x20
-kms_plane_scaling@plane-upscale-with-rotation-factor-0-25
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-25
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-5
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-75
-kms_plane_scaling@plane-scaler-with-rotation-unity-scaling
-kms_plane_scaling@plane-upscale-with-modifiers-20x20
-kms_plane_scaling@plane-upscale-with-modifiers-factor-0-25
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-25
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-5
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-75
-kms_plane_scaling@plane-scaler-with-modifiers-unity-scaling
+kms_plane_scaling@plane-upscale-20x20-with-pixel-format
+kms_plane_scaling@plane-upscale-factor-0-25-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-25-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-5-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-75-with-pixel-format
+kms_plane_scaling@plane-scaler-unity-scaling-with-pixel-format
+kms_plane_scaling@plane-upscale-20x20-with-rotation
+kms_plane_scaling@plane-upscale-factor-0-25-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-25-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-5-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-75-with-rotation
+kms_plane_scaling@plane-scaler-unity-scaling-with-rotation
+kms_plane_scaling@plane-upscale-20x20-with-modifiers
+kms_plane_scaling@plane-upscale-factor-0-25-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-25-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-5-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-75-with-modifiers
+kms_plane_scaling@plane-scaler-unity-scaling-with-modifiers
kms_plane_scaling@plane-scaler-with-clipping-clamping-pixel-formats
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation
kms_plane_scaling@plane-scaler-with-clipping-clamping-modifiers
@@ -2551,48 +2540,69 @@ kms_properties@invalid-properties-legacy
kms_properties@invalid-properties-atomic
kms_properties@get_properties-sanity-atomic
kms_properties@get_properties-sanity-non-atomic
-kms_psr@basic
-kms_psr@no_drrs
-kms_psr@primary_page_flip
-kms_psr@primary_mmap_gtt
-kms_psr@primary_mmap_cpu
-kms_psr@primary_blt
-kms_psr@primary_render
-kms_psr@sprite_mmap_gtt
-kms_psr@cursor_mmap_gtt
-kms_psr@sprite_mmap_cpu
-kms_psr@cursor_mmap_cpu
-kms_psr@sprite_blt
-kms_psr@cursor_blt
-kms_psr@sprite_render
-kms_psr@cursor_render
-kms_psr@sprite_plane_move
-kms_psr@cursor_plane_move
-kms_psr@sprite_plane_onoff
-kms_psr@cursor_plane_onoff
-kms_psr@dpms
-kms_psr@suspend
-kms_psr@psr2_basic
-kms_psr@psr2_no_drrs
-kms_psr@psr2_primary_page_flip
-kms_psr@psr2_primary_mmap_gtt
-kms_psr@psr2_primary_mmap_cpu
-kms_psr@psr2_primary_blt
-kms_psr@psr2_primary_render
-kms_psr@psr2_sprite_mmap_gtt
-kms_psr@psr2_cursor_mmap_gtt
-kms_psr@psr2_sprite_mmap_cpu
-kms_psr@psr2_cursor_mmap_cpu
-kms_psr@psr2_sprite_blt
-kms_psr@psr2_cursor_blt
-kms_psr@psr2_sprite_render
-kms_psr@psr2_cursor_render
-kms_psr@psr2_sprite_plane_move
-kms_psr@psr2_cursor_plane_move
-kms_psr@psr2_sprite_plane_onoff
-kms_psr@psr2_cursor_plane_onoff
-kms_psr@psr2_dpms
-kms_psr@psr2_suspend
+kms_psr@pr-basic
+kms_psr@pr-no-drrs
+kms_psr@pr-primary-page-flip
+kms_psr@pr-primary-mmap-gtt
+kms_psr@pr-primary-mmap-cpu
+kms_psr@pr-primary-blt
+kms_psr@pr-primary-render
+kms_psr@pr-sprite-mmap-gtt
+kms_psr@pr-cursor-mmap-gtt
+kms_psr@pr-sprite-mmap-cpu
+kms_psr@pr-cursor-mmap-cpu
+kms_psr@pr-sprite-blt
+kms_psr@pr-cursor-blt
+kms_psr@pr-sprite-render
+kms_psr@pr-cursor-render
+kms_psr@pr-sprite-plane-move
+kms_psr@pr-cursor-plane-move
+kms_psr@pr-sprite-plane-onoff
+kms_psr@pr-cursor-plane-onoff
+kms_psr@pr-dpms
+kms_psr@pr-suspend
+kms_psr@psr-basic
+kms_psr@psr-no-drrs
+kms_psr@psr-primary-page-flip
+kms_psr@psr-primary-mmap-gtt
+kms_psr@psr-primary-mmap-cpu
+kms_psr@psr-primary-blt
+kms_psr@psr-primary-render
+kms_psr@psr-sprite-mmap-gtt
+kms_psr@psr-cursor-mmap-gtt
+kms_psr@psr-sprite-mmap-cpu
+kms_psr@psr-cursor-mmap-cpu
+kms_psr@psr-sprite-blt
+kms_psr@psr-cursor-blt
+kms_psr@psr-sprite-render
+kms_psr@psr-cursor-render
+kms_psr@psr-sprite-plane-move
+kms_psr@psr-cursor-plane-move
+kms_psr@psr-sprite-plane-onoff
+kms_psr@psr-cursor-plane-onoff
+kms_psr@psr-dpms
+kms_psr@psr-suspend
+kms_psr@psr2-basic
+kms_psr@psr2-no-drrs
+kms_psr@psr2-primary-page-flip
+kms_psr@psr2-primary-mmap-gtt
+kms_psr@psr2-primary-mmap-cpu
+kms_psr@psr2-primary-blt
+kms_psr@psr2-primary-render
+kms_psr@psr2-sprite-mmap-gtt
+kms_psr@psr2-cursor-mmap-gtt
+kms_psr@psr2-sprite-mmap-cpu
+kms_psr@psr2-cursor-mmap-cpu
+kms_psr@psr2-sprite-blt
+kms_psr@psr2-cursor-blt
+kms_psr@psr2-sprite-render
+kms_psr@psr2-cursor-render
+kms_psr@psr2-sprite-plane-move
+kms_psr@psr2-cursor-plane-move
+kms_psr@psr2-sprite-plane-onoff
+kms_psr@psr2-cursor-plane-onoff
+kms_psr@psr2-dpms
+kms_psr@psr2-suspend
kms_psr2_sf@primary-plane-update-sf-dmg-area
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb
kms_psr2_sf@overlay-plane-update-sf-dmg-area
@@ -2643,13 +2653,13 @@ kms_scaling_modes@scaling-mode-full
kms_scaling_modes@scaling-mode-center
kms_scaling_modes@scaling-mode-full-aspect
kms_scaling_modes@scaling-mode-none
-kms_selftest@drm_cmdline
-kms_selftest@drm_damage
-kms_selftest@drm_dp_mst
+kms_selftest@drm_cmdline_parser
+kms_selftest@drm_damage_helper
+kms_selftest@drm_dp_mst_helper
kms_selftest@drm_format_helper
kms_selftest@drm_format
-kms_selftest@framebuffer
-kms_selftest@drm_plane
+kms_selftest@drm_framebuffer
+kms_selftest@drm_plane_helper
kms_setmode@basic
kms_setmode@basic-clone-single-crtc
kms_setmode@invalid-clone-single-crtc
@@ -2658,248 +2668,38 @@ kms_setmode@clone-exclusive-crtc
kms_setmode@invalid-clone-single-crtc-stealing
kms_sysfs_edid_timing
kms_tv_load_detect@load-detect
-kms_universal_plane@universal-plane-pipe-A-functional
-kms_universal_plane@universal-plane-pipe-A-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-A
-kms_universal_plane@cursor-fb-leak-pipe-A
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-A
-kms_universal_plane@universal-plane-pipe-B-functional
-kms_universal_plane@universal-plane-pipe-B-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-B
-kms_universal_plane@cursor-fb-leak-pipe-B
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-B
-kms_universal_plane@universal-plane-pipe-C-functional
-kms_universal_plane@universal-plane-pipe-C-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-C
-kms_universal_plane@cursor-fb-leak-pipe-C
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-C
-kms_universal_plane@universal-plane-pipe-D-functional
-kms_universal_plane@universal-plane-pipe-D-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-D
-kms_universal_plane@cursor-fb-leak-pipe-D
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-D
-kms_universal_plane@universal-plane-pipe-E-functional
-kms_universal_plane@universal-plane-pipe-E-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-E
-kms_universal_plane@cursor-fb-leak-pipe-E
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-E
-kms_universal_plane@universal-plane-pipe-F-functional
-kms_universal_plane@universal-plane-pipe-F-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-F
-kms_universal_plane@cursor-fb-leak-pipe-F
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-F
-kms_universal_plane@universal-plane-pipe-G-functional
-kms_universal_plane@universal-plane-pipe-G-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-G
-kms_universal_plane@cursor-fb-leak-pipe-G
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-G
-kms_universal_plane@universal-plane-pipe-H-functional
-kms_universal_plane@universal-plane-pipe-H-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-H
-kms_universal_plane@cursor-fb-leak-pipe-H
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-H
+kms_universal_plane@universal-plane-functional
+kms_universal_plane@universal-plane-sanity
+kms_universal_plane@disable-primary-vs-flip
+kms_universal_plane@cursor-fb-leak
+kms_universal_plane@universal-plane-pageflip-windowed
kms_vblank@invalid
kms_vblank@crtc-id
-kms_vblank@pipe-A-accuracy-idle
-kms_vblank@pipe-A-query-idle
-kms_vblank@pipe-A-query-idle-hang
-kms_vblank@pipe-A-query-forked
-kms_vblank@pipe-A-query-forked-hang
-kms_vblank@pipe-A-query-busy
-kms_vblank@pipe-A-query-busy-hang
-kms_vblank@pipe-A-query-forked-busy
-kms_vblank@pipe-A-query-forked-busy-hang
-kms_vblank@pipe-A-wait-idle
-kms_vblank@pipe-A-wait-idle-hang
-kms_vblank@pipe-A-wait-forked
-kms_vblank@pipe-A-wait-forked-hang
-kms_vblank@pipe-A-wait-busy
-kms_vblank@pipe-A-wait-busy-hang
-kms_vblank@pipe-A-wait-forked-busy
-kms_vblank@pipe-A-wait-forked-busy-hang
-kms_vblank@pipe-A-ts-continuation-idle
-kms_vblank@pipe-A-ts-continuation-idle-hang
-kms_vblank@pipe-A-ts-continuation-dpms-rpm
-kms_vblank@pipe-A-ts-continuation-dpms-suspend
-kms_vblank@pipe-A-ts-continuation-suspend
-kms_vblank@pipe-A-ts-continuation-modeset
-kms_vblank@pipe-A-ts-continuation-modeset-hang
-kms_vblank@pipe-A-ts-continuation-modeset-rpm
-kms_vblank@pipe-B-accuracy-idle
-kms_vblank@pipe-B-query-idle
-kms_vblank@pipe-B-query-idle-hang
-kms_vblank@pipe-B-query-forked
-kms_vblank@pipe-B-query-forked-hang
-kms_vblank@pipe-B-query-busy
-kms_vblank@pipe-B-query-busy-hang
-kms_vblank@pipe-B-query-forked-busy
-kms_vblank@pipe-B-query-forked-busy-hang
-kms_vblank@pipe-B-wait-idle
-kms_vblank@pipe-B-wait-idle-hang
-kms_vblank@pipe-B-wait-forked
-kms_vblank@pipe-B-wait-forked-hang
-kms_vblank@pipe-B-wait-busy
-kms_vblank@pipe-B-wait-busy-hang
-kms_vblank@pipe-B-wait-forked-busy
-kms_vblank@pipe-B-wait-forked-busy-hang
-kms_vblank@pipe-B-ts-continuation-idle
-kms_vblank@pipe-B-ts-continuation-idle-hang
-kms_vblank@pipe-B-ts-continuation-dpms-rpm
-kms_vblank@pipe-B-ts-continuation-dpms-suspend
-kms_vblank@pipe-B-ts-continuation-suspend
-kms_vblank@pipe-B-ts-continuation-modeset
-kms_vblank@pipe-B-ts-continuation-modeset-hang
-kms_vblank@pipe-B-ts-continuation-modeset-rpm
-kms_vblank@pipe-C-accuracy-idle
-kms_vblank@pipe-C-query-idle
-kms_vblank@pipe-C-query-idle-hang
-kms_vblank@pipe-C-query-forked
-kms_vblank@pipe-C-query-forked-hang
-kms_vblank@pipe-C-query-busy
-kms_vblank@pipe-C-query-busy-hang
-kms_vblank@pipe-C-query-forked-busy
-kms_vblank@pipe-C-query-forked-busy-hang
-kms_vblank@pipe-C-wait-idle
-kms_vblank@pipe-C-wait-idle-hang
-kms_vblank@pipe-C-wait-forked
-kms_vblank@pipe-C-wait-forked-hang
-kms_vblank@pipe-C-wait-busy
-kms_vblank@pipe-C-wait-busy-hang
-kms_vblank@pipe-C-wait-forked-busy
-kms_vblank@pipe-C-wait-forked-busy-hang
-kms_vblank@pipe-C-ts-continuation-idle
-kms_vblank@pipe-C-ts-continuation-idle-hang
-kms_vblank@pipe-C-ts-continuation-dpms-rpm
-kms_vblank@pipe-C-ts-continuation-dpms-suspend
-kms_vblank@pipe-C-ts-continuation-suspend
-kms_vblank@pipe-C-ts-continuation-modeset
-kms_vblank@pipe-C-ts-continuation-modeset-hang
-kms_vblank@pipe-C-ts-continuation-modeset-rpm
-kms_vblank@pipe-D-accuracy-idle
-kms_vblank@pipe-D-query-idle
-kms_vblank@pipe-D-query-idle-hang
-kms_vblank@pipe-D-query-forked
-kms_vblank@pipe-D-query-forked-hang
-kms_vblank@pipe-D-query-busy
-kms_vblank@pipe-D-query-busy-hang
-kms_vblank@pipe-D-query-forked-busy
-kms_vblank@pipe-D-query-forked-busy-hang
-kms_vblank@pipe-D-wait-idle
-kms_vblank@pipe-D-wait-idle-hang
-kms_vblank@pipe-D-wait-forked
-kms_vblank@pipe-D-wait-forked-hang
-kms_vblank@pipe-D-wait-busy
-kms_vblank@pipe-D-wait-busy-hang
-kms_vblank@pipe-D-wait-forked-busy
-kms_vblank@pipe-D-wait-forked-busy-hang
-kms_vblank@pipe-D-ts-continuation-idle
-kms_vblank@pipe-D-ts-continuation-idle-hang
-kms_vblank@pipe-D-ts-continuation-dpms-rpm
-kms_vblank@pipe-D-ts-continuation-dpms-suspend
-kms_vblank@pipe-D-ts-continuation-suspend
-kms_vblank@pipe-D-ts-continuation-modeset
-kms_vblank@pipe-D-ts-continuation-modeset-hang
-kms_vblank@pipe-D-ts-continuation-modeset-rpm
-kms_vblank@pipe-E-accuracy-idle
-kms_vblank@pipe-E-query-idle
-kms_vblank@pipe-E-query-idle-hang
-kms_vblank@pipe-E-query-forked
-kms_vblank@pipe-E-query-forked-hang
-kms_vblank@pipe-E-query-busy
-kms_vblank@pipe-E-query-busy-hang
-kms_vblank@pipe-E-query-forked-busy
-kms_vblank@pipe-E-query-forked-busy-hang
-kms_vblank@pipe-E-wait-idle
-kms_vblank@pipe-E-wait-idle-hang
-kms_vblank@pipe-E-wait-forked
-kms_vblank@pipe-E-wait-forked-hang
-kms_vblank@pipe-E-wait-busy
-kms_vblank@pipe-E-wait-busy-hang
-kms_vblank@pipe-E-wait-forked-busy
-kms_vblank@pipe-E-wait-forked-busy-hang
-kms_vblank@pipe-E-ts-continuation-idle
-kms_vblank@pipe-E-ts-continuation-idle-hang
-kms_vblank@pipe-E-ts-continuation-dpms-rpm
-kms_vblank@pipe-E-ts-continuation-dpms-suspend
-kms_vblank@pipe-E-ts-continuation-suspend
-kms_vblank@pipe-E-ts-continuation-modeset
-kms_vblank@pipe-E-ts-continuation-modeset-hang
-kms_vblank@pipe-E-ts-continuation-modeset-rpm
-kms_vblank@pipe-F-accuracy-idle
-kms_vblank@pipe-F-query-idle
-kms_vblank@pipe-F-query-idle-hang
-kms_vblank@pipe-F-query-forked
-kms_vblank@pipe-F-query-forked-hang
-kms_vblank@pipe-F-query-busy
-kms_vblank@pipe-F-query-busy-hang
-kms_vblank@pipe-F-query-forked-busy
-kms_vblank@pipe-F-query-forked-busy-hang
-kms_vblank@pipe-F-wait-idle
-kms_vblank@pipe-F-wait-idle-hang
-kms_vblank@pipe-F-wait-forked
-kms_vblank@pipe-F-wait-forked-hang
-kms_vblank@pipe-F-wait-busy
-kms_vblank@pipe-F-wait-busy-hang
-kms_vblank@pipe-F-wait-forked-busy
-kms_vblank@pipe-F-wait-forked-busy-hang
-kms_vblank@pipe-F-ts-continuation-idle
-kms_vblank@pipe-F-ts-continuation-idle-hang
-kms_vblank@pipe-F-ts-continuation-dpms-rpm
-kms_vblank@pipe-F-ts-continuation-dpms-suspend
-kms_vblank@pipe-F-ts-continuation-suspend
-kms_vblank@pipe-F-ts-continuation-modeset
-kms_vblank@pipe-F-ts-continuation-modeset-hang
-kms_vblank@pipe-F-ts-continuation-modeset-rpm
-kms_vblank@pipe-G-accuracy-idle
-kms_vblank@pipe-G-query-idle
-kms_vblank@pipe-G-query-idle-hang
-kms_vblank@pipe-G-query-forked
-kms_vblank@pipe-G-query-forked-hang
-kms_vblank@pipe-G-query-busy
-kms_vblank@pipe-G-query-busy-hang
-kms_vblank@pipe-G-query-forked-busy
-kms_vblank@pipe-G-query-forked-busy-hang
-kms_vblank@pipe-G-wait-idle
-kms_vblank@pipe-G-wait-idle-hang
-kms_vblank@pipe-G-wait-forked
-kms_vblank@pipe-G-wait-forked-hang
-kms_vblank@pipe-G-wait-busy
-kms_vblank@pipe-G-wait-busy-hang
-kms_vblank@pipe-G-wait-forked-busy
-kms_vblank@pipe-G-wait-forked-busy-hang
-kms_vblank@pipe-G-ts-continuation-idle
-kms_vblank@pipe-G-ts-continuation-idle-hang
-kms_vblank@pipe-G-ts-continuation-dpms-rpm
-kms_vblank@pipe-G-ts-continuation-dpms-suspend
-kms_vblank@pipe-G-ts-continuation-suspend
-kms_vblank@pipe-G-ts-continuation-modeset
-kms_vblank@pipe-G-ts-continuation-modeset-hang
-kms_vblank@pipe-G-ts-continuation-modeset-rpm
-kms_vblank@pipe-H-accuracy-idle
-kms_vblank@pipe-H-query-idle
-kms_vblank@pipe-H-query-idle-hang
-kms_vblank@pipe-H-query-forked
-kms_vblank@pipe-H-query-forked-hang
-kms_vblank@pipe-H-query-busy
-kms_vblank@pipe-H-query-busy-hang
-kms_vblank@pipe-H-query-forked-busy
-kms_vblank@pipe-H-query-forked-busy-hang
-kms_vblank@pipe-H-wait-idle
-kms_vblank@pipe-H-wait-idle-hang
-kms_vblank@pipe-H-wait-forked
-kms_vblank@pipe-H-wait-forked-hang
-kms_vblank@pipe-H-wait-busy
-kms_vblank@pipe-H-wait-busy-hang
-kms_vblank@pipe-H-wait-forked-busy
-kms_vblank@pipe-H-wait-forked-busy-hang
-kms_vblank@pipe-H-ts-continuation-idle
-kms_vblank@pipe-H-ts-continuation-idle-hang
-kms_vblank@pipe-H-ts-continuation-dpms-rpm
-kms_vblank@pipe-H-ts-continuation-dpms-suspend
-kms_vblank@pipe-H-ts-continuation-suspend
-kms_vblank@pipe-H-ts-continuation-modeset
-kms_vblank@pipe-H-ts-continuation-modeset-hang
-kms_vblank@pipe-H-ts-continuation-modeset-rpm
+kms_vblank@accuracy-idle
+kms_vblank@query-idle
+kms_vblank@query-idle-hang
+kms_vblank@query-forked
+kms_vblank@query-forked-hang
+kms_vblank@query-busy
+kms_vblank@query-busy-hang
+kms_vblank@query-forked-busy
+kms_vblank@query-forked-busy-hang
+kms_vblank@wait-idle
+kms_vblank@wait-idle-hang
+kms_vblank@wait-forked
+kms_vblank@wait-forked-hang
+kms_vblank@wait-busy
+kms_vblank@wait-busy-hang
+kms_vblank@wait-forked-busy
+kms_vblank@wait-forked-busy-hang
+kms_vblank@ts-continuation-idle
+kms_vblank@ts-continuation-idle-hang
+kms_vblank@ts-continuation-dpms-rpm
+kms_vblank@ts-continuation-dpms-suspend
+kms_vblank@ts-continuation-suspend
+kms_vblank@ts-continuation-modeset
+kms_vblank@ts-continuation-modeset-hang
+kms_vblank@ts-continuation-modeset-rpm
kms_vrr@flip-basic
kms_vrr@flip-dpms
kms_vrr@flip-suspend
@@ -2910,3 +2710,52 @@ kms_writeback@writeback-invalid-parameters
kms_writeback@writeback-fb-id
kms_writeback@writeback-check-output
prime_mmap_kms@buffer-sharing
+msm_shrink@copy-gpu-sanitycheck-8
+msm_shrink@copy-gpu-sanitycheck-32
+msm_shrink@copy-gpu-8
+msm_shrink@copy-gpu-32
+msm_shrink@copy-gpu-madvise-8
+msm_shrink@copy-gpu-madvise-32
+msm_shrink@copy-gpu-oom-8
+msm_shrink@copy-gpu-oom-32
+msm_shrink@copy-mmap-sanitycheck-8
+msm_shrink@copy-mmap-sanitycheck-32
+msm_shrink@copy-mmap-8
+msm_shrink@copy-mmap-32
+msm_shrink@copy-mmap-madvise-8
+msm_shrink@copy-mmap-madvise-32
+msm_shrink@copy-mmap-oom-8
+msm_shrink@copy-mmap-oom-32
+msm_shrink@copy-mmap-dmabuf-sanitycheck-8
+msm_shrink@copy-mmap-dmabuf-sanitycheck-32
+msm_shrink@copy-mmap-dmabuf-8
+msm_shrink@copy-mmap-dmabuf-32
+msm_shrink@copy-mmap-dmabuf-madvise-8
+msm_shrink@copy-mmap-dmabuf-madvise-32
+msm_shrink@copy-mmap-dmabuf-oom-8
+msm_shrink@copy-mmap-dmabuf-oom-32
+msm_mapping@ring
+msm_mapping@sqefw
+msm_mapping@shadow
+msm_submitoverhead@submitbench-10-bos
+msm_submitoverhead@submitbench-10-bos-no-implicit-sync
+msm_submitoverhead@submitbench-100-bos
+msm_submitoverhead@submitbench-100-bos-no-implicit-sync
+msm_submitoverhead@submitbench-250-bos
+msm_submitoverhead@submitbench-250-bos-no-implicit-sync
+msm_submitoverhead@submitbench-500-bos
+msm_submitoverhead@submitbench-500-bos-no-implicit-sync
+msm_submitoverhead@submitbench-1000-bos
+msm_submitoverhead@submitbench-1000-bos-no-implicit-sync
+msm_recovery@hangcheck
+msm_recovery@gpu-fault
+msm_recovery@gpu-fault-parallel
+msm_recovery@iova-fault
+msm_submit@empty-submit
+msm_submit@invalid-queue-submit
+msm_submit@invalid-flags-submit
+msm_submit@invalid-in-fence-submit
+msm_submit@invalid-duplicate-bo-submit
+msm_submit@invalid-cmd-idx-submit
+msm_submit@invalid-cmd-type-submit
+msm_submit@valid-submit
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index d39d254c935e..44a5c62dedad 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -6,8 +6,6 @@ kms_cursor_legacy@all-pipes-single-bo,Fail
kms_cursor_legacy@all-pipes-single-move,Fail
kms_cursor_legacy@all-pipes-torture-bo,Fail
kms_cursor_legacy@all-pipes-torture-move,Fail
-kms_cursor_legacy@forked-bo,Fail
-kms_cursor_legacy@forked-move,Fail
kms_cursor_legacy@pipe-A-forked-bo,Fail
kms_cursor_legacy@pipe-A-forked-move,Fail
kms_cursor_legacy@pipe-A-single-bo,Fail
@@ -18,3 +16,4 @@ kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_selftest@drm_format,Timeout
kms_selftest@drm_format_helper,Timeout
+msm_mapping@ring,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
index 2cd49e8ee47f..88a1fc0a3b0d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -1,4 +1,2 @@
kms_3d,Fail
kms_addfb_basic@addfb25-bad-modifier,Fail
-kms_force_connector_basic@force-edid,Fail
-kms_hdmi_inject@inject-4k,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
deleted file mode 100644
index f71166a57731..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-kms_color@ctm-0-25,Fail
-kms_color@ctm-0-50,Fail
-kms_color@ctm-0-75,Fail
-kms_color@ctm-blue-to-red,Fail
-kms_color@ctm-green-to-red,Fail
-kms_color@ctm-negative,Fail
-kms_color@ctm-red-to-blue,Fail
-kms_color@ctm-signed,Fail
-kms_cursor_legacy@cursor-vs-flip-toggle,Fail
-kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions,Crash
-kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning-vs-hang,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@coverage-7efc,Fail
-kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_alpha_blend@pipe-A-alpha-7efc,Fail
-kms_plane_alpha_blend@pipe-A-coverage-7efc,Fail
-kms_plane_alpha_blend@pipe-A-coverage-vs-premult-vs-constant,Fail
-kms_plane_alpha_blend@pipe-B-alpha-7efc,Fail
-kms_plane_alpha_blend@pipe-B-alpha-basic,Fail
-kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
-kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
-kms_plane_alpha_blend@pipe-B-constant-alpha-mid,Fail
-kms_plane_alpha_blend@pipe-B-coverage-7efc,Fail
-kms_plane_alpha_blend@pipe-B-coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
-kms_universal_plane@disable-primary-vs-flip-pipe-b,Fail
-kms_universal_plane@universal-plane-pipe-B-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
deleted file mode 100644
index 04730044ed12..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-kms_color@ctm-0-25
-kms_color@ctm-0-50
-kms_color@ctm-0-75
-kms_color@ctm-blue-to-red
-kms_color@ctm-green-to-red
-kms_color@ctm-negative
-kms_color@ctm-red-to-blue
-kms_color@ctm-signed
-kms_flip@flip-vs-modeset-vs-hang
-kms_flip@flip-vs-panning-vs-hang
-kms_plane@pixel-format
-kms_plane@pixel-format-source-clamping
-kms_plane@plane-position-covered
-kms_plane@plane-position-hole
-kms_plane@plane-position-hole-dpms
-kms_writeback@writeback-fb-id
-kms_writeback@writeback-invalid-parameters
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
deleted file mode 100644
index e59a2fddfde0..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# Suspend to RAM seems to be broken on this machine
-.*suspend.*
-
-# Test incorrectly assumes that CTM support implies gamma/degamma
-# LUT support. None of the subtests handle the case of only having
-# CTM support
-#kms_color.*
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
new file mode 100644
index 000000000000..f0576aa629e8
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -0,0 +1,18 @@
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-blue-to-red,Fail
+kms_color@ctm-green-to-red,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@universal-plane-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
new file mode 100644
index 000000000000..327039f70252
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -0,0 +1,2 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
new file mode 100644
index 000000000000..f0576aa629e8
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -0,0 +1,18 @@
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-blue-to-red,Fail
+kms_color@ctm-green-to-red,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@universal-plane-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
new file mode 100644
index 000000000000..327039f70252
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -0,0 +1,2 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index c55baa2d18c1..e9043a00383e 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -15,7 +15,7 @@ kms_color@pipe-A-ctm-max,Fail
kms_color@pipe-A-ctm-negative,Fail
kms_color@pipe-A-ctm-red-to-blue,Fail
kms_color@pipe-A-legacy-gamma,Fail
-kms_cursor_legacy@basic-flip-after-cursor-legacy,Fail
+kms_cursor_legacy@basic-flip-after-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-after-cursor-varying-size,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
@@ -29,9 +29,6 @@ kms_cursor_legacy@flip-vs-cursor-atomic,Fail
kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions,Fail
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size,Fail
-kms_cursor_legacy@short-flip-after-cursor-toggle,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 16d205c04cbb..8a492f01eaa4 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -1,12 +1,22 @@
-kms_cursor_legacy@basic-flip-after-cursor-atomic
-kms_cursor_legacy@basic-flip-before-cursor-varying-size
-kms_cursor_legacy@cursorA-vs-flipA-toggle
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+# Board Name: msm:sdm845
+# Bug Report: https://lore.kernel.org/dri-devel/46287831-edfa-78e8-6055-d7a08831c445@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gd2af13d9f
+# Linux Version: 6.7.0-rc3
+
+# Reported by deqp-runner
+kms_cursor_legacy@basic-flip-after-cursor-legacy
kms_cursor_legacy@flip-vs-cursor-toggle
kms_cursor_legacy@flip-vs-cursor-varying-size
+kms_cursor_legacy@short-flip-after-cursor-toggle
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
-kms_cursor_legacy@short-flip-before-cursor-toggle
-kms_flip@flip-vs-modeset-vs-hang
-kms_flip@flip-vs-panning-vs-hang
-kms_plane@pixel-format
-kms_plane@pixel-format-source-clamping
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
+msm_shrink@copy-gpu-32
+msm_shrink@copy-gpu-oom-32
+
+# The tests below show inconsistency across multiple runs, giving
+# results of Pass and Fail alternately.
+kms_cursor_legacy@basic-flip-before-cursor-varying-size
+kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 42675f1c6d76..618e3a3a7277 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -1,2 +1,7 @@
# Hangs machine
-kms_bw.*
\ No newline at end of file
+kms_bw.*
+
+# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches:
+# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
+# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
+msm_mapping@*
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index 8a165be1a821..5afc26be9d2a 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -127,7 +127,7 @@ static void dp_aux_ep_shutdown(struct device *dev)
aux_ep_drv->shutdown(to_dp_aux_ep_dev(dev));
}
-static struct bus_type dp_aux_bus_type = {
+static const struct bus_type dp_aux_bus_type = {
.name = "dp-aux",
.match = dp_aux_ep_match,
.probe = dp_aux_ep_probe,
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index b1ca3a1100da..8d6ce46471ae 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2897,22 +2897,19 @@ static const char *dp_content_type_get_name(enum dp_content_type content_type)
}
}
-void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
- const struct drm_dp_vsc_sdp *vsc)
+void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
{
-#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
- DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
+ drm_printf(p, "DP SDP: VSC, revision %u, length %u\n",
vsc->revision, vsc->length);
- DP_SDP_LOG(" pixelformat: %s\n",
+ drm_printf(p, " pixelformat: %s\n",
dp_pixelformat_get_name(vsc->pixelformat));
- DP_SDP_LOG(" colorimetry: %s\n",
+ drm_printf(p, " colorimetry: %s\n",
dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
- DP_SDP_LOG(" bpc: %u\n", vsc->bpc);
- DP_SDP_LOG(" dynamic range: %s\n",
+ drm_printf(p, " bpc: %u\n", vsc->bpc);
+ drm_printf(p, " dynamic range: %s\n",
dp_dynamic_range_get_name(vsc->dynamic_range));
- DP_SDP_LOG(" content type: %s\n",
+ drm_printf(p, " content type: %s\n",
dp_content_type_get_name(vsc->content_type));
-#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
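With the level/device pair replaced by a &drm_printer, callers now choose the output sink explicitly. A minimal sketch of a caller under the new signature, using the drm_dbg_printer() constructor seen in the MST hunk below; the function and prefix names are illustrative, not part of this patch:

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>

static void my_log_vsc_sdp(struct drm_device *drm,
			   const struct drm_dp_vsc_sdp *vsc)
{
	/* Route the dump through the DRM debug machinery; nothing is
	 * printed unless the DP debug category is enabled. */
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, "my-sdp");

	drm_dp_vsc_sdp_log(&p, vsc);
}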
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index f7c6b60629c2..03d528209426 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -1306,7 +1306,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
@@ -1593,10 +1594,11 @@ topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
}
static void
-__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+__dump_topology_ref_history(struct drm_device *drm,
+ struct drm_dp_mst_topology_ref_history *history,
void *ptr, const char *type_str)
{
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
int i;
@@ -1638,15 +1640,15 @@ out:
static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
- __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
- "MSTB");
+ __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
+ mstb, "MSTB");
}
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
- __dump_topology_ref_history(&port->topology_ref_history, port,
- "Port");
+ __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
+ port, "Port");
}
static __always_inline void
@@ -2824,7 +2826,9 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
if (ret) {
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev,
+ DRM_UT_DP,
+ DBG_PREFIX);
drm_printf(&p, "sideband msg failed to send\n");
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
@@ -2869,7 +2873,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index cee3188adf3d..521a71c61b16 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -27,8 +27,9 @@
#include <linux/mutex.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_debugfs.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
@@ -1207,26 +1208,26 @@ int drm_bridge_get_modes(struct drm_bridge *bridge,
EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
/**
- * drm_bridge_get_edid - get the EDID data of the connected display
+ * drm_bridge_edid_read - read the EDID data of the connected display
* @bridge: bridge control structure
* @connector: the connector to read EDID for
*
* If the bridge supports output EDID retrieval, as reported by the
- * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
- * get the EDID and return it. Otherwise return NULL.
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
+ * the EDID and return it. Otherwise return NULL.
*
* RETURNS:
* The retrieved EDID on success, or NULL otherwise.
*/
-struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
return NULL;
- return bridge->funcs->get_edid(bridge, connector);
+ return bridge->funcs->edid_read(bridge, connector);
}
-EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
+EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
/**
* drm_bridge_hpd_enable - enable hot plug detection for the bridge
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 3acd67021ec6..982552c9f92c 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -239,27 +239,27 @@ static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
struct drm_bridge *bridge)
{
enum drm_connector_status status;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int n;
status = drm_bridge_connector_detect(connector, false);
if (status != connector_status_connected)
goto no_edid;
- edid = drm_bridge_get_edid(bridge, connector);
- if (!drm_edid_is_valid(edid)) {
- kfree(edid);
+ drm_edid = drm_bridge_edid_read(bridge, connector);
+ if (!drm_edid_valid(drm_edid)) {
+ drm_edid_free(drm_edid);
goto no_edid;
}
- drm_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
+ n = drm_edid_connector_add_modes(connector);
- kfree(edid);
+ drm_edid_free(drm_edid);
return n;
no_edid:
- drm_connector_update_edid_property(connector, NULL);
+ drm_edid_connector_update(connector, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 65f9f66933bb..6795624f16e7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -61,13 +61,13 @@
* to one or more &drm_encoder, which are then each connected to one
* &drm_connector.
*
- * To create a CRTC, a KMS drivers allocates and zeroes an instances of
+ * To create a CRTC, a KMS driver allocates and zeroes an instance of
* &struct drm_crtc (possibly as part of a larger structure) and registers it
* with a call to drm_crtc_init_with_planes().
*
- * The CRTC is also the entry point for legacy modeset operations, see
- * &drm_crtc_funcs.set_config, legacy plane operations, see
- * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
+ * The CRTC is also the entry point for legacy modeset operations (see
+ * &drm_crtc_funcs.set_config), legacy plane operations (see
+ * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2), and other legacy
* operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
* features are controlled through &drm_property and
* &drm_mode_config_funcs.atomic_check.
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index f4715a67e340..08fcefd804bc 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -45,8 +45,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#if defined(CONFIG_DEBUG_FS)
-
/***************************************************
* Initialization, etc.
**************************************************/
@@ -647,5 +645,3 @@ void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
debugfs_remove_recursive(encoder->debugfs_entry);
encoder->debugfs_entry = NULL;
}
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 69c68804023f..923c4423151c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3611,7 +3611,8 @@ static bool mode_in_range(const struct drm_display_mode *mode,
if (!mode_in_vsync_range(mode, edid, t))
return false;
- if ((max_clock = range_pixel_clock(edid, t)))
+ max_clock = range_pixel_clock(edid, t);
+ if (max_clock)
if (mode->clock > max_clock)
return false;
@@ -6990,28 +6991,6 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_add_modes_noedid);
-/**
- * drm_set_preferred_mode - Sets the preferred mode of a connector
- * @connector: connector whose mode list should be processed
- * @hpref: horizontal resolution of preferred mode
- * @vpref: vertical resolution of preferred mode
- *
- * Marks a mode as preferred if it matches the resolution specified by @hpref
- * and @vpref.
- */
-void drm_set_preferred_mode(struct drm_connector *connector,
- int hpref, int vpref)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->probed_modes, head) {
- if (mode->hdisplay == hpref &&
- mode->vdisplay == vpref)
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- }
-}
-EXPORT_SYMBOL(drm_set_preferred_mode);
-
static bool is_hdmi2_sink(const struct drm_connector *connector)
{
/*
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 48ee851b61d9..2da094bdf8a4 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -76,7 +76,7 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
* If nr is non-zero then it is used as the initial objects table size.
* In either case, the table will grow (be re-allocated) on demand.
*/
-void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
+void drm_exec_init(struct drm_exec *exec, u32 flags, unsigned nr)
{
if (!nr)
nr = PAGE_SIZE / sizeof(void *);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 8c87287c3e16..638ffa4444f5 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -913,7 +913,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_PURGEABLE;
}
- if (obj->handle_count > 1) {
+ if (drm_gem_object_is_shared_for_memory_stats(obj)) {
status.shared += obj->size;
} else {
status.private += obj->size;
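The predicate used here is added elsewhere in this series; its definition is not part of this hunk. A sketch of what it checks, on the assumption that an object counts as shared once it has more than one GEM handle or has been exported as a dma-buf:

static inline bool
drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
{
	/* Shared via multiple handles or via dma-buf export. */
	return obj->handle_count > 1 || obj->dma_buf;
}

Compared with the bare handle_count check it replaces, this also counts objects that were exported but only ever had a single handle.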
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b67eafa55715..75f2eaf0d5b6 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -147,7 +147,6 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
invariant_flags = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
- gbo->placement.busy_placement = gbo->placements;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
gbo->placements[c].mem_type = TTM_PL_VRAM;
@@ -160,7 +159,6 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
}
gbo->placement.num_placement = c;
- gbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
gbo->placements[i].fpfn = 0;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 129e2b91dbfe..e6b5b06de148 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -229,7 +229,7 @@ typedef struct drm_update_draw32 {
unsigned int num;
/* 64-bit version has a 32-bit pad here */
u64 data; /**< Pointer */
-} __attribute__((packed)) drm_update_draw32_t;
+} __packed drm_update_draw32_t;
static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -296,7 +296,7 @@ typedef struct drm_mode_fb_cmd232 {
u32 pitches[4];
u32 offsets[4];
u64 modifier[4];
-} __attribute__((packed)) drm_mode_fb_cmd232_t;
+} __packed drm_mode_fb_cmd232_t;
static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
unsigned long arg)
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index bcd111404b12..7646f67bda4e 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -177,6 +177,45 @@ int __drmm_add_action_or_reset(struct drm_device *dev,
EXPORT_SYMBOL(__drmm_add_action_or_reset);
/**
+ * drmm_release_action - release a managed action from a &drm_device
+ * @dev: DRM device
+ * @action: function which would be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * This function immediately calls the @action previously added by
+ * drmm_add_action(). The @action is removed from the list of cleanup
+ * actions for @dev, which means that it won't be called in the final
+ * drm_dev_put().
+ */
+void drmm_release_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data)
+{
+ struct drmres *dr_match = NULL, *dr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_for_each_entry_reverse(dr, &dev->managed.resources, node.entry) {
+ if (dr->node.release == action) {
+ if (!data || (data && *(void **)dr->data == data)) {
+ dr_match = dr;
+ del_dr(dev, dr_match);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ if (WARN_ON(!dr_match))
+ return;
+
+ action(dev, data);
+
+ free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_release_action);
+
+/**
* drmm_kmalloc - &drm_device managed kmalloc()
* @dev: DRM device
* @size: size of the memory allocation
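A minimal usage sketch for the new helper, pairing it with drmm_add_action(); my_hw_fini, my_bind and my_data are illustrative names, not from this patch:

#include <drm/drm_managed.h>

static void my_hw_fini(struct drm_device *dev, void *data)
{
	/* Undo whatever hardware state was set up for @data. */
}

static int my_bind(struct drm_device *dev, void *my_data)
{
	int ret = drmm_add_action(dev, my_hw_fini, my_data);

	if (ret)
		return ret;

	/* On a path that must unwind early, run the cleanup now
	 * instead of waiting for the final drm_dev_put(). */
	drmm_release_action(dev, my_hw_fini, my_data);
	return 0;
}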
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 843a6dbda93a..ef6e416522f8 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -89,7 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
.restore = pm_generic_restore,
};
-static struct bus_type mipi_dsi_bus_type = {
+static const struct bus_type mipi_dsi_bus_type = {
.name = "mipi-dsi",
.match = mipi_dsi_device_match,
.uevent = mipi_dsi_uevent,
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 8525ef851540..48fd2d67f352 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -544,7 +544,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- struct drm_printer p = drm_debug_printer("[leaked fb]");
+ struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
drm_framebuffer_print_info(&p, 1, fb);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 893f52ee4926..c4f88c3a93b7 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -2752,3 +2752,25 @@ bool drm_mode_is_420(const struct drm_display_info *display,
drm_mode_is_420_also(display, mode);
}
EXPORT_SYMBOL(drm_mode_is_420);
+
+/**
+ * drm_set_preferred_mode - Sets the preferred mode of a connector
+ * @connector: connector whose mode list should be processed
+ * @hpref: horizontal resolution of preferred mode
+ * @vpref: vertical resolution of preferred mode
+ *
+ * Marks a mode as preferred if it matches the resolution specified by @hpref
+ * and @vpref.
+ */
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->hdisplay == hpref &&
+ mode->vdisplay == vpref)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
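The helper is only relocated from drm_edid.c here; its behaviour is unchanged. As a reminder of its intended use, a sketch of a .get_modes hook that marks one resolution preferred (hook name illustrative):

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>

static int my_connector_get_modes(struct drm_connector *connector)
{
	/* Populate probed_modes with standard modes up to 1920x1080,
	 * then flag the matching entry as preferred. */
	int count = drm_add_modes_noedid(connector, 1920, 1080);

	drm_set_preferred_mode(connector, 1920, 1080);
	return count;
}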
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 918065982db4..7694b85e75e3 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -91,7 +91,7 @@ static noinline depot_stack_handle_t __drm_stack_depot_save(void)
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
- struct drm_printer p = drm_debug_printer("drm_modeset_lock");
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_KMS, "drm_modeset_lock");
unsigned long *entries;
unsigned int nr_entries;
char *buf;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 3d92f66e550c..aa93129c3397 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -117,6 +117,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
+ .width = 1080,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -279,6 +285,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
},
.driver_data = (void *)&lcd720x1280_rightside_up,
+ }, { /* GPD Win Mini */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1617-01")
+ },
+ .driver_data = (void *)&lcd1080x1920_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 5b93c11895bb..699b7dbffd7b 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -182,16 +182,35 @@ void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_info);
-void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
+void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
{
- /* pr_debug callsite decorations are unhelpful here */
- printk(KERN_DEBUG "%s %pV", p->prefix, vaf);
+ const struct drm_device *drm = p->arg;
+ const struct device *dev = drm ? drm->dev : NULL;
+ enum drm_debug_category category = p->category;
+ const char *prefix = p->prefix ?: "";
+ const char *prefix_pad = p->prefix ? " " : "";
+
+ if (!__drm_debug_enabled(category))
+ return;
+
+ /* Note: __builtin_return_address(0) is useless here. */
+ if (dev)
+ dev_printk(KERN_DEBUG, dev, "[" DRM_NAME "]%s%s %pV",
+ prefix_pad, prefix, vaf);
+ else
+ printk(KERN_DEBUG "[" DRM_NAME "]%s%s %pV",
+ prefix_pad, prefix, vaf);
}
-EXPORT_SYMBOL(__drm_printfn_debug);
+EXPORT_SYMBOL(__drm_printfn_dbg);
void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
{
- pr_err("*ERROR* %s %pV", p->prefix, vaf);
+ struct drm_device *drm = p->arg;
+
+ if (p->prefix)
+ drm_err(drm, "%s %pV", p->prefix, vaf);
+ else
+ drm_err(drm, "%pV", vaf);
}
EXPORT_SYMBOL(__drm_printfn_err);
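The reworked dbg callback prefixes "[drm]" and falls back to plain printk() when the printer carries no device, which is what allows the drm_modeset_lock hunk above to pass a NULL drm_device. A sketch relying on that tolerance (function name illustrative):

#include <drm/drm_device.h>
#include <drm/drm_print.h>

static void my_dump(struct drm_device *drm) /* drm may be NULL */
{
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_CORE, "my-dump");

	/* Goes through dev_printk() when a device is attached, bare
	 * printk() otherwise; silent unless DRM_UT_CORE is enabled. */
	drm_printf(&p, "state dump\n");
}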
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 3f479483d7d8..d1e1ade66f81 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -1101,42 +1101,6 @@ enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_helper_mode_valid_fixed);
/**
- * drm_connector_helper_get_modes_from_ddc - Updates the connector's EDID
- * property from the connector's
- * DDC channel
- * @connector: The connector
- *
- * Returns:
- * The number of detected display modes.
- *
- * Uses a connector's DDC channel to retrieve EDID data and update the
- * connector's EDID property and display modes. Drivers can use this
- * function to implement struct &drm_connector_helper_funcs.get_modes
- * for connectors with a DDC channel.
- */
-int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector)
-{
- struct edid *edid;
- int count = 0;
-
- if (!connector->ddc)
- return 0;
-
- edid = drm_get_edid(connector, connector->ddc);
-
- // clears property if EDID is NULL
- drm_connector_update_edid_property(connector, edid);
-
- if (edid) {
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_connector_helper_get_modes_from_ddc);
-
-/**
* drm_connector_helper_get_modes_fixed - Duplicates a display mode for a connector
* @connector: the connector
* @fixed_mode: the display hardware's mode
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index a6c19de46292..a0e94217b511 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -441,6 +441,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
int ret;
+ if (flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ return -EINVAL;
+
if (!syncobj)
return -ENOENT;
@@ -1041,8 +1044,10 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint32_t signaled_count, i;
if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ might_sleep();
lockdep_assert_none_held_once();
+ }
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
if (points == NULL)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index f957552c6c50..207aa3f660b0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -18,7 +18,6 @@
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
-#include <drm/drm_edid.h>
#include <drm/drm_framebuffer.h>
struct hibmc_connector {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 8c6d2ea2a472..94e2c573a7af 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 91f2bc405cba..0279c8aabdd1 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -1060,3 +1060,33 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->fb = intel_fb;
}
+
+bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 base;
+
+ if (!plane_state->uapi.visible)
+ return false;
+
+ base = intel_plane_ggtt_offset(plane_state);
+
+ /*
+ * We may have moved the surface to a different
+ * part of ggtt, make the plane aware of that.
+ */
+ if (plane_config->base == base)
+ return false;
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ intel_de_write(dev_priv, DSPSURF(i9xx_plane), base);
+ else
+ intel_de_write(dev_priv, DSPADDR(i9xx_plane), base);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index b3d724a144cb..0ca12d1e6839 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -26,6 +26,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
+bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config);
#else
static inline unsigned int i965_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -46,6 +48,11 @@ static inline void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
}
+static inline bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config)
+{
+ return false;
+}
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 06c2455bdd78..76d77d5a0409 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -217,6 +217,9 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
int width, height;
unsigned int rel_data_rate;
+ if (plane->id == PLANE_CURSOR)
+ return 0;
+
if (!plane_state->uapi.visible)
return 0;
@@ -244,9 +247,6 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rel_data_rate = width * height * fb->format->cpp[color_plane];
- if (plane->id == PLANE_CURSOR)
- return rel_data_rate;
-
return intel_adjusted_rate(&plane_state->uapi.src,
&plane_state->uapi.dst,
rel_data_rate);
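
Annotation: this hunk moves the PLANE_CURSOR test to the top of the function, so a cursor now contributes 0 to the relative data rate instead of returning the raw width*height*cpp value computed further down. A reduced model of the reordered early returns (types hypothetical; the dst/src rate adjustment is elided since only the ordering matters here):

#include <stdio.h>

enum plane_id { PLANE_PRIMARY, PLANE_CURSOR };

static unsigned int relative_data_rate(enum plane_id id, int visible,
				       int width, int height, int cpp)
{
	/* Cursors are excluded up front instead of bailing out with an
	 * unadjusted rate after it had already been computed. */
	if (id == PLANE_CURSOR)
		return 0;
	if (!visible)
		return 0;
	return (unsigned int)(width * height * cpp);
}

int main(void)
{
	printf("cursor:  %u\n", relative_data_rate(PLANE_CURSOR, 1, 256, 256, 4));
	printf("primary: %u\n", relative_data_rate(PLANE_PRIMARY, 1, 1920, 1080, 4));
	return 0;
}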
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 3f3cd944a1c5..1946d7fb3c2e 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -1465,7 +1465,7 @@ static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int
if (controller == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) < PCH_MTP)
+ INTEL_PCH_TYPE(i915) <= PCH_ADP)
return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index aa169b0055e9..5f04e495fd27 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2204,8 +2204,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
if (IS_DGFX(i915))
return vbt_pin;
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) ||
- IS_ALDERLAKE_P(i915)) {
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL || IS_ALDERLAKE_P(i915)) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -3074,7 +3073,7 @@ err_unmap_oprom:
*/
void intel_bios_init(struct drm_i915_private *i915)
{
- const struct vbt_header *vbt = i915->display.opregion.vbt;
+ const struct vbt_header *vbt;
struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
@@ -3089,6 +3088,8 @@ void intel_bios_init(struct drm_i915_private *i915)
init_vbt_defaults(i915);
+ vbt = intel_opregion_get_vbt(i915, NULL);
+
/*
* If the OpRegion does not have VBT, look in SPI flash through MMIO or
* PCI mapping
@@ -3306,7 +3307,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (i915->display.opregion.vbt)
+ if (intel_opregion_get_vbt(i915, NULL))
return true;
}
@@ -3657,3 +3658,30 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915,
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
func(i915, devdata);
}
+
+static int intel_bios_vbt_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ const void *vbt;
+ size_t vbt_size;
+
+ /*
+ * FIXME: VBT might originate from other places than opregion, and then
+ * this would be incorrect.
+ */
+ vbt = intel_opregion_get_vbt(i915, &vbt_size);
+ if (vbt)
+ seq_write(m, vbt, vbt_size);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
+
+void intel_bios_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
+ i915, &intel_bios_vbt_fops);
+}
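
Annotation: intel_bios_vbt_show() follows the usual seq_file pattern, where DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt) generates intel_bios_vbt_fops from the _show function. A userspace analogue of the show callback dumping a binary blob to a stream; the FILE* stands in for the seq_file and the sample blob is made up.

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the seq_file show callback. */
static int vbt_show(FILE *m, const void *vbt, size_t vbt_size)
{
	/* kernel: seq_write(m, vbt, vbt_size), only if a VBT was found */
	if (vbt && fwrite(vbt, 1, vbt_size, m) != vbt_size)
		return -1;
	return 0;
}

int main(void)
{
	static const unsigned char fake_vbt[] = { '$', 'V', 'B', 'T' };

	return vbt_show(stdout, fake_vbt, sizeof(fake_vbt));
}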
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 49e24b7cf675..41bfb009d4b0 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -246,13 +246,10 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
-bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
-bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
@@ -283,4 +280,6 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915,
void (*func)(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata));
+void intel_bios_debugfs_register(struct drm_i915_private *i915);
+
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c985ebb6831a..26200ee3e23f 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1227,183 +1227,182 @@ struct intel_cdclk_vals {
u32 cdclk;
u16 refclk;
u16 waveform;
- u8 divider; /* CD2X divider * 2 */
u8 ratio;
};
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
- { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
- { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
- { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
- { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
+ { .refclk = 19200, .cdclk = 144000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 288000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 384000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 576000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 624000, .ratio = 65 },
{}
};
static const struct intel_cdclk_vals glk_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
- { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
- { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 79200, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 158400, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 316800, .ratio = 33 },
{}
};
static const struct intel_cdclk_vals icl_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
- { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
- { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
- { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
- { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 18 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 326400, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 180000, .ratio = 15 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 324000, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 172800, .ratio = 9 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 326400, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals rkl_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
- { .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
- { .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
- { .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
- { .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
- { .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },
-
- { .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
- { .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
- { .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
- { .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
- { .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
- { .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },
-
- { .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
- { .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
- { .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
- { .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
- { .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
- { .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 36 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 40 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 64 },
+ { .refclk = 19200, .cdclk = 326400, .ratio = 136 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 116 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 136 },
+
+ { .refclk = 24000, .cdclk = 180000, .ratio = 30 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 32 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 52 },
+ { .refclk = 24000, .cdclk = 324000, .ratio = 108 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 92 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 108 },
+
+ { .refclk = 38400, .cdclk = 172800, .ratio = 18 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 38400, .cdclk = 326400, .ratio = 68 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 68 },
{}
};
static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24400, .cdclk = 648000, .ratio = 54 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals adlp_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
- { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
- { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
- { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
- { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals rplu_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
- { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
- { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
- { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
- { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 480000, .ratio = 50 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 480000, .ratio = 40 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals dg2_cdclk_table[] = {
- { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
- { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
- { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 },
- { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a },
- { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa },
- { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a },
- { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 },
- { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 },
- { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee },
- { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de },
- { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 163200, .ratio = 34, .waveform = 0x8888 },
+ { .refclk = 38400, .cdclk = 204000, .ratio = 34, .waveform = 0x9248 },
+ { .refclk = 38400, .cdclk = 244800, .ratio = 34, .waveform = 0xa4a4 },
+ { .refclk = 38400, .cdclk = 285600, .ratio = 34, .waveform = 0xa54a },
+ { .refclk = 38400, .cdclk = 326400, .ratio = 34, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 367200, .ratio = 34, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 408000, .ratio = 34, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 448800, .ratio = 34, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 489600, .ratio = 34, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 530400, .ratio = 34, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
{}
};
static const struct intel_cdclk_vals mtl_cdclk_table[] = {
- { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0x0000 },
- { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0x0000 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0x0000 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0x0000 },
{}
};
static const struct intel_cdclk_vals lnl_cdclk_table[] = {
- { .refclk = 38400, .cdclk = 153600, .divider = 2, .ratio = 16, .waveform = 0xaaaa },
- { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
- { .refclk = 38400, .cdclk = 211200, .divider = 2, .ratio = 16, .waveform = 0xdbb6 },
- { .refclk = 38400, .cdclk = 230400, .divider = 2, .ratio = 16, .waveform = 0xeeee },
- { .refclk = 38400, .cdclk = 249600, .divider = 2, .ratio = 16, .waveform = 0xf7de },
- { .refclk = 38400, .cdclk = 268800, .divider = 2, .ratio = 16, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 288000, .divider = 2, .ratio = 16, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0xffff },
- { .refclk = 38400, .cdclk = 330000, .divider = 2, .ratio = 25, .waveform = 0xdbb6 },
- { .refclk = 38400, .cdclk = 360000, .divider = 2, .ratio = 25, .waveform = 0xeeee },
- { .refclk = 38400, .cdclk = 390000, .divider = 2, .ratio = 25, .waveform = 0xf7de },
- { .refclk = 38400, .cdclk = 420000, .divider = 2, .ratio = 25, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 450000, .divider = 2, .ratio = 25, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0xffff },
- { .refclk = 38400, .cdclk = 487200, .divider = 2, .ratio = 29, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 522000, .divider = 2, .ratio = 29, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0xffff },
- { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 211200, .ratio = 16, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 230400, .ratio = 16, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 249600, .ratio = 16, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 268800, .ratio = 16, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 288000, .ratio = 16, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 330000, .ratio = 25, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 360000, .ratio = 25, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 390000, .ratio = 25, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 420000, .ratio = 25, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 450000, .ratio = 25, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 487200, .ratio = 29, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 522000, .ratio = 29, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
{}
};
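
Annotation: the removed .divider column ("CD2X divider * 2") is fully determined by the other fields: vco = refclk * ratio, and divider*2 = vco / cdclk, so the driver can derive it instead of tabulating it. A quick standalone check against the non-squash bxt rows shown above (expected dividers were 8, 4, 3, 2, 2):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Derive the old "CD2X divider * 2" column from refclk, ratio and cdclk. */
static unsigned int cd2x_div_x2(unsigned int refclk, unsigned int ratio,
				unsigned int cdclk)
{
	unsigned int vco = refclk * ratio;	/* kHz */

	return DIV_ROUND_CLOSEST(vco, cdclk);
}

int main(void)
{
	printf("%u\n", cd2x_div_x2(19200, 60, 144000));	/* 8 */
	printf("%u\n", cd2x_div_x2(19200, 60, 288000));	/* 4 */
	printf("%u\n", cd2x_div_x2(19200, 60, 384000));	/* 3 */
	printf("%u\n", cd2x_div_x2(19200, 60, 576000));	/* 2 */
	printf("%u\n", cd2x_div_x2(19200, 65, 624000));	/* 2 */
	return 0;
}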
@@ -1901,15 +1900,47 @@ static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.hw.vco > 0;
}
+static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
+ int unsquashed_cdclk;
+ u16 waveform;
+ u32 val;
+
+ waveform = cdclk_squash_waveform(i915, cdclk);
+
+ unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
+ cdclk_squash_divider(waveform));
+
+ val = bxt_cdclk_cd2x_div_sel(i915, unsquashed_cdclk, vco) |
+ bxt_cdclk_cd2x_pipe(i915, pipe);
+
+ /*
+ * Disable SSA Precharge when the CD clock frequency is < 500 MHz;
+ * enable it otherwise.
+ */
+ if ((IS_GEMINILAKE(i915) || IS_BROXTON(i915)) &&
+ cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+ if (DISPLAY_VER(i915) >= 20)
+ val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
+ else
+ val |= skl_cdclk_decimal(cdclk);
+
+ return val;
+}
+
static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- int unsquashed_cdclk;
u16 waveform;
- u32 val;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1926,29 +1957,10 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
waveform = cdclk_squash_waveform(dev_priv, cdclk);
- unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
- cdclk_squash_divider(waveform));
-
if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
- val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) |
- bxt_cdclk_cd2x_pipe(dev_priv, pipe);
-
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- cdclk >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 20)
- val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
- else
- val |= skl_cdclk_decimal(cdclk);
-
- intel_de_write(dev_priv, CDCLK_CTL, val);
+ intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
if (pipe != INVALID_PIPE)
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
@@ -2039,7 +2051,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
- int cdclk, clock, vco;
+ int cdclk, vco;
intel_update_cdclk(dev_priv);
intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
@@ -2048,20 +2060,6 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
- /* DPLL okay; verify the cdclock
- *
- * Some BIOS versions leave an incorrect decimal frequency value and
- * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
- * so sanitize this register.
- */
- cdctl = intel_de_read(dev_priv, CDCLK_CTL);
- /*
- * Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
- * (PIPE_NONE).
- */
- cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
-
/* Make sure this is a legal cdclk value for the platform */
cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
if (cdclk != dev_priv->display.cdclk.hw.cdclk)
@@ -2072,24 +2070,21 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
if (vco != dev_priv->display.cdclk.hw.vco)
goto sanitize;
- expected = skl_cdclk_decimal(cdclk);
-
- /* Figure out what CD2X divider we should be using for this cdclk */
- if (HAS_CDCLK_SQUASH(dev_priv))
- clock = dev_priv->display.cdclk.hw.vco / 2;
- else
- clock = dev_priv->display.cdclk.hw.cdclk;
-
- expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
- dev_priv->display.cdclk.hw.vco);
+ /*
+ * Some BIOS versions leave an incorrect decimal frequency value and
+ * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+ * so sanitize this register.
+ */
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+ expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw, INVALID_PIPE);
/*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
+ * Let's ignore the pipe field, since the BIOS could have configured the
+ * dividers either synced to an active pipe or asynchronously
+ * (PIPE_NONE).
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->display.cdclk.hw.cdclk >= 500000)
- expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
+ expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
if (cdctl == expected)
/* All well; nothing to sanitize */
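
Annotation: the sanitize path now reconstructs the expected CDCLK_CTL value with the same bxt_cdclk_ctl() helper used when programming, then masks the pipe-select field out of both sides before comparing, which keeps the two code paths from drifting apart. A schematic model of that compare; the mask value and helper below are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define PIPE_SELECT_MASK 0x00c00000u	/* hypothetical field to ignore */

/* Stand-in for bxt_cdclk_ctl(): build the value we would program. */
static uint32_t build_ctl(unsigned int cdclk_decimal, uint32_t div_sel)
{
	return cdclk_decimal | div_sel;
}

static int needs_sanitize(uint32_t hw_ctl, uint32_t expected)
{
	/* BIOS may have latched the dividers to any pipe; ignore that. */
	hw_ctl &= ~PIPE_SELECT_MASK;
	expected &= ~PIPE_SELECT_MASK;
	return hw_ctl != expected;
}

int main(void)
{
	uint32_t expected = build_ctl(0x2cf, 0x0);

	/* Same config, different pipe select: no sanitize needed. */
	printf("%d\n", needs_sanitize(expected | 0x00400000, expected));
	/* Stale decimal field left behind by the BIOS: sanitize. */
	printf("%d\n", needs_sanitize(expected ^ 1, expected));
	return 0;
}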
@@ -3467,15 +3462,15 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
{
u32 freq;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
- freq = dg1_rawclk(dev_priv);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ freq = dg1_rawclk(dev_priv);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
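
Annotation: the PCH types form an ascending enum, so a ladder of >= tests must check the newest platform first; the reorder here suggests the MTL value sorts above the fake DG1 one, in which case the old ordering made the 38.4 MHz branch unreachable. A toy demonstration of the hazard (enum values hypothetical):

#include <stdio.h>

/* Hypothetical ascending platform enum, newest last. */
enum pch { PCH_OLD = 1, PCH_DG1 = 2, PCH_MTL = 3 };

static int rawclk_wrong(enum pch p)
{
	if (p >= PCH_DG1)	/* also swallows PCH_MTL: bug */
		return 25000;
	else if (p >= PCH_MTL)	/* unreachable */
		return 38400;
	return 19200;
}

static int rawclk_fixed(enum pch p)
{
	if (p >= PCH_MTL)	/* newest first, so every branch is live */
		return 38400;
	else if (p >= PCH_DG1)
		return 25000;
	return 19200;
}

int main(void)
{
	printf("wrong: %d, fixed: %d\n",
	       rawclk_wrong(PCH_MTL), rawclk_fixed(PCH_MTL));
	return 0;
}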
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index abaacea5c2cc..b9733a73e21d 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -42,6 +42,7 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -846,6 +847,9 @@ intel_crt_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(dev_priv))
+ return connector->status;
+
if (dev_priv->display.params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
@@ -1069,6 +1073,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
+ intel_connector->base.polled = intel_connector->polled;
if (HAS_DDI(dev_priv)) {
assert_port_valid(dev_priv, PORT_E);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 8a84a31c7b48..25593f6aae7d 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -461,70 +461,6 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
-static int intel_mode_vblank_start(const struct drm_display_mode *mode)
-{
- int vblank_start = mode->crtc_vblank_start;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
-
- return vblank_start;
-}
-
-static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- int *min, int *max, int *vblank_start)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *crtc_state;
- const struct drm_display_mode *adjusted_mode;
-
- /*
- * During fastsets/etc. the transcoder is still
- * running with the old timings at this point.
- *
- * TODO: maybe just use the active timings here?
- */
- if (intel_crtc_needs_modeset(new_crtc_state))
- crtc_state = new_crtc_state;
- else
- crtc_state = old_crtc_state;
-
- adjusted_mode = &crtc_state->hw.adjusted_mode;
-
- if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
- /* timing changes should happen with VRR disabled */
- drm_WARN_ON(state->base.dev, intel_crtc_needs_modeset(new_crtc_state) ||
- new_crtc_state->update_m_n || new_crtc_state->update_lrr);
-
- if (intel_vrr_is_push_sent(crtc_state))
- *vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- else
- *vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- } else {
- *vblank_start = intel_mode_vblank_start(adjusted_mode);
- }
-
- /* FIXME needs to be calibrated sensibly */
- *min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
- VBLANK_EVASION_TIME_US);
- *max = *vblank_start - 1;
-
- /*
- * M/N and TRANS_VTOTAL are double buffered on the transcoder's
- * undelayed vblank, so with seamless M/N and LRR we must evade
- * both vblanks.
- *
- * DSB execution waits for the transcoder's undelayed vblank,
- * hence we must kick off the commit before that.
- */
- if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr)
- *min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
-}
-
/**
* intel_pipe_update_start() - start update of a set of display registers
* @state: the atomic state
@@ -542,14 +478,12 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- long timeout = msecs_to_jiffies_timeout(1);
- int scanline, min, max, vblank_start;
- wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
- bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
- DEFINE_WAIT(wait);
+ struct intel_vblank_evade_ctx evade;
+ int scanline;
intel_psr_lock(new_crtc_state);
@@ -566,9 +500,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
- intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
- if (min <= 0 || max <= 0)
- goto irq_disable;
+ intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);
if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
@@ -582,58 +514,14 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
local_irq_disable();
- crtc->debug.min_vbl = min;
- crtc->debug.max_vbl = max;
+ crtc->debug.min_vbl = evade.min;
+ crtc->debug.max_vbl = evade.max;
trace_intel_pipe_update_start(crtc);
- for (;;) {
- /*
- * prepare_to_wait() has a memory barrier, which guarantees
- * other CPUs can see the task state update by the time we
- * read the scanline.
- */
- prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-
- scanline = intel_get_crtc_scanline(crtc);
- if (scanline < min || scanline > max)
- break;
-
- if (!timeout) {
- drm_err(&dev_priv->drm,
- "Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
- break;
- }
-
- local_irq_enable();
-
- timeout = schedule_timeout(timeout);
-
- local_irq_disable();
- }
-
- finish_wait(wq, &wait);
+ scanline = intel_vblank_evade(&evade);
drm_crtc_vblank_put(&crtc->base);
- /*
- * On VLV/CHV DSI the scanline counter would appear to
- * increment approx. 1/3 of a scanline before start of vblank.
- * The registers still get latched at start of vblank however.
- * This means we must not write any registers on the first
- * line of vblank (since not the whole line is actually in
- * vblank). And unfortunately we can't use the interrupt to
- * wait here since it will fire too soon. We could use the
- * frame start interrupt instead since it will fire after the
- * critical scanline, but that would require more changes
- * in the interrupt code. So for now we'll just do the nasty
- * thing and poll for the bad scanline to pass us by.
- *
- * FIXME figure out if BXT+ DSI suffers from this as well
- */
- while (need_vlv_dsi_wa && scanline == vblank_start)
- scanline = intel_get_crtc_scanline(crtc);
-
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
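
Annotation: the open-coded evasion loop moves behind intel_vblank_evade_init()/intel_vblank_evade(), so the atomic commit path and the legacy cursor path share one implementation. The core idea: compute a window of scanlines [min, max] just before vblank start and wait until the beam is outside it, so the register writes cannot straddle the vblank. A compact model; the scanline source is simulated and the names are only indicative of the kernel helpers:

#include <stdio.h>

struct vblank_evade_ctx { int min, max; };

/* Model of intel_vblank_evade_init(): a window ending one line before
 * vblank start, sized by the worst-case update time in scanlines. */
static void evade_init(struct vblank_evade_ctx *e,
		       int vblank_start, int evasion_scanlines)
{
	e->min = vblank_start - evasion_scanlines;
	e->max = vblank_start - 1;
}

/* Model of intel_vblank_evade(): wait until the scanline is outside
 * [min, max], i.e. far enough from vblank to finish the update. */
static int evade(const struct vblank_evade_ctx *e, int scanline, int vtotal)
{
	while (scanline >= e->min && scanline <= e->max)
		scanline = (scanline + 1) % vtotal;	/* simulated beam */
	return scanline;
}

int main(void)
{
	struct vblank_evade_ctx e;

	evade_init(&e, /*vblank_start=*/1080, /*evasion_scanlines=*/10);
	printf("resumed at scanline %d\n", evade(&e, 1075, 1125));
	return 0;
}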
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 49fd100ec98a..4bcf446c75f4 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -55,10 +55,9 @@ static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
const struct drm_dp_vsc_sdp *vsc)
{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
+ struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
- drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
+ drm_dp_vsc_sdp_log(&p, vsc);
}
static void
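
Annotation: the SDP dump helper now hands drm_dp_vsc_sdp_log() a drm_printer instead of checking drm_debug_enabled() itself; the printer object encapsulates the "is this debug category enabled" decision behind a callback. A tiny model of that pattern (all names hypothetical):

#include <stdarg.h>
#include <stdio.h>

struct printer {
	void (*printfn)(struct printer *p, const char *fmt, va_list va);
	int enabled;	/* models the DRM_UT_KMS debug-category check */
};

static void dbg_printfn(struct printer *p, const char *fmt, va_list va)
{
	if (p->enabled)
		vfprintf(stderr, fmt, va);
}

static void pr_printf(struct printer *p, const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	p->printfn(p, fmt, va);
	va_end(va);
}

/* The dumper no longer needs to care whether debugging is on. */
static void dump_vsc_sdp(struct printer *p, int revision)
{
	pr_printf(p, "VSC SDP revision: %d\n", revision);
}

int main(void)
{
	struct printer p = { .printfn = dbg_printfn, .enabled = 1 };

	dump_vsc_sdp(&p, 5);
	return 0;
}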
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 926e2de00eb5..f8b33999d43f 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -22,6 +22,7 @@
#include "intel_frontbuffer.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_vblank.h"
#include "skl_watermark.h"
#include "gem/i915_gem_object.h"
@@ -47,12 +48,23 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
return base + plane_state->view.color_plane[0].offset;
}
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool early_tpt)
{
int x = plane_state->uapi.dst.x1;
int y = plane_state->uapi.dst.y1;
u32 pos = 0;
+ /*
+ * Formula from Bspec:
+ * MAX(-1 * <Cursor vertical size from CUR_CTL based on cursor mode
+ * select setting> + 1, CUR_POS Y Position - Update region Y position)
+ */
+ if (early_tpt)
+ y = max(-1 * drm_rect_height(&plane_state->uapi.dst) + 1,
+ y - crtc_state->psr2_su_area.y1);
+
if (x < 0) {
pos |= CURSOR_POS_X_SIGN;
x = -x;
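
Annotation: two things are visible in this hunk: the sign-magnitude CURPOS encoding, and the new early-transport clamp that rebases Y against the PSR2 selective-update region per the quoted Bspec formula. A standalone model of both; the sign-bit positions are indicative only:

#include <stdio.h>

#define CURSOR_POS_X_SIGN (1u << 15)	/* bit positions hypothetical */
#define CURSOR_POS_Y_SIGN (1u << 31)

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static unsigned int cursor_position(int x, int y, int cursor_height,
				    int su_area_y1, int early_tpt)
{
	unsigned int pos = 0;

	/* Early transport: Y becomes relative to the update region,
	 * clamped so the cursor still overlaps it (Bspec formula). */
	if (early_tpt)
		y = MAX(-cursor_height + 1, y - su_area_y1);

	if (x < 0) {		/* sign-magnitude, not two's complement */
		pos |= CURSOR_POS_X_SIGN;
		x = -x;
	}
	pos |= (unsigned int)x;

	if (y < 0) {
		pos |= CURSOR_POS_Y_SIGN;
		y = -y;
	}
	pos |= (unsigned int)y << 16;

	return pos;
}

int main(void)
{
	printf("0x%08x\n", cursor_position(100, 50, 64, 40, 0));
	printf("0x%08x\n", cursor_position(100, 50, 64, 80, 1)); /* y -> -30 */
	return 0;
}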
@@ -274,7 +286,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
+ pos = intel_cursor_position(crtc_state, plane_state, false);
}
/* On these chipsets we can only modify the base/size/stride
@@ -503,17 +515,24 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) {
+ if (crtc_state->enable_psr2_su_region_et) {
+ u32 val = intel_cursor_position(crtc_state, plane_state,
+ true);
+ intel_de_write_fw(dev_priv, CURPOS_ERLY_TPT(pipe), val);
+ }
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
plane_state->ctl);
- else
+ } else {
i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+ }
}
/* TODO: split into noarm+arm pair */
@@ -536,7 +555,7 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
+ pos = intel_cursor_position(crtc_state, plane_state, false);
}
/*
@@ -647,12 +666,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
{
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
struct intel_plane_state *new_plane_state;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_crtc_state *new_crtc_state;
+ struct intel_vblank_evade_ctx evade;
int ret;
/*
@@ -745,13 +766,25 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- /*
- * Technically we should do a vblank evasion here to make
- * sure all the cursor registers update on the same frame.
- * For now just make sure the register writes happen as
- * quickly as possible to minimize the race window.
- */
- local_irq_disable();
+ intel_vblank_evade_init(crtc_state, crtc_state, &evade);
+
+ intel_psr_lock(crtc_state);
+
+ if (!drm_WARN_ON(&i915->drm, drm_crtc_vblank_get(&crtc->base))) {
+ /*
+ * TODO: maybe check if we're still in PSR
+ * and skip the vblank evasion entirely?
+ */
+ intel_psr_wait_for_idle_locked(crtc_state);
+
+ local_irq_disable();
+
+ intel_vblank_evade(&evade);
+
+ drm_crtc_vblank_put(&crtc->base);
+ } else {
+ local_irq_disable();
+ }
if (new_plane_state->uapi.visible) {
intel_plane_update_noarm(plane, crtc_state, new_plane_state);
@@ -762,6 +795,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
local_irq_enable();
+ intel_psr_unlock(crtc_state);
+
intel_plane_unpin_fb(old_plane_state);
out_free:
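
Annotation: the legacy (non-atomic) cursor fast path previously just disabled interrupts and hoped the writes landed within one frame; it now performs the same vblank evasion as the atomic path, bracketed by the PSR lock and a vblank reference, and falls back to a plain IRQ-off section when no vblank reference can be taken. A control-flow model with stubbed primitives standing in for the kernel calls:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel primitives used by the new path. */
static bool vblank_get(void) { return true; }	/* may fail in the kernel */
static void vblank_put(void) { }
static void psr_lock(void) { }
static void psr_unlock(void) { }
static void psr_wait_for_idle(void) { }
static void irq_disable(void) { }
static void irq_enable(void) { }
static void vblank_evade(void) { puts("evading vblank window"); }
static void write_cursor_regs(void) { puts("cursor registers written"); }

static void legacy_cursor_update(void)
{
	psr_lock();

	if (vblank_get()) {
		/* Wait for PSR exit first, then evade, so the writes
		 * land inside one frame while the pipe is running. */
		psr_wait_for_idle();
		irq_disable();
		vblank_evade();
		vblank_put();
	} else {
		irq_disable();	/* fallback: no evasion possible */
	}

	write_cursor_regs();
	irq_enable();

	psr_unlock();
}

int main(void)
{
	legacy_cursor_update();
	return 0;
}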
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 6b25e195232f..288a00e083c8 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -78,7 +78,7 @@ static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
intel_de_rmw(i915,
- XELPDP_PORT_MSGBUS_TIMER(encoder->port, lane),
+ XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane),
XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
XELPDP_PORT_MSGBUS_TIMER_VAL);
}
@@ -117,7 +117,7 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
enum port port, int lane)
{
- intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+ intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
@@ -125,10 +125,10 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
{
enum phy phy = intel_port_to_phy(i915, port);
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
@@ -144,7 +144,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
enum phy phy = intel_port_to_phy(i915, port);
if (__intel_de_wait_for_register(i915,
- XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+ XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_MSGBUS_TIMEOUT_FAST_US,
@@ -152,7 +152,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
- if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(port, lane)) &
+ if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) &
XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
drm_dbg_kms(&i915->drm,
"PHY %c Hardware did not detect a timeout\n",
@@ -186,7 +186,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
int ack;
u32 val;
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -195,7 +195,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
return -ETIMEDOUT;
}
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));
@@ -253,7 +253,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
int ack;
u32 val;
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -262,14 +262,14 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
return -ETIMEDOUT;
}
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
(committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
XELPDP_PORT_M2P_DATA(data) |
XELPDP_PORT_M2P_ADDRESS(addr));
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -282,7 +282,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
- } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
+ } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
@@ -2096,13 +2096,54 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
return intel_c20pll_calc_state(crtc_state, encoder);
}
-static bool intel_c20_use_mplla(u32 clock)
+static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
{
- /* 10G and 20G rates use MPLLA */
- if (clock == 1000000 || clock == 2000000)
- return true;
+ return state->tx[0] & C20_PHY_USE_MPLLB;
+}
- return false;
+static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state)
+{
+ unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
+ unsigned int multiplier, refclk = 38400;
+ unsigned int tx_clk_div;
+ unsigned int ref_clk_mpllb_div;
+ unsigned int fb_clk_div4_en;
+ unsigned int ref, vco;
+ unsigned int tx_rate_mult;
+ unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
+
+ if (intel_c20phy_use_mpllb(pll_state)) {
+ tx_rate_mult = 1;
+ frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
+ frac_quot = pll_state->mpllb[8];
+ frac_rem = pll_state->mpllb[9];
+ frac_den = pll_state->mpllb[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
+ fb_clk_div4_en = 0;
+ } else {
+ tx_rate_mult = 2;
+ frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
+ frac_quot = pll_state->mplla[8];
+ frac_rem = pll_state->mplla[9];
+ frac_den = pll_state->mplla[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
+ fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
+ }
+
+ if (frac_en)
+ frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
+ else
+ frac = 0;
+
+ ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
+ vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
+
+ return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
}
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
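
Annotation: intel_c20pll_calc_port_clock() is moved above its new caller so the readout path can populate pll_state->clock from the hardware words. The arithmetic: the reference clock is optionally doubled or quadrupled and divided down, the VCO is ref * (multiplier/4 + frac/2^17) / 10, and the port clock is the VCO shifted by the rate multiplier, TX clock divider and TX rate. A standalone rendering of that math; the sample field values in main() are made up, not taken from a real PHY dump:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned int c20_port_clock(unsigned int refclk,	/* kHz */
				   unsigned int multiplier,	/* x4 fixed pt */
				   unsigned int frac,		/* /2^17 */
				   unsigned int ref_clk_div,
				   unsigned int fb_clk_div4_en,
				   unsigned int tx_rate_mult,
				   unsigned int tx_clk_div,
				   unsigned int tx_rate)
{
	uint64_t ref, vco;

	ref = DIV_ROUND_CLOSEST((uint64_t)refclk << (1 + fb_clk_div4_en),
				(uint64_t)1 << ref_clk_div);
	/* multiplier is in quarters: shifting by 17 - 2 aligns it with
	 * the 2^-17 fractional part before the common >> 17. */
	vco = DIV_ROUND_CLOSEST((ref * (((uint64_t)multiplier << 15) + frac)) >> 17,
				10);

	return (unsigned int)(vco << tx_rate_mult >> tx_clk_div >> tx_rate);
}

int main(void)
{
	/* Hypothetical register fields; refclk fixed at 38.4 MHz as in
	 * the driver. */
	printf("port clock: %u kHz\n",
	       c20_port_clock(38400, 368, 0, 0, 0, 1, 1, 0));
	return 0;
}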
@@ -2138,7 +2179,7 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
PHY_C20_A_CMN_CNTX_CFG(i));
}
- if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
+ if (intel_c20phy_use_mpllb(pll_state)) {
/* MPLLB configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
@@ -2160,6 +2201,8 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
}
}
+ pll_state->clock = intel_c20pll_calc_port_clock(encoder, pll_state);
+
intel_cx0_phy_transaction_end(encoder, wakeref);
}
@@ -2174,12 +2217,12 @@ void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]);
- if (intel_c20_use_mplla(hw_state->clock)) {
- for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
- drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
- } else {
+ if (intel_c20phy_use_mpllb(hw_state)) {
for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++)
drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
+ drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
}
}
@@ -2326,27 +2369,27 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
}
/* 3.3 mpllb or mplla configuration */
- if (intel_c20_use_mplla(clock)) {
- for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
+ if (intel_c20phy_use_mpllb(pll_state)) {
+ for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_A_MPLLA_CNTX_CFG(i),
- pll_state->mplla[i]);
+ PHY_C20_A_MPLLB_CNTX_CFG(i),
+ pll_state->mpllb[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_B_MPLLA_CNTX_CFG(i),
- pll_state->mplla[i]);
+ PHY_C20_B_MPLLB_CNTX_CFG(i),
+ pll_state->mpllb[i]);
}
} else {
- for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
+ for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_A_MPLLB_CNTX_CFG(i),
- pll_state->mpllb[i]);
+ PHY_C20_A_MPLLA_CNTX_CFG(i),
+ pll_state->mplla[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_B_MPLLB_CNTX_CFG(i),
- pll_state->mpllb[i]);
+ PHY_C20_B_MPLLA_CNTX_CFG(i),
+ pll_state->mplla[i]);
}
}
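
Annotation: the programming path previously inferred MPLLA vs MPLLB from the link clock (10G/20G meaning MPLLA); it now keys off the USE_MPLLB bit latched in tx[0] of the PLL state, the same test the readout and dump paths use, so all three agree on a single source of truth. A reduced model of selecting the register bank from the state flag; the bit position and array sizes below are illustrative:

#include <stdint.h>
#include <stdio.h>

#define C20_PHY_USE_MPLLB (1u << 7)	/* bit position hypothetical */

struct c20pll_state {
	uint16_t tx0;
	uint16_t mplla[10];
	uint16_t mpllb[11];
};

static int use_mpllb(const struct c20pll_state *s)
{
	/* Single source of truth: the flag captured in the PLL state,
	 * not a guess derived from the link rate. */
	return s->tx0 & C20_PHY_USE_MPLLB;
}

static void program_pll(const struct c20pll_state *s)
{
	if (use_mpllb(s))
		printf("writing %zu MPLLB words\n",
		       sizeof(s->mpllb) / sizeof(s->mpllb[0]));
	else
		printf("writing %zu MPLLA words\n",
		       sizeof(s->mplla) / sizeof(s->mplla[0]));
}

int main(void)
{
	struct c20pll_state a = { .tx0 = 0 };
	struct c20pll_state b = { .tx0 = C20_PHY_USE_MPLLB };

	program_pll(&a);
	program_pll(&b);
	return 0;
}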
@@ -2408,51 +2451,6 @@ static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
return tmpclk;
}
-static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state)
-{
- unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
- unsigned int multiplier, refclk = 38400;
- unsigned int tx_clk_div;
- unsigned int ref_clk_mpllb_div;
- unsigned int fb_clk_div4_en;
- unsigned int ref, vco;
- unsigned int tx_rate_mult;
- unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
-
- if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
- tx_rate_mult = 1;
- frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
- frac_quot = pll_state->mpllb[8];
- frac_rem = pll_state->mpllb[9];
- frac_den = pll_state->mpllb[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
- fb_clk_div4_en = 0;
- } else {
- tx_rate_mult = 2;
- frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
- frac_quot = pll_state->mplla[8];
- frac_rem = pll_state->mplla[9];
- frac_den = pll_state->mplla[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
- fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
- }
-
- if (frac_en)
- frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
- else
- frac = 0;
-
- ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
- vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
-
- return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
-}
-
static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool lane_reversal)
@@ -2460,7 +2458,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 val = 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ XELPDP_PORT_REVERSAL,
lane_reversal ? XELPDP_PORT_REVERSAL : 0);
if (lane_reversal)
@@ -2481,7 +2480,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
else
val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
@@ -2514,15 +2513,16 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
u8 lane_mask, u8 state)
{
enum phy phy = intel_port_to_phy(i915, port);
+ i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
int lane;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_de_rmw(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK),
intel_cx0_get_powerdown_state(lane_mask, state));
/* Wait for pending transactions. */
for_each_cx0_lane_in_mask(lane_mask, lane)
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -2531,12 +2531,12 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
intel_cx0_bus_reset(i915, port, lane);
}
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_de_rmw(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES),
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+ if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
@@ -2545,10 +2545,10 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
{
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
@@ -2593,27 +2593,27 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
lane_pipe_reset);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
lane_phy_current_status, lane_phy_current_status,
XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
intel_cx0_get_pclk_refclk_ack(lane_mask),
XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
@@ -2624,9 +2624,10 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
CX0_P2_STATE_RESET);
intel_cx0_setup_powerdown(i915, port);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, 0);
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status,
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ lane_phy_current_status,
XELPDP_PORT_RESET_END_TIMEOUT))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
@@ -2761,12 +2762,12 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 9. Set PORT_CLOCK_CTL register PCLK PLL Request
* LN<Lane for maxPCLK> to "1" to enable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_ack(maxpclk_lane),
XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
@@ -2786,7 +2787,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 clock;
- u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+ u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
@@ -2839,11 +2840,11 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
*/
val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock));
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
/* 2. Read back PORT_CLOCK_CTL REGISTER */
- val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+ val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
/*
* 3. Follow the Display Voltage Frequency Switching - Sequence
@@ -2854,10 +2855,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL.
*/
val |= XELPDP_TBT_CLOCK_REQUEST;
- intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val);
+ intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_ACK,
XELPDP_TBT_CLOCK_ACK,
100, 0, NULL))
@@ -2909,7 +2910,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
* 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
* to "0" to disable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0);
@@ -2919,7 +2920,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
@@ -2932,9 +2933,9 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
*/
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK, 0);
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
intel_cx0_phy_transaction_end(encoder, wakeref);
@@ -2953,11 +2954,11 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -2970,7 +2971,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
@@ -2997,7 +2998,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* TODO: Determine the PLL type from the SW state, once MTL PLL
* handling is done via the standard shared DPLL framework.
*/
- u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+ u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
@@ -3016,6 +3017,9 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
int i;
+ if (intel_crtc_needs_fastset(state))
+ return;
+
for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
u8 expected = mpllb_sw_state->pll[i];
@@ -3067,10 +3071,15 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
- bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
- bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
+ bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
+ bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
int i;
+ I915_STATE_WARN(i915, mpll_hw_state->clock != mpll_sw_state->clock,
+ "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ mpll_sw_state->clock, mpll_hw_state->clock);
+
I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
"[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
crtc->base.base.id, crtc->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index adf8f4ce0d49..bdd0c8c4ef97 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -7,16 +7,39 @@
#define __INTEL_CX0_PHY_REGS_H__
#include "i915_reg_defs.h"
+#include "intel_display_limits.h"
+
+/*
+ * Wrapper macro to convert from port number to the index used in some of the
+ * registers. For Display version 20 and above it converts the port number to a
+ * single range, starting with the TC offsets. When used together with
+ * _PICK_EVEN_2RANGES(idx, PORT_TC1, ...), this single range will be the second
+ * range. Example:
+ *
+ * PORT_TC1 -> PORT_TC1
+ * PORT_TC2 -> PORT_TC2
+ * PORT_TC3 -> PORT_TC3
+ * PORT_TC4 -> PORT_TC4
+ * PORT_A -> PORT_TC4 + 1
+ * PORT_B -> PORT_TC4 + 2
+ * ...
+ */
+#define __xe2lpd_port_idx(port) \
+ (port >= PORT_TC1 ? port : PORT_TC4 + 1 + port - PORT_A)
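A quick user-space self-check of that mapping. The macro is copied here under a name without the reserved double underscore and with its argument parenthesized; the enum values are stand-ins for i915's enum port, and only their relative order matters:

#include <assert.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D,	/* stand-in values */
	    PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4 };

#define xe2lpd_port_idx(port) \
	((port) >= PORT_TC1 ? (port) : PORT_TC4 + 1 + (port) - PORT_A)

int main(void)
{
	assert(xe2lpd_port_idx(PORT_TC1) == PORT_TC1);	/* TC ports map to themselves */
	assert(xe2lpd_port_idx(PORT_TC4) == PORT_TC4);
	assert(xe2lpd_port_idx(PORT_A) == PORT_TC4 + 1);	/* lettered ports follow */
	assert(xe2lpd_port_idx(PORT_B) == PORT_TC4 + 2);
	return 0;
}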
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A 0x64040
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B 0x64140
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1 0x16F240
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2 0x16F440
-#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_M2P_MSGBUS_CTL(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4)
+#define XELPDP_PORT_M2P_MSGBUS_CTL(i915__, port, lane) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_M2P_MSGBUS_CTL(__xe2lpd_port_idx(port), lane) : \
+ _XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
#define XELPDP_PORT_M2P_TRANSACTION_PENDING REG_BIT(31)
#define XELPDP_PORT_M2P_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
@@ -27,11 +50,16 @@
#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
#define XELPDP_PORT_M2P_ADDRESS_MASK REG_GENMASK(11, 0)
#define XELPDP_PORT_M2P_ADDRESS(val) REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
-#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+
+#define _XELPDP_PORT_P2M_MSGBUS_STATUS(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4 + 8)
+#define XELPDP_PORT_P2M_MSGBUS_STATUS(i915__, port, lane) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_P2M_MSGBUS_STATUS(__xe2lpd_port_idx(port), lane) : \
+ _XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane))
#define XELPDP_PORT_P2M_RESPONSE_READY REG_BIT(31)
#define XELPDP_PORT_P2M_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
#define XELPDP_PORT_P2M_COMMAND_READ_ACK 0x4
@@ -54,11 +82,15 @@
#define _XELPDP_PORT_BUF_CTL1_LN0_B 0x64104
#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1 0x16F200
#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2 0x16F400
-#define XELPDP_PORT_BUF_CTL1(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL1(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2))
+#define XELPDP_PORT_BUF_CTL1(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_BUF_CTL1(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_BUF_CTL1(port))
#define XELPDP_PORT_BUF_D2D_LINK_ENABLE REG_BIT(29)
#define XELPDP_PORT_BUF_D2D_LINK_STATE REG_BIT(28)
#define XELPDP_PORT_BUF_SOC_PHY_READY REG_BIT(24)
@@ -75,12 +107,15 @@
#define XELPDP_PORT_WIDTH_MASK REG_GENMASK(3, 1)
#define XELPDP_PORT_WIDTH(val) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
-#define XELPDP_PORT_BUF_CTL2(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL2(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 4)
-
+#define XELPDP_PORT_BUF_CTL2(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_BUF_CTL2(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_BUF_CTL2(port))
#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
@@ -95,11 +130,15 @@
#define XELPDP_POWER_STATE_READY_MASK REG_GENMASK(7, 4)
#define XELPDP_POWER_STATE_READY(val) REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
-#define XELPDP_PORT_BUF_CTL3(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL3(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 8)
+#define XELPDP_PORT_BUF_CTL3(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_BUF_CTL3(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_BUF_CTL3(port))
#define XELPDP_PLL_LANE_STAGGERING_DELAY_MASK REG_GENMASK(15, 8)
#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
@@ -114,11 +153,15 @@
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1 0x16f258
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2 0x16f458
-#define XELPDP_PORT_MSGBUS_TIMER(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_MSGBUS_TIMER(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_A, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_B, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2) + (lane) * 4)
+#define XELPDP_PORT_MSGBUS_TIMER(i915__, port, lane) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_MSGBUS_TIMER(__xe2lpd_port_idx(port), lane) : \
+ _XELPDP_PORT_MSGBUS_TIMER(port, lane))
#define XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT REG_BIT(31)
#define XELPDP_PORT_MSGBUS_TIMER_VAL_MASK REG_GENMASK(23, 0)
#define XELPDP_PORT_MSGBUS_TIMER_VAL REG_FIELD_PREP(XELPDP_PORT_MSGBUS_TIMER_VAL_MASK, 0xa000)
@@ -127,11 +170,15 @@
#define _XELPDP_PORT_CLOCK_CTL_B 0x641E0
#define _XELPDP_PORT_CLOCK_CTL_USBC1 0x16F260
#define _XELPDP_PORT_CLOCK_CTL_USBC2 0x16F460
-#define XELPDP_PORT_CLOCK_CTL(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_CLOCK_CTL(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_CLOCK_CTL_A, \
_XELPDP_PORT_CLOCK_CTL_B, \
_XELPDP_PORT_CLOCK_CTL_USBC1, \
_XELPDP_PORT_CLOCK_CTL_USBC2))
+#define XELPDP_PORT_CLOCK_CTL(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_CLOCK_CTL(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_CLOCK_CTL(port))
#define XELPDP_LANE_PCLK_PLL_REQUEST(lane) REG_BIT(31 - ((lane) * 4))
#define XELPDP_LANE_PCLK_PLL_ACK(lane) REG_BIT(30 - ((lane) * 4))
#define XELPDP_LANE_PCLK_REFCLK_REQUEST(lane) REG_BIT(29 - ((lane) * 4))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 12a29363e5df..bea441590204 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -178,7 +178,7 @@ static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port)
int ret;
/* FIXME: find out why Bspec's 100us timeout is too short */
- ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) &
+ ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port)) &
XELPDP_PORT_BUF_PHY_IDLE), 10000);
if (ret)
drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n",
@@ -226,7 +226,9 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
}
if (DISPLAY_VER(dev_priv) >= 14)
- ret = _wait_for(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE),
+ ret = _wait_for(!(intel_de_read(dev_priv,
+ XELPDP_PORT_BUF_CTL1(dev_priv, port)) &
+ XELPDP_PORT_BUF_PHY_IDLE),
timeout_us, 10, 10);
else
ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE),
@@ -2429,13 +2431,22 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ i915_reg_t reg;
+ u32 set_bits, wait_bits;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), 0,
- XELPDP_PORT_BUF_D2D_LINK_ENABLE);
+ if (DISPLAY_VER(dev_priv) >= 20) {
+ reg = DDI_BUF_CTL(port);
+ set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+ wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
+ } else {
+ reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
+ wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
+ }
- if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
- XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n",
+ intel_de_rmw(dev_priv, reg, 0, set_bits);
+ if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
+ drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
@@ -2448,7 +2459,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 val;
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port));
val &= ~XELPDP_PORT_WIDTH_MASK;
val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
@@ -2461,7 +2472,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
val |= XELPDP_PORT_REVERSAL;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+ intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val);
}
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
@@ -2472,7 +2483,7 @@ static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2898,13 +2909,22 @@ mtl_ddi_disable_d2d_link(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ i915_reg_t reg;
+ u32 clr_bits, wait_bits;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
- XELPDP_PORT_BUF_D2D_LINK_ENABLE, 0);
+ if (DISPLAY_VER(dev_priv) >= 20) {
+ reg = DDI_BUF_CTL(port);
+ clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+ wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
+ } else {
+ reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
+ wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
+ }
- if (wait_for_us(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
- XELPDP_PORT_BUF_D2D_LINK_STATE), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for PORT_BUF_CTL %c\n",
+ intel_de_rmw(dev_priv, reg, clr_bits, 0);
+ if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
+ drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3038,7 +3058,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
/* De-select Thunderbolt */
if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(encoder->port),
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@@ -3319,10 +3339,13 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(lane_count);
+
+ if (DISPLAY_VER(dev_priv) >= 20)
+ buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
} else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -3543,6 +3566,9 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
@@ -3941,11 +3967,11 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
+ intel_psr_get_config(encoder, pipe_config);
+
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
- intel_psr_get_config(encoder, pipe_config);
-
intel_audio_codec_get_config(encoder, pipe_config);
}
@@ -5117,6 +5143,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->suspend_complete = intel_ddi_tc_encoder_suspend_complete;
encoder->shutdown_complete = intel_ddi_tc_encoder_shutdown_complete;
+ dig_port->lock = intel_tc_port_lock;
+ dig_port->unlock = intel_tc_port_unlock;
+
if (intel_tc_port_init(dig_port, is_legacy) < 0)
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b10aad15a63d..7db0655d8c9e 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -104,6 +104,7 @@
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
@@ -2706,6 +2707,15 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
*/
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ width = drm_rect_width(&crtc_state->psr2_su_area);
+ height = drm_rect_height(&crtc_state->psr2_su_area);
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
+ PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
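PIPE_SRCSZ_ERLY_TPT takes the same size encoding as PIPESRC: width and height are stored minus one, packed into a single register. A sketch of that encoding, assuming the usual layout of width in the upper and height in the lower 16 bits — the authoritative bit positions are whatever PIPESRC_WIDTH()/PIPESRC_HEIGHT() define:

#include <assert.h>
#include <stdint.h>

/* Pack a source size the way PIPESRC-style registers expect it. */
static uint32_t pipesrc_size(uint32_t width, uint32_t height)
{
	return ((width - 1) << 16) | (height - 1);	/* size-minus-one fields */
}

int main(void)
{
	assert(pipesrc_size(1920, 1080) == 0x077f0437);
	return 0;
}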
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -4764,7 +4774,11 @@ static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
- return memcmp(a, b, sizeof(*a)) == 0;
+ return a->pixelformat == b->pixelformat &&
+ a->colorimetry == b->colorimetry &&
+ a->bpc == b->bpc &&
+ a->dynamic_range == b->dynamic_range &&
+ a->content_type == b->content_type;
}
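The switch from memcmp() to an explicit field comparison makes the check indifferent to members (and struct padding) that the two sides may legitimately fill differently. A minimal illustration with a hypothetical struct — drm_dp_vsc_sdp itself carries more fields than shown here:

#include <stdbool.h>

struct vsc {					/* hypothetical subset */
	unsigned char pixelformat, colorimetry, bpc;
	unsigned char dynamic_range, content_type;
	unsigned char revision;			/* may differ between SW and HW state */
};

static bool vsc_fields_equal(const struct vsc *a, const struct vsc *b)
{
	return a->pixelformat == b->pixelformat &&	/* only the listed fields count; */
	       a->colorimetry == b->colorimetry &&	/* revision and any padding are  */
	       a->bpc == b->bpc &&			/* ignored, unlike with memcmp() */
	       a->dynamic_range == b->dynamic_range &&
	       a->content_type == b->content_type;
}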
static bool
@@ -4799,28 +4813,27 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
}
static void
-pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
+pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
bool fastset, const char *name,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
+ struct drm_printer p;
+
if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
- drm_dbg_kms(&dev_priv->drm,
- "fastset requirement not met in %s dp sdp\n", name);
- drm_dbg_kms(&dev_priv->drm, "expected:\n");
- drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg_kms(&dev_priv->drm, "found:\n");
- drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
} else {
- drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
- drm_err(&dev_priv->drm, "expected:\n");
- drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err(&dev_priv->drm, "found:\n");
- drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
+ p = drm_err_printer(&i915->drm, NULL);
+
+ drm_printf(&p, "mismatch in %s dp sdp\n", name);
}
+
+ drm_printf(&p, "expected:\n");
+ drm_dp_vsc_sdp_log(&p, a);
+ drm_printf(&p, "found:\n");
+ drm_dp_vsc_sdp_log(&p, b);
}
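The drm_printer conversion follows a common shape: choose the output sink once (debug printer for fastset checks, error printer for real mismatches), then emit the shared message body through it instead of duplicating it per severity. Roughly, in plain C:

#include <stdarg.h>
#include <stdio.h>

typedef void (*sink_fn)(const char *fmt, va_list ap);

static void dbg_sink(const char *fmt, va_list ap) { vfprintf(stdout, fmt, ap); }
static void err_sink(const char *fmt, va_list ap) { vfprintf(stderr, fmt, ap); }

static void emit(sink_fn sink, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	sink(fmt, ap);
	va_end(ap);
}

static void report_mismatch(int fastset)
{
	sink_fn sink = fastset ? dbg_sink : err_sink;	/* pick the sink once */

	emit(sink, "expected:\n");			/* shared body follows */
	emit(sink, "found:\n");
}

int main(void)
{
	report_mismatch(1);	/* debug path */
	report_mismatch(0);	/* error path */
	return 0;
}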
/* Returns the length up to and including the last differing byte */
@@ -5045,8 +5058,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} while (0)
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
- if (!current_config->has_psr && !pipe_config->has_psr && \
- !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
+ if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
&current_config->infoframes.name, \
@@ -5199,13 +5211,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(csc);
PIPE_CONF_CHECK_CSC(output_csc);
-
- if (current_config->active_planes) {
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_psr2);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_I(dc3co_exitline);
- }
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -6307,6 +6312,9 @@ int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
+ if (!intel_display_driver_check_access(dev_priv))
+ return -ENODEV;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
/*
@@ -7068,6 +7076,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
+ intel_atomic_global_state_wait_for_dependencies(state);
/*
* During full modesets we write a lot of registers, wait
@@ -7244,6 +7253,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
+ intel_atomic_global_state_commit_done(state);
if (state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
@@ -7294,6 +7304,38 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
+static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
+{
+ int ret;
+
+ ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_global_state_setup_commit(state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int intel_atomic_swap_state(struct intel_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_swap_state(&state->base, true);
+ if (ret)
+ return ret;
+
+ intel_atomic_swap_global_state(state);
+
+ intel_shared_dpll_swap_state(state);
+
+ intel_atomic_track_fbs(state);
+
+ return 0;
+}
+
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock)
{
@@ -7339,11 +7381,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
return ret;
}
- ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
+ ret = intel_atomic_setup_commit(state, nonblock);
if (!ret)
- ret = drm_atomic_helper_swap_state(&state->base, true);
- if (!ret)
- intel_atomic_swap_global_state(state);
+ ret = intel_atomic_swap_state(state);
if (ret) {
struct intel_crtc_state *new_crtc_state;
@@ -7357,8 +7397,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- intel_shared_dpll_swap_state(state);
- intel_atomic_track_fbs(state);
drm_atomic_state_get(&state->base);
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
@@ -7811,6 +7849,7 @@ static const struct intel_display_funcs skl_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = skl_commit_modeset_enables,
.get_initial_plane_config = skl_get_initial_plane_config,
+ .fixup_initial_plane_config = skl_fixup_initial_plane_config,
};
static const struct intel_display_funcs ddi_display_funcs = {
@@ -7819,6 +7858,7 @@ static const struct intel_display_funcs ddi_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs pch_split_display_funcs = {
@@ -7827,6 +7867,7 @@ static const struct intel_display_funcs pch_split_display_funcs = {
.crtc_disable = ilk_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs vlv_display_funcs = {
@@ -7835,6 +7876,7 @@ static const struct intel_display_funcs vlv_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs i9xx_display_funcs = {
@@ -7843,6 +7885,7 @@ static const struct intel_display_funcs i9xx_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
/**
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 47297ed85822..fdeaac994e17 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -28,6 +28,8 @@
#include "intel_opregion.h"
#include "intel_wm_types.h"
+struct task_struct;
+
struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
@@ -47,6 +49,7 @@ struct intel_fbdev;
struct intel_fdi_funcs;
struct intel_hotplug_funcs;
struct intel_initial_plane_config;
+struct intel_opregion;
struct intel_overlay;
/* Amount of SAGV/QGV points, BSpec precisely defines this */
@@ -64,6 +67,8 @@ struct intel_display_funcs {
struct intel_crtc_state *);
void (*get_initial_plane_config)(struct intel_crtc *,
struct intel_initial_plane_config *);
+ bool (*fixup_initial_plane_config)(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config);
void (*crtc_enable)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*crtc_disable)(struct intel_atomic_state *state,
@@ -172,6 +177,12 @@ struct intel_hotplug {
struct work_struct poll_init_work;
bool poll_enabled;
+ /*
+ * Queuing of hotplug_work, reenable_work and poll_init_work is
+ * enabled. Protected by drm_i915_private::irq_lock.
+ */
+ bool detection_work_enabled;
+
unsigned int hpd_storm_threshold;
/* Whether or not to count short HPD IRQs in HPD storms */
u8 hpd_short_storm_enabled;
@@ -299,6 +310,11 @@ struct intel_display {
} funcs;
struct {
+ bool any_task_allowed;
+ struct task_struct *allowed_task;
+ } access;
+
+ struct {
/* backlight registers and fields in struct intel_panel */
struct mutex lock;
} backlight;
@@ -513,7 +529,7 @@ struct intel_display {
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
- struct intel_opregion opregion;
+ struct intel_opregion *opregion;
struct intel_overlay *overlay;
struct intel_display_params params;
struct intel_vbt_data vbt;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d951edb36687..6f2d13c8ccf7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -86,28 +86,6 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_opregion(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_opregion *opregion = &i915->display.opregion;
-
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
-
- return 0;
-}
-
-static int i915_vbt(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_opregion *opregion = &i915->display.opregion;
-
- if (opregion->vbt)
- seq_write(m, opregion->vbt, opregion->vbt_size);
-
- return 0;
-}
-
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1066,8 +1044,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_sr_status", i915_sr_status, 0},
- {"i915_opregion", i915_opregion, 0},
- {"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
@@ -1105,10 +1081,12 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
+ intel_bios_debugfs_register(i915);
intel_cdclk_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
+ intel_opregion_debugfs_register(i915);
intel_psr_debugfs_register(i915);
intel_wm_debugfs_register(i915);
intel_display_debugfs_params(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index b7e68eb62452..f35718748555 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 0b522c6a8d6f..c02d79b50006 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1012,7 +1012,7 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
goto display_fused_off;
}
- if (IS_GRAPHICS_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ if (IS_DISPLAY_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 9df9097a0255..4f7ba7eb03d2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -45,6 +45,7 @@
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
+#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
@@ -276,12 +277,144 @@ cleanup_bios:
return ret;
}
+static void set_display_access(struct drm_i915_private *i915,
+ bool any_task_allowed,
+ struct task_struct *allowed_task)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ int err;
+
+ intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
+ err = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ if (err)
+ continue;
+
+ i915->display.access.any_task_allowed = any_task_allowed;
+ i915->display.access.allowed_task = allowed_task;
+ }
+
+ drm_WARN_ON(&i915->drm, err);
+}
+
+/**
+ * intel_display_driver_enable_user_access - Enable display HW access for all threads
+ * @i915: i915 device instance
+ *
+ * Enable the display HW access for all threads. Examples of such accesses
+ * are modeset commits and connector probing.
+ *
+ * This function should be called during driver loading and system resume once
+ * all the HW initialization steps are done.
+ */
+void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
+{
+ set_display_access(i915, true, NULL);
+
+ intel_hpd_enable_detection_work(i915);
+}
+
+/**
+ * intel_display_driver_disable_user_access - Disable display HW access for user threads
+ * @i915: i915 device instance
+ *
+ * Disable the display HW access for user threads. Examples of such accesses
+ * are modeset commits and connector probing. Access remains enabled for the
+ * current thread, which should only perform HW init/deinit programming (such
+ * as the initial modeset during driver loading, or the disabling modeset
+ * during driver unloading and system suspend/shutdown). This function should
+ * be followed by calling either intel_display_driver_enable_user_access()
+ * after completing the HW init programming, or
+ * intel_display_driver_suspend_access() after completing the HW deinit
+ * programming.
+ *
+ * This function should be called during driver loading/unloading and system
+ * suspend/shutdown before starting the HW init/deinit programming.
+ */
+void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
+{
+ intel_hpd_disable_detection_work(i915);
+
+ set_display_access(i915, false, current);
+}
+
+/**
+ * intel_display_driver_suspend_access - Suspend display HW access for all threads
+ * @i915: i915 device instance
+ *
+ * Disable the display HW access for all threads. Examples of such accesses
+ * are modeset commits and connector probing. This call should either be
+ * followed by calling intel_display_driver_resume_access(), or the driver
+ * should be unloaded/shut down.
+ *
+ * This function should be called during driver unloading and system
+ * suspend/shutdown after completing the HW deinit programming.
+ */
+void intel_display_driver_suspend_access(struct drm_i915_private *i915)
+{
+ set_display_access(i915, false, NULL);
+}
+
+/**
+ * intel_display_driver_resume_access - Resume display HW access for the resume thread
+ * @i915: i915 device instance
+ *
+ * Enable the display HW access for the current resume thread, keeping the
+ * access disabled for all other (user) threads. Examples of such accesses
+ * are modeset commits and connector probing. The resume thread should only
+ * perform HW init programming (such as the restoring modeset). This function
+ * should be followed by calling intel_display_driver_enable_user_access()
+ * after completing the HW init programming steps.
+ *
+ * This function should be called during system resume before starting the HW
+ * init steps.
+ */
+void intel_display_driver_resume_access(struct drm_i915_private *i915)
+{
+ set_display_access(i915, false, current);
+}
+
+/**
+ * intel_display_driver_check_access - Check if the current thread has display HW access
+ * @i915: i915 device instance
+ *
+ * Check whether the current thread has display HW access, printing a debug
+ * message if it doesn't. Such accesses are modeset commits and connector
+ * probing. If the function returns %false any HW access should be prevented.
+ *
+ * Returns %true if the current thread has display HW access, %false
+ * otherwise.
+ */
+bool intel_display_driver_check_access(struct drm_i915_private *i915)
+{
+ char comm[TASK_COMM_LEN];
+ char current_task[TASK_COMM_LEN + 16];
+ char allowed_task[TASK_COMM_LEN + 16] = "none";
+
+ if (i915->display.access.any_task_allowed ||
+ i915->display.access.allowed_task == current)
+ return true;
+
+ snprintf(current_task, sizeof(current_task), "%s[%d]",
+ get_task_comm(comm, current),
+ task_pid_vnr(current));
+
+ if (i915->display.access.allowed_task)
+ snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
+ get_task_comm(comm, i915->display.access.allowed_task),
+ task_pid_vnr(i915->display.access.allowed_task));
+
+ drm_dbg_kms(&i915->drm,
+ "Reject display access from task %s (allowed to %s)\n",
+ current_task, allowed_task);
+
+ return false;
+}
+
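A user-space analogue of the gate these helpers manage, assuming pthreads; the kernel version additionally takes all modeset locks while flipping the fields, and logs the current and allowed task names on rejection:

#include <pthread.h>
#include <stdbool.h>

struct access_gate {
	bool any_task_allowed;		/* user access enabled */
	bool have_allowed_task;
	pthread_t allowed_task;		/* else: the one privileged thread */
};

/* Mirrors intel_display_driver_disable_user_access(): only the calling
 * (init/deinit) thread keeps access. */
static void gate_disable_user_access(struct access_gate *g)
{
	g->any_task_allowed = false;
	g->have_allowed_task = true;
	g->allowed_task = pthread_self();
}

/* Mirrors intel_display_driver_check_access(). */
static bool gate_check_access(const struct access_gate *g)
{
	return g->any_task_allowed ||
	       (g->have_allowed_task &&
		pthread_equal(g->allowed_task, pthread_self()));
}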
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
{
struct drm_device *dev = &i915->drm;
enum pipe pipe;
- struct intel_crtc *crtc;
int ret;
if (!HAS_DISPLAY(i915))
@@ -315,8 +448,6 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_display_driver_init_hw(i915);
intel_dpll_update_ref_clks(i915);
- intel_hdcp_component_init(i915);
-
if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
@@ -326,16 +457,14 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_vga_disable(i915);
intel_setup_outputs(i915);
+ intel_display_driver_disable_user_access(i915);
+
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
- for_each_intel_crtc(dev, crtc) {
- if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
- continue;
- intel_crtc_initial_plane_config(crtc);
- }
+ intel_initial_plane_config(i915);
/*
* Make sure hardware watermarks really match the state we read out.
@@ -357,6 +486,13 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
return 0;
/*
+ * This will bind stuff into ggtt, so it needs to be done after
+ * the BIOS fb takeover and whatever other magic ggtt reservations
+ * happen during gem/ggtt init.
+ */
+ intel_hdcp_component_init(i915);
+
+ /*
* Force all active planes to recompute their states, so that on
* mode_setcrtc after probe all the intel_plane_state variables
* are already calculated and there are no assert_plane warnings
@@ -374,7 +510,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
- intel_hpd_poll_disable(i915);
skl_watermark_ipc_init(i915);
@@ -383,7 +518,8 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
void intel_display_driver_register(struct drm_i915_private *i915)
{
- struct drm_printer p = drm_debug_printer("i915 display info:");
+ struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS,
+ "i915 display info:");
if (!HAS_DISPLAY(i915))
return;
@@ -394,6 +530,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_audio_init(i915);
+ intel_display_driver_enable_user_access(i915);
+
intel_display_debugfs_register(i915);
/*
@@ -412,6 +550,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(&i915->drm);
+ intel_hpd_poll_disable(i915);
intel_display_device_info_print(DISPLAY_INFO(i915),
DISPLAY_RUNTIME_INFO(i915), &p);
@@ -440,6 +579,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
+ intel_display_driver_suspend_access(i915);
+
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -486,14 +627,17 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
return;
intel_fbdev_unregister(i915);
- intel_audio_deinit(i915);
-
/*
* After flushing the fbdev (incl. a late async config which
* will have delayed queuing of a hotplug event), flush the
* hotplug events.
*/
drm_kms_helper_poll_fini(&i915->drm);
+
+ intel_display_driver_disable_user_access(i915);
+
+ intel_audio_deinit(i915);
+
drm_atomic_helper_shutdown(&i915->drm);
acpi_video_unregister();
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h
index c276a58ee329..42cc4af6d3fd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.h
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.h
@@ -32,5 +32,11 @@ int __intel_display_driver_resume(struct drm_i915_private *i915,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
+void intel_display_driver_enable_user_access(struct drm_i915_private *i915);
+void intel_display_driver_disable_user_access(struct drm_i915_private *i915);
+void intel_display_driver_suspend_access(struct drm_i915_private *i915);
+void intel_display_driver_resume_access(struct drm_i915_private *i915);
+bool intel_display_driver_check_access(struct drm_i915_private *i915);
+
#endif /* __INTEL_DISPLAY_DRIVER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index a7d8f3fc98de..f846c5b108b5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -266,12 +266,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
intel_uncore_posting_read(&dev_priv->uncore, reg);
}
-static bool i915_has_asle(struct drm_i915_private *dev_priv)
+static bool i915_has_asle(struct drm_i915_private *i915)
{
- if (!dev_priv->display.opregion.asle)
+ if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915))
return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+ return intel_opregion_asle_present(i915);
}
/**
@@ -986,7 +986,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);
+ drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
@@ -1587,7 +1587,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
struct intel_uncore *uncore = &i915->uncore;
u32 display_mask, extra_mask;
- if (GRAPHICS_VER(i915) >= 7) {
+ if (DISPLAY_VER(i915) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 3fdd8a517983..01eb6e4e6049 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -780,6 +780,8 @@ struct intel_plane_state {
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
+ struct intel_memory_region *mem;
+ resource_size_t phys_base;
struct i915_vma *vma;
unsigned int tiling;
int size;
@@ -1213,12 +1215,12 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
bool enable_psr2_sel_fetch;
+ bool enable_psr2_su_region_et;
bool req_psr2_sdp_prior_scanline;
bool has_panel_replay;
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
- struct drm_dp_vsc_sdp psr_vsc;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1402,6 +1404,8 @@ struct intel_crtc_state {
u32 psr2_man_track_ctl;
+ struct drm_rect psr2_su_area;
+
/* Variable Refresh Rate state */
struct {
bool enable, in_range;
@@ -1682,13 +1686,14 @@ struct intel_psr {
/* Mutex for PSR state of the transcoder */
struct mutex lock;
-#define I915_PSR_DEBUG_MODE_MASK 0x0f
-#define I915_PSR_DEBUG_DEFAULT 0x00
-#define I915_PSR_DEBUG_DISABLE 0x01
-#define I915_PSR_DEBUG_ENABLE 0x02
-#define I915_PSR_DEBUG_FORCE_PSR1 0x03
-#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
-#define I915_PSR_DEBUG_IRQ 0x10
+#define I915_PSR_DEBUG_MODE_MASK 0x0f
+#define I915_PSR_DEBUG_DEFAULT 0x00
+#define I915_PSR_DEBUG_DISABLE 0x01
+#define I915_PSR_DEBUG_ENABLE 0x02
+#define I915_PSR_DEBUG_FORCE_PSR1 0x03
+#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
+#define I915_PSR_DEBUG_IRQ 0x10
+#define I915_PSR_DEBUG_SU_REGION_ET_DISABLE 0x20
u32 debug;
bool sink_support;
@@ -1702,14 +1707,20 @@ struct intel_psr {
unsigned int busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
- bool colorimetry_support;
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
- u8 io_wake_lines;
- u8 fast_wake_lines;
+
+ struct {
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
+ /* LNL and beyond */
+ u8 check_entry_lines;
+ } alpm_parameters;
+
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
@@ -1833,6 +1844,8 @@ struct intel_dp {
/* When we last wrote the OUI for eDP */
unsigned long last_oui_write;
+
+ bool colorimetry_support;
};
enum lspcon_vendor {
@@ -1890,6 +1903,9 @@ struct intel_digital_port {
u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
bool (*connected)(struct intel_encoder *encoder);
+
+ void (*lock)(struct intel_digital_port *dig_port);
+ void (*unlock)(struct intel_digital_port *dig_port);
};
struct intel_dp_mst_encoder {
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index b70502586ab9..835781624482 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1158,7 +1158,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
str_yes_no(intel_dmc_has_payload(i915)));
seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
- str_yes_no(GRAPHICS_VER(i915) >= 12));
+ str_yes_no(DISPLAY_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ae647d03af25..5045c34a16be 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -56,6 +56,7 @@
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -2619,58 +2620,38 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
+ struct drm_dp_vsc_sdp *vsc;
- /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
- if (crtc_state->has_psr)
+ if ((!intel_dp->colorimetry_support ||
+ !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
+ !crtc_state->has_psr)
return;
- if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
- return;
+ vsc = &crtc_state->infoframes.vsc;
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
vsc->sdp_type = DP_SDP_VSC;
- intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
- &crtc_state->infoframes.vsc);
-}
-void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state,
- struct drm_dp_vsc_sdp *vsc)
-{
- vsc->sdp_type = DP_SDP_VSC;
-
- if (crtc_state->has_psr2) {
- if (intel_dp->psr.colorimetry_support &&
- intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
- /* [PSR2, +Colorimetry] */
- intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
- vsc);
- } else {
- /*
- * [PSR2, -Colorimetry]
- * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
- * 3D stereo + PSR/PSR2 + Y-coordinate.
- */
- vsc->revision = 0x4;
- vsc->length = 0xe;
- }
+ /* Needs colorimetry */
+ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else if (crtc_state->has_psr2) {
+ /*
+ * [PSR2 without colorimetry]
+ * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
+ * 3D stereo + PSR/PSR2 + Y-coordinate.
+ */
+ vsc->revision = 0x4;
+ vsc->length = 0xe;
} else if (crtc_state->has_panel_replay) {
- if (intel_dp->psr.colorimetry_support &&
- intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
- /* [Panel Replay with colorimetry info] */
- intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
- vsc);
- } else {
- /*
- * [Panel Replay without colorimetry info]
- * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
- * VSC SDP supporting 3D stereo + Panel Replay.
- */
- vsc->revision = 0x6;
- vsc->length = 0x10;
- }
+ /*
+ * [Panel Replay without colorimetry info]
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo + Panel Replay.
+ */
+ vsc->revision = 0x6;
+ vsc->length = 0x10;
} else {
/*
* [PSR1]
@@ -3348,13 +3329,6 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
fastset = false;
}
- if (CAN_PSR(intel_dp)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n",
- encoder->base.base.id, encoder->base.name);
- crtc_state->uapi.mode_changed = true;
- fastset = false;
- }
-
return fastset;
}
@@ -4291,24 +4265,6 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
-void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_dp_vsc_sdp *vsc)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct dp_sdp sdp = {};
- ssize_t len;
-
- len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
-
- if (drm_WARN_ON(&dev_priv->drm, len < 0))
- return;
-
- dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
- &sdp, len);
-}
-
void intel_dp_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -4335,9 +4291,7 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable)
return;
- /* When PSR is enabled, VSC SDP is handled by PSR routine */
- if (!crtc_state->has_psr)
- intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
@@ -4468,10 +4422,6 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct dp_sdp sdp = {};
int ret;
- /* When PSR is enabled, VSC SDP is handled by PSR routine */
- if (crtc_state->has_psr)
- return;
-
if ((crtc_state->infoframes.enable &
intel_hdmi_infoframe_enable(type)) == 0)
return;
@@ -4682,31 +4632,36 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
struct drm_dp_phy_test_params *data =
&intel_dp->compliance.test_data.phytest;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe = crtc->pipe;
u32 pattern_val;
switch (data->phy_pattern) {
- case DP_PHY_TEST_PATTERN_NONE:
+ case DP_LINK_QUAL_PATTERN_DISABLE:
drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_LINK_TRAIN_NORMAL);
break;
- case DP_PHY_TEST_PATTERN_D10_2:
+ case DP_LINK_QUAL_PATTERN_D10_2:
drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
break;
- case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ case DP_LINK_QUAL_PATTERN_ERROR_RATE:
drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE |
DDI_DP_COMP_CTL_SCRAMBLED_0);
break;
- case DP_PHY_TEST_PATTERN_PRBS7:
+ case DP_LINK_QUAL_PATTERN_PRBS7:
drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
break;
- case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
/*
* FIXME: Ideally the pattern should come from DPCD 0x250. Since
* current firmware of DPR-100 could not set it, hardcoding
@@ -4724,7 +4679,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
DDI_DP_COMP_CTL_ENABLE |
DDI_DP_COMP_CTL_CUSTOM80);
break;
- case DP_PHY_TEST_PATTERN_CP2520:
+ case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
/*
* FIXME: Ideally pattern should come from DPCD 0x24A. As
* current firmware of DPR-100 could not set it, so hardcoding
@@ -4736,8 +4691,19 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
pattern_val);
break;
+ case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
+ if (DISPLAY_VER(dev_priv) < 10) {
+ drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
+ break;
+ }
+ drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
+ break;
default:
- WARN(1, "Invalid Phy Test Pattern\n");
+ drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
}
}
@@ -5456,8 +5422,24 @@ edp_detect(struct intel_dp *intel_dp)
return connector_status_connected;
}
+void intel_digital_port_lock(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (dig_port->lock)
+ dig_port->lock(dig_port);
+}
+
+void intel_digital_port_unlock(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (dig_port->unlock)
+ dig_port->unlock(dig_port);
+}
+
/*
- * intel_digital_port_connected - is the specified port connected?
+ * intel_digital_port_connected_locked - is the specified port connected?
* @encoder: intel_encoder
*
* In cases where there's a connector physically connected but it can't be used
@@ -5465,21 +5447,44 @@ edp_detect(struct intel_dp *intel_dp)
* by our hardware we also pretty much treat the port as disconnected. This is
* relevant for type-C (starting on ICL) where there's ownership involved.
*
+ * The caller must hold the lock acquired by calling intel_digital_port_lock()
+ * when calling this function.
+ *
* Return %true if port is connected, %false otherwise.
*/
-bool intel_digital_port_connected(struct intel_encoder *encoder)
+bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
bool is_connected = false;
intel_wakeref_t wakeref;
- with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- is_connected = dig_port->connected(encoder);
+ with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
+ unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
+
+ do {
+ is_connected = dig_port->connected(encoder);
+ if (is_connected || is_glitch_free)
+ break;
+ usleep_range(10, 30);
+ } while (time_before(jiffies, wait_expires));
+ }
return is_connected;
}
+bool intel_digital_port_connected(struct intel_encoder *encoder)
+{
+ bool ret;
+
+ intel_digital_port_lock(encoder);
+ ret = intel_digital_port_connected_locked(encoder);
+ intel_digital_port_unlock(encoder);
+
+ return ret;
+}
+
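The locked variant debounces glitch-prone Type-C hotplug status by re-reading for up to ~4 ms (msecs_to_jiffies_timeout(4)), sleeping 10-30 us between reads. The same loop shape in user-space C, with read_status() standing in for dig_port->connected():

#include <stdbool.h>
#include <time.h>

static bool wait_connected(bool (*read_status)(void), bool glitch_free)
{
	struct timespec gap = { 0, 20 * 1000 };	/* ~20us between polls */
	bool connected = false;
	int i;

	for (i = 0; i < 200; i++) {		/* roughly the 4ms budget */
		connected = read_status();
		if (connected || glitch_free)	/* glitch-free PHYs: single read */
			break;
		nanosleep(&gap, NULL);
	}
	return connected;
}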
static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
@@ -5673,6 +5678,9 @@ intel_dp_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(dev_priv))
+ return connector->status;
+
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
@@ -5773,6 +5781,10 @@ intel_dp_force(struct drm_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+
+ if (!intel_display_driver_check_access(dev_priv))
+ return;
+
intel_dp_unset_edid(intel_dp);
if (connector->status != connector_status_connected)
@@ -6057,7 +6069,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
spin_unlock_irq(&i915->irq_lock);
if (need_work)
- queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
+ intel_hpd_schedule_detection(i915);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -6500,6 +6512,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
connector->interlace_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_connector->base.polled = intel_connector->polled;
intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -6530,6 +6543,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
+ intel_dp->colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
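
The retry loop added to intel_digital_port_connected_locked() above can be modelled in isolation: sample a "connected" predicate for up to 4 ms, sleeping briefly between reads, and stop early on the first hit (ports that handle HPD glitches themselves skip the loop the same way is_glitch_free does in the patch). A minimal userspace sketch under those assumptions; the predicate and timing helpers are illustrative stand-ins, not i915 code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static bool poll_connected(bool (*connected)(void))
{
	long long deadline = now_us() + 4000;	/* 4 ms budget, as in the patch */
	bool ok;

	do {
		ok = connected();
		if (ok)
			break;			/* live pin, stop early */
		usleep(20);			/* ~usleep_range(10, 30) */
	} while (now_us() < deadline);

	return ok;
}

static bool always_down(void)
{
	return false;
}

int main(void)
{
	printf("connected: %d\n", poll_connected(always_down));
	return 0;
}
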
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 05db46b111f2..530cc97bc42f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -109,20 +109,16 @@ int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state,
- struct drm_dp_vsc_sdp *vsc);
-void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_dp_vsc_sdp *vsc);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type);
+void intel_digital_port_lock(struct intel_encoder *encoder);
+void intel_digital_port_unlock(struct intel_encoder *encoder);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 2e2af71bcd5a..4f4a0e3b3114 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -9,6 +9,7 @@
#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
@@ -228,9 +229,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
@@ -245,18 +245,16 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- if (is_tc_port) {
- intel_tc_port_lock(dig_port);
- /*
- * Abort transfers on a disconnected port as required by
- * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
- * timeouts that would otherwise happen.
- * TODO: abort the transfer on non-TC ports as well.
- */
- if (!intel_tc_port_connected_locked(&dig_port->base)) {
- ret = -ENXIO;
- goto out_unlock;
- }
+ intel_digital_port_lock(encoder);
+ /*
+ * Abort transfers on a disconnected port as required by
+ * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
+ * timeouts that would otherwise happen.
+ */
+ if (!intel_dp_is_edp(intel_dp) &&
+ !intel_digital_port_connected_locked(&dig_port->base)) {
+ ret = -ENXIO;
+ goto out_unlock;
}
aux_domain = intel_aux_power_domain(dig_port);
@@ -423,8 +421,7 @@ out:
intel_pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
out_unlock:
- if (is_tc_port)
- intel_tc_port_unlock(dig_port);
+ intel_digital_port_unlock(encoder);
return ret;
}
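
The reworked intel_dp_aux_xfer() entry/exit above reduces to a lock, probe, transfer, unlock sequence that now covers every port type, not just type-C. A compilable userspace sketch of that flow, with a pthread mutex standing in for the digital port lock and hypothetical port/xfer types; eDP bypasses the probe since the panel is always wired:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct port {
	pthread_mutex_t lock;			/* stands in for dig_port->lock() */
	bool is_edp;
	bool (*connected)(struct port *p);	/* call with lock held */
	int (*xfer)(struct port *p, const void *buf, int len);
};

static int aux_xfer(struct port *p, const void *buf, int len)
{
	int ret;

	pthread_mutex_lock(&p->lock);
	/* eDP is hardwired; every other port must probe before transferring */
	if (!p->is_edp && !p->connected(p)) {
		ret = -ENXIO;		/* abort instead of a long AUX timeout */
		goto out_unlock;
	}
	ret = p->xfer(p, buf, len);
out_unlock:
	pthread_mutex_unlock(&p->lock);
	return ret;
}

static bool stub_connected(struct port *p)
{
	(void)p;
	return true;
}

static int stub_xfer(struct port *p, const void *buf, int len)
{
	(void)p;
	(void)buf;
	return len;
}

int main(void)
{
	struct port p = { .connected = stub_connected, .xfer = stub_xfer };

	pthread_mutex_init(&p.lock, NULL);
	return aux_xfer(&p, "D", 1) == 1 ? 0 : 1;
}
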
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8a9432335030..5fa25a5a36b5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -37,6 +37,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@@ -1410,6 +1411,9 @@ intel_dp_mst_detect(struct drm_connector *connector,
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
intel_connector->port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index ef57dad1a9cb..e7e0a4cf9f93 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -1263,11 +1263,11 @@ static const struct dpll_info hsw_plls[] = {
{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{}
};
@@ -1945,7 +1945,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
static const struct dpll_info skl_plls[] = {
{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
@@ -3308,6 +3308,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct skl_wrpll_params pll_params = {};
@@ -3326,7 +3328,11 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
return ret;
/* this is mainly for the fastset check */
- icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
+ else
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
&port_dpll->hw_state);
@@ -4023,7 +4029,8 @@ static const struct intel_shared_dpll_funcs mg_pll_funcs = {
static const struct dpll_info icl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .is_alt_port_dpll = true, },
{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@@ -4068,7 +4075,8 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
static const struct dpll_info tgl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@@ -4141,7 +4149,8 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
static const struct dpll_info adlp_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@@ -4465,31 +4474,29 @@ verify_single_dpll_state(struct drm_i915_private *i915,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_dpll_hw_state dpll_hw_state;
+ struct intel_dpll_hw_state dpll_hw_state = {};
u8 pipe_mask;
bool active;
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
-
- drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
-
active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
- if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
+ if (!pll->info->always_on) {
I915_STATE_WARN(i915, !pll->on && pll->active_mask,
- "pll in active use but not on in sw tracking\n");
+ "%s: pll in active use but not on in sw tracking\n",
+ pll->info->name);
I915_STATE_WARN(i915, pll->on && !pll->active_mask,
- "pll is on but not used by any active pipe\n");
+ "%s: pll is on but not used by any active pipe\n",
+ pll->info->name);
I915_STATE_WARN(i915, pll->on != active,
- "pll on state mismatch (expected %i, found %i)\n",
- pll->on, active);
+ "%s: pll on state mismatch (expected %i, found %i)\n",
+ pll->info->name, pll->on, active);
}
if (!crtc) {
I915_STATE_WARN(i915,
pll->active_mask & ~pll->state.pipe_mask,
- "more active pll users than references: 0x%x vs 0x%x\n",
- pll->active_mask, pll->state.pipe_mask);
+ "%s: more active pll users than references: 0x%x vs 0x%x\n",
+ pll->info->name, pll->active_mask, pll->state.pipe_mask);
return;
}
@@ -4498,21 +4505,29 @@ verify_single_dpll_state(struct drm_i915_private *i915,
if (new_crtc_state->hw.active)
I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
- "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
+ "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
+ "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
- "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
- pipe_mask, pll->state.pipe_mask);
+ "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pll->info->name, pipe_mask, pll->state.pipe_mask);
I915_STATE_WARN(i915,
pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
- "pll hw state mismatch\n");
+ "%s: pll hw state mismatch\n",
+ pll->info->name);
+}
+
+static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
+ const struct intel_shared_dpll *new_pll)
+{
+ return old_pll && new_pll && old_pll != new_pll &&
+ (old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
@@ -4534,11 +4549,15 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
- "pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->state.pipe_mask);
+ "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
+
+ /* TC ports have both MG/TC and TBT PLL referenced simultaneously */
+ I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
+ new_crtc_state->shared_dpll) &&
+ pll->state.pipe_mask & pipe_mask,
+ "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2e7ea0d8d3ff..616afe861b46 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -276,15 +276,21 @@ struct dpll_info {
*/
enum intel_display_power_domain power_domain;
-#define INTEL_DPLL_ALWAYS_ON (1 << 0)
/**
- * @flags:
+ * @always_on:
*
- * INTEL_DPLL_ALWAYS_ON
- * Inform the state checker that the DPLL is kept enabled even if
- * not in use by any CRTC.
+ * Inform the state checker that the DPLL is kept enabled even if
+ * not in use by any CRTC.
*/
- u32 flags;
+ bool always_on;
+
+ /**
+ * @is_alt_port_dpll:
+ *
+ * Inform the state checker that the DPLL can be used as a fallback
+ * (for TC->TBT fallback).
+ */
+ bool is_alt_port_dpll;
};
/**
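
The header change above replaces the INTEL_DPLL_ALWAYS_ON flag word with two plain bools. A small self-contained model, using stand-in struct definitions rather than the driver's real ones, of how the state checker can consult is_alt_port_dpll to tolerate an MG/TC to TBT PLL swap:

#include <stdbool.h>

struct dpll_info {
	const char *name;
	bool always_on;		/* kept enabled even with no CRTC user */
	bool is_alt_port_dpll;	/* usable as the TC->TBT fallback PLL */
};

struct shared_dpll {
	const struct dpll_info *info;
};

static const struct dpll_info plls[] = {
	{ .name = "MG PLL 1" },
	{ .name = "TBT PLL", .is_alt_port_dpll = true },
	{ .name = "LCPLL 810", .always_on = true },
};

/* both PLLs being referenced across a TC<->TBT transition is expected */
static bool has_alt_port_dpll(const struct shared_dpll *old_pll,
			      const struct shared_dpll *new_pll)
{
	return old_pll && new_pll && old_pll != new_pll &&
	       (old_pll->info->is_alt_port_dpll ||
		new_pll->info->is_alt_port_dpll);
}

int main(void)
{
	struct shared_dpll mg = { .info = &plls[0] };
	struct shared_dpll tbt = { .info = &plls[1] };

	/* MG -> TBT swap: suppress the "enabled crtcs mismatch" warning */
	return has_alt_port_dpll(&mg, &tbt) ? 0 : 1;
}
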
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 482c28b5c2de..a6c7122fd671 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -453,6 +453,10 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
if (!HAS_DSB(i915))
return NULL;
+ /* TODO: DSB is broken in Xe KMD, so disabling it until fixed */
+ if (!IS_ENABLED(I915))
+ return NULL;
+
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 9111e9d46486..8ca9ae4798a8 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -35,6 +35,7 @@
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
@@ -328,6 +329,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->base.status;
+
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
@@ -536,6 +540,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
if (intel_dvo->dev.type == INTEL_DVO_CHIP_TMDS)
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->base.polled = connector->polled;
drm_connector_init_with_ddc(&i915->drm, &connector->base,
&intel_dvo_connector_funcs,
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index f17a1afb4929..b453fcbd67da 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1087,18 +1087,7 @@ static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- case I915_FORMAT_MOD_4_TILED:
- case I915_FORMAT_MOD_X_TILED:
- return true;
- default:
- return false;
- }
+ return true;
}
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 717c3a3237c4..0665f943f65f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -78,8 +78,9 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
/* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start =
- (unsigned long)(mem->io_start +
- i915_gem_object_get_dma_address(obj, 0));
+ (unsigned long)(mem->io.start +
+ i915_gem_object_get_dma_address(obj, 0) -
+ mem->region.start);
info->fix.smem_len = obj->base.size;
} else {
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
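
The smem_start fix above is pure address arithmetic: a DMA address is relative to the device-local memory region, so the CPU-visible address is the BAR (io) start plus the object's offset within the region. A tiny runnable check with made-up numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t io_start = 0x4000000000;	/* CPU-visible BAR base */
	uint64_t region_start = 0x100000;	/* device-local region base */
	uint64_t dma_addr = 0x180000;		/* object address within the region */

	uint64_t smem_start = io_start + dma_addr - region_start;

	assert(smem_start == 0x4000080000);	/* BAR base + 0x80000 offset */
	return 0;
}
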
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index e8e8be54143b..cbcd1e91b7be 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -10,12 +10,55 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
+struct intel_global_commit {
+ struct kref ref;
+ struct completion done;
+};
+
+static struct intel_global_commit *commit_new(void)
+{
+ struct intel_global_commit *commit;
+
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (!commit)
+ return NULL;
+
+ init_completion(&commit->done);
+ kref_init(&commit->ref);
+
+ return commit;
+}
+
+static void __commit_free(struct kref *kref)
+{
+ struct intel_global_commit *commit =
+ container_of(kref, typeof(*commit), ref);
+
+ kfree(commit);
+}
+
+static struct intel_global_commit *commit_get(struct intel_global_commit *commit)
+{
+ if (commit)
+ kref_get(&commit->ref);
+
+ return commit;
+}
+
+static void commit_put(struct intel_global_commit *commit)
+{
+ if (commit)
+ kref_put(&commit->ref, __commit_free);
+}
+
static void __intel_atomic_global_state_free(struct kref *kref)
{
struct intel_global_state *obj_state =
container_of(kref, struct intel_global_state, ref);
struct intel_global_obj *obj = obj_state->obj;
+ commit_put(obj_state->commit);
+
obj->funcs->atomic_destroy_state(obj, obj_state);
}
@@ -127,6 +170,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
obj_state->obj = obj;
obj_state->changed = false;
+ obj_state->serialized = false;
+ obj_state->commit = NULL;
kref_init(&obj_state->ref);
@@ -239,19 +284,13 @@ int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
- struct intel_atomic_state *state = obj_state->state;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
+ int ret;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
+ ret = intel_atomic_lock_global_state(obj_state);
+ if (ret)
+ return ret;
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- obj_state->changed = true;
+ obj_state->serialized = true;
return 0;
}
@@ -267,3 +306,79 @@ intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
return false;
return true;
}
+
+int
+intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
+{
+ const struct intel_global_state *old_obj_state;
+ struct intel_global_state *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
+ new_obj_state, i) {
+ struct intel_global_commit *commit = NULL;
+
+ if (new_obj_state->serialized) {
+ /*
+ * New commit which is going to be completed
+ * after the hardware reprogramming is done.
+ */
+ commit = commit_new();
+ if (!commit)
+ return -ENOMEM;
+ } else if (new_obj_state->changed) {
+ /*
+ * We're going to swap to this state, so carry the
+ * previous commit along, in case it's not yet done.
+ */
+ commit = commit_get(old_obj_state->commit);
+ }
+
+ new_obj_state->commit = commit;
+ }
+
+ return 0;
+}
+
+int
+intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_global_state *old_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_old_global_obj_in_state(state, obj, old_obj_state, i) {
+ struct intel_global_commit *commit = old_obj_state->commit;
+ long ret;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
+ if (ret == 0) {
+ drm_err(&i915->drm, "global state timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void
+intel_atomic_global_state_commit_done(struct intel_atomic_state *state)
+{
+ const struct intel_global_state *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_new_global_obj_in_state(state, obj, new_obj_state, i) {
+ struct intel_global_commit *commit = new_obj_state->commit;
+
+ if (!new_obj_state->serialized)
+ continue;
+
+ complete_all(&commit->done);
+ }
+}
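
The intel_global_commit object introduced above is a refcounted one-shot completion: a serialized state gets a fresh commit, a merely-changed state carries the previous one along, and commit-done wakes every waiter. A userspace analogue of that lifetime, assuming pthread primitives in place of kref/completion and hypothetical names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct commit {
	int ref;			/* protected by lock, models kref */
	bool done;			/* one-shot, models completion */
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static struct commit *commit_new(void)
{
	struct commit *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->ref = 1;
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	return c;
}

static struct commit *commit_get(struct commit *c)
{
	if (c) {
		pthread_mutex_lock(&c->lock);
		c->ref++;
		pthread_mutex_unlock(&c->lock);
	}
	return c;
}

static void commit_put(struct commit *c)
{
	bool last;

	if (!c)
		return;
	pthread_mutex_lock(&c->lock);
	last = --c->ref == 0;
	pthread_mutex_unlock(&c->lock);
	if (last)
		free(c);	/* no other reference can exist at this point */
}

static void commit_complete(struct commit *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;		/* sticky, like complete_all() */
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void commit_wait(struct commit *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct commit *c = commit_new();

	if (!c)
		return 1;
	commit_get(c);		/* a swapped-in state carries it along */
	commit_complete(c);	/* hardware reprogramming done */
	commit_wait(c);		/* returns immediately: already done */
	commit_put(c);
	commit_put(c);		/* last put frees */
	return 0;
}
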
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index 5477de8f0b30..5c8545d7a76a 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -54,11 +54,14 @@ struct intel_global_obj {
(__i)++) \
for_each_if(obj)
+struct intel_global_commit;
+
struct intel_global_state {
struct intel_global_obj *obj;
struct intel_atomic_state *state;
+ struct intel_global_commit *commit;
struct kref ref;
- bool changed;
+ bool changed, serialized;
};
struct __intel_global_objs_state {
@@ -87,6 +90,10 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state);
int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+int intel_atomic_global_state_setup_commit(struct intel_atomic_state *state);
+void intel_atomic_global_state_commit_done(struct intel_atomic_state *state);
+int intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state);
+
bool intel_atomic_global_state_is_serialized(struct intel_atomic_state *state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index e9e4dcf345f9..d3e03ed5b79c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -155,7 +155,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
const struct gmbus_pin *pins;
size_t size;
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL) {
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
@@ -164,9 +164,6 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) {
- pins = gmbus_pins_mtp;
- size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 39b3f7c0c77c..c3e692e7f790 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -347,7 +347,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
default:
drm_err(&i915->drm, "Unknown transcoder %d\n",
cpu_transcoder);
- return -EINVAL;
+ return 0;
}
}
@@ -364,7 +364,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
drm_err(&i915->drm, "Unknown port %d\n", port);
- return -EINVAL;
+ return 0;
}
}
@@ -853,8 +853,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (shim->stream_encryption) {
ret = shim->stream_encryption(connector, true);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
@@ -878,14 +878,14 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
u32 repeater_ctl;
int ret;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
+ connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_encryption) {
ret = hdcp->shim->stream_encryption(connector, false);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
@@ -929,8 +929,8 @@ static int intel_hdcp1_enable(struct intel_connector *connector)
struct intel_hdcp *hdcp = &connector->hdcp;
int i, ret, tries = 3;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
+ connector->base.base.id, connector->base.name);
if (!hdcp_key_loadable(i915)) {
drm_err(&i915->drm, "HDCP key Load is not possible\n");
@@ -1027,8 +1027,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
if (drm_WARN_ON(&i915->drm,
!intel_hdcp_in_use(i915, cpu_transcoder, port))) {
drm_err(&i915->drm,
- "%s:%d HDCP link stopped encryption,%x\n",
- connector->base.name, connector->base.base.id,
+ "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
+ connector->base.base.id, connector->base.name,
intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
ret = -ENXIO;
intel_hdcp_update_value(connector,
@@ -1046,8 +1046,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
}
drm_dbg_kms(&i915->drm,
- "[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
+ "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
+ connector->base.base.id, connector->base.name);
ret = _intel_hdcp_disable(connector);
if (ret) {
@@ -1633,6 +1633,12 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
+ if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
+ drm_dbg_kms(&i915->drm,
+ "HDCP1.x or 2.0 Legacy Device Downstream\n");
+ return -EINVAL;
+ }
+
/* Converting and Storing the seq_num_v to local variable as DWORD */
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
@@ -1731,8 +1737,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS)) {
- drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
+ connector->base.base.id, connector->base.name);
ret = -EPERM;
goto link_recover;
}
@@ -1740,8 +1746,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, true);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
@@ -1925,8 +1931,8 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
- connector->base.name, connector->base.base.id,
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.base.id, connector->base.name,
hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
@@ -1936,8 +1942,8 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
return ret;
}
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
- connector->base.name, connector->base.base.id,
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
+ connector->base.base.id, connector->base.name,
hdcp->content_type);
hdcp->hdcp2_encrypted = true;
@@ -1953,14 +1959,14 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
+ connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, false);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
@@ -2040,20 +2046,20 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
drm_dbg_kms(&i915->drm,
- "[%s:%d] Repeater topology auth failed.(%d)\n",
- connector->base.name, connector->base.base.id,
+ "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
+ connector->base.base.id, connector->base.name,
ret);
} else {
drm_dbg_kms(&i915->drm,
- "[%s:%d] HDCP2.2 link failed, retrying auth\n",
- connector->base.name, connector->base.base.id);
+ "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
+ connector->base.base.id, connector->base.name);
}
ret = _intel_hdcp2_disable(connector, true);
if (ret) {
drm_err(&i915->drm,
- "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id, ret);
+ "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.base.id, connector->base.name, ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
goto out;
@@ -2062,8 +2068,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
ret = _intel_hdcp2_enable(connector);
if (ret) {
drm_dbg_kms(&i915->drm,
- "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id,
+ "[CONNECTOR:%d:%s] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.base.id, connector->base.name,
ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
@@ -2341,8 +2347,8 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
return -ENOENT;
if (!connector->encoder) {
- drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
+ connector->base.base.id, connector->base.name);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 18117b789b16..302bff75b06c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -65,7 +65,7 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
goto out_unmap;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto out_unmap;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
index 8023c85c7fa0..a568a457e532 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -8,6 +8,8 @@
#include "intel_display_reg_defs.h"
+#define TRANS_HDCP(__i915) (DISPLAY_VER(__i915) >= 12)
+
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31)
@@ -82,7 +84,7 @@
#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
_TRANSB_HDCP_CONF)
#define HDCP_CONF(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_CONF(trans) : \
PORT_HDCP_CONF(port))
@@ -95,7 +97,7 @@
_TRANSA_HDCP_ANINIT, \
_TRANSB_HDCP_ANINIT)
#define HDCP_ANINIT(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANINIT(trans) : \
PORT_HDCP_ANINIT(port))
@@ -105,7 +107,7 @@
#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
_TRANSB_HDCP_ANLO)
#define HDCP_ANLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANLO(trans) : \
PORT_HDCP_ANLO(port))
@@ -115,7 +117,7 @@
#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
_TRANSB_HDCP_ANHI)
#define HDCP_ANHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANHI(trans) : \
PORT_HDCP_ANHI(port))
@@ -126,7 +128,7 @@
_TRANSA_HDCP_BKSVLO, \
_TRANSB_HDCP_BKSVLO)
#define HDCP_BKSVLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_BKSVLO(trans) : \
PORT_HDCP_BKSVLO(port))
@@ -137,7 +139,7 @@
_TRANSA_HDCP_BKSVHI, \
_TRANSB_HDCP_BKSVHI)
#define HDCP_BKSVHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_BKSVHI(trans) : \
PORT_HDCP_BKSVHI(port))
@@ -148,7 +150,7 @@
_TRANSA_HDCP_RPRIME, \
_TRANSB_HDCP_RPRIME)
#define HDCP_RPRIME(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_RPRIME(trans) : \
PORT_HDCP_RPRIME(port))
@@ -159,7 +161,7 @@
_TRANSA_HDCP_STATUS, \
_TRANSB_HDCP_STATUS)
#define HDCP_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_STATUS(trans) : \
PORT_HDCP_STATUS(port))
@@ -200,7 +202,7 @@
#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19)
#define AUTH_CLR_KEYS REG_BIT(18)
#define HDCP2_AUTH(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH(trans) : \
PORT_HDCP2_AUTH(port))
@@ -211,7 +213,7 @@
_TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31)
#define HDCP2_CTL(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_CTL(trans) : \
PORT_HDCP2_CTL(port))
@@ -225,7 +227,7 @@
#define LINK_AUTH_STATUS REG_BIT(21)
#define LINK_ENCRYPTION_STATUS REG_BIT(20)
#define HDCP2_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
@@ -247,7 +249,7 @@
#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
#define STREAM_TYPE_STATUS REG_BIT(30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
@@ -263,7 +265,7 @@
_TRANSB_HDCP2_AUTH_STREAM)
#define AUTH_STREAM_TYPE REG_BIT(31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
PORT_HDCP2_AUTH_STREAM(port))
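
The mechanical rewrite above factors the repeated GRAPHICS_VER() >= 12 ternary into one TRANS_HDCP() predicate, so both the transcoder-vs-port decision and the switch from graphics to display IP version live in a single place. A toy model of the pattern with fake offsets (nothing here matches the real register map):

#include <stdio.h>

struct i915 {
	int display_ver;
};

#define DISPLAY_VER(p)		((p)->display_ver)
#define TRANS_HDCP(p)		(DISPLAY_VER(p) >= 12)

/* fake offsets: one table per addressing scheme */
#define TRANS_HDCP_CONF(t)	(0x66400 + (t) * 0x100)
#define PORT_HDCP_CONF(port)	(0x66000 + (port) * 0x100)

#define HDCP_CONF(p, trans, port) \
	(TRANS_HDCP(p) ? TRANS_HDCP_CONF(trans) : PORT_HDCP_CONF(port))

int main(void)
{
	struct i915 tgl = { .display_ver = 12 };	/* transcoder-based */
	struct i915 icl = { .display_ver = 11 };	/* port-based */

	printf("%#x %#x\n", HDCP_CONF(&tgl, 0, 1), HDCP_CONF(&icl, 0, 1));
	return 0;
}
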
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 39e4f5f7c817..7020e5806109 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -49,6 +49,7 @@
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
@@ -523,10 +524,12 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
0);
/* Wa_14013475917 */
- if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC)
- return;
+ if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC))
+ val |= hsw_infoframe_enable(type);
+
+ if (type == DP_SDP_VSC)
+ val |= VSC_DIP_HW_DATA_SW_HEA;
- val |= hsw_infoframe_enable(type);
intel_de_write(dev_priv, ctl_reg, val);
intel_de_posting_read(dev_priv, ctl_reg);
}
@@ -2503,6 +2506,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(dev_priv))
+ return connector->status;
+
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (DISPLAY_VER(dev_priv) >= 11 &&
@@ -2531,6 +2537,9 @@ intel_hdmi_force(struct drm_connector *connector)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (!intel_display_driver_check_access(i915))
+ return;
+
intel_hdmi_unset_edid(connector);
if (connector->status != connector_status_connected)
@@ -3015,6 +3024,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
connector->ycbcr_420_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_connector->base.polled = intel_connector->polled;
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 0c0700c6ec66..d9ec349f3c8c 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -177,6 +177,46 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
+static bool detection_work_enabled(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ return i915->display.hotplug.detection_work_enabled;
+}
+
+static bool
+mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (!detection_work_enabled(i915))
+ return false;
+
+ return mod_delayed_work(i915->unordered_wq, work, delay);
+}
+
+static bool
+queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (!detection_work_enabled(i915))
+ return false;
+
+ return queue_delayed_work(i915->unordered_wq, work, delay);
+}
+
+static bool
+queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (!detection_work_enabled(i915))
+ return false;
+
+ return queue_work(i915->unordered_wq, work);
+}
+
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
@@ -213,9 +253,9 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_reschedule(&dev_priv->drm);
- mod_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.reenable_work,
- msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+ mod_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.reenable_work,
+ msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@@ -348,9 +388,9 @@ static void i915_digport_work_func(struct work_struct *work)
if (old_bits) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.event_bits |= old_bits;
+ queue_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.hotplug_work, 0);
spin_unlock_irq(&dev_priv->irq_lock);
- queue_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.hotplug_work, 0);
}
}
@@ -467,11 +507,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.retry_bits |= retry;
- spin_unlock_irq(&dev_priv->irq_lock);
- mod_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.hotplug_work,
- msecs_to_jiffies(HPD_RETRY_DELAY));
+ mod_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.hotplug_work,
+ msecs_to_jiffies(HPD_RETRY_DELAY));
+ spin_unlock_irq(&dev_priv->irq_lock);
}
}
@@ -590,7 +630,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*/
if (storm_detected)
intel_hpd_irq_setup(dev_priv);
- spin_unlock(&dev_priv->irq_lock);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -601,8 +640,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.hotplug_work, 0);
+
+ spin_unlock(&dev_priv->irq_lock);
}
/**
@@ -710,6 +751,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
cancel_work(&dev_priv->display.hotplug.poll_init_work);
}
+ spin_lock_irq(&dev_priv->irq_lock);
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -718,6 +761,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ continue;
+
connector->base.polled = connector->polled;
if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
@@ -726,6 +772,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
drm_connector_list_iter_end(&conn_iter);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
if (enabled)
drm_kms_helper_poll_reschedule(&dev_priv->drm);
@@ -774,8 +822,10 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.poll_init_work);
+ spin_lock_irq(&dev_priv->irq_lock);
+ queue_detection_work(dev_priv,
+ &dev_priv->display.hotplug.poll_init_work);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
/**
@@ -803,8 +853,11 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.poll_init_work);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ queue_detection_work(dev_priv,
+ &dev_priv->display.hotplug.poll_init_work);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
void intel_hpd_init_early(struct drm_i915_private *i915)
@@ -826,6 +879,20 @@ void intel_hpd_init_early(struct drm_i915_private *i915)
i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}
+static bool cancel_all_detection_work(struct drm_i915_private *i915)
+{
+ bool was_pending = false;
+
+ if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+ was_pending = true;
+ if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+ was_pending = true;
+ if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+ was_pending = true;
+
+ return was_pending;
+}
+
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
@@ -841,9 +908,13 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
- cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
- cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
- cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
+
+ /*
+ * All other work triggered by hotplug events should be canceled by
+ * now.
+ */
+ if (cancel_all_detection_work(dev_priv))
+ drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
@@ -873,6 +944,62 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
spin_unlock_irq(&dev_priv->irq_lock);
}
+static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+{
+ bool queue_work = false;
+ enum hpd_pin pin;
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (i915->display.hotplug.event_bits ||
+ i915->display.hotplug.retry_bits)
+ queue_work = true;
+
+ for_each_hpd_pin(pin) {
+ switch (i915->display.hotplug.stats[pin].state) {
+ case HPD_MARK_DISABLED:
+ queue_work = true;
+ break;
+ case HPD_ENABLED:
+ break;
+ default:
+ MISSING_CASE(i915->display.hotplug.stats[pin].state);
+ }
+ }
+
+ if (queue_work)
+ queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+}
+
+void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->irq_lock);
+ i915->display.hotplug.detection_work_enabled = true;
+ queue_work_for_missed_irqs(i915);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->irq_lock);
+ i915->display.hotplug.detection_work_enabled = false;
+ spin_unlock_irq(&i915->irq_lock);
+
+ cancel_all_detection_work(i915);
+}
+
+bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&i915->irq_lock, flags);
+ ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ spin_unlock_irqrestore(&i915->irq_lock, flags);
+
+ return ret;
+}
+
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
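
The detection-work plumbing added above gates every (re)queue behind a flag read under irq_lock, so disabling detection and cancelling outstanding work cannot race a concurrent queue. A compilable pthread sketch of the same discipline, with hypothetical names and a plain bool standing in for the real delayed-work item:

#include <pthread.h>
#include <stdbool.h>

struct hotplug {
	pthread_mutex_t irq_lock;
	bool detection_work_enabled;
	bool work_pending;	/* stands in for the delayed work item */
};

/* caller must hold hp->irq_lock, mirroring lockdep_assert_held() */
static bool queue_detection_work(struct hotplug *hp)
{
	if (!hp->detection_work_enabled)
		return false;	/* dropped silently while disabled */
	hp->work_pending = true;
	return true;
}

static void disable_detection_work(struct hotplug *hp)
{
	pthread_mutex_lock(&hp->irq_lock);
	hp->detection_work_enabled = false;
	pthread_mutex_unlock(&hp->irq_lock);

	/* models cancel_all_detection_work(): nothing can requeue now */
	hp->work_pending = false;
}

int main(void)
{
	struct hotplug hp = { .detection_work_enabled = true };
	bool queued;

	pthread_mutex_init(&hp.irq_lock, NULL);

	pthread_mutex_lock(&hp.irq_lock);
	queued = queue_detection_work(&hp);	/* accepted */
	pthread_mutex_unlock(&hp.irq_lock);

	disable_detection_work(&hp);

	pthread_mutex_lock(&hp.irq_lock);
	queued = queue_detection_work(&hp);	/* refused */
	pthread_mutex_unlock(&hp.irq_lock);

	return queued ? 1 : 0;
}
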
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 424ae5dbf5a0..a17253ddec83 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -30,4 +30,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_debugfs_register(struct drm_i915_private *i915);
+void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
+void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
+bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+
#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 04f62f27ad74..76076509f771 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -163,12 +163,10 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
(!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
return;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_LNL)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
hpd->pch_hpd = hpd_mtp;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
- hpd->pch_hpd = hpd_mtp;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
@@ -1139,7 +1137,7 @@ static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
xe2lpd_sde_hpd_irq_setup(i915);
- else if (INTEL_PCH_TYPE(i915) >= PCH_MTP)
+ else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
mtp_hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 1ce785db6a5e..f242bb320610 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -250,11 +250,36 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
+#define OPREGION_SIZE (8 * 1024)
+
+struct intel_opregion {
+ struct drm_i915_private *i915;
+
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle *asle;
+ struct opregion_asle_ext *asle_ext;
+ void *rvda;
+ void *vbt_firmware;
+ const void *vbt;
+ u32 vbt_size;
+ struct work_struct asle_work;
+ struct notifier_block acpi_notifier;
+};
+
static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
- struct opregion_swsci *swsci = i915->display.opregion.swsci;
+ struct intel_opregion *opregion = i915->display.opregion;
+ struct opregion_swsci *swsci;
u32 main_function, sub_function;
+ if (!opregion)
+ return -ENODEV;
+
+ swsci = opregion->swsci;
if (!swsci)
return -ENODEV;
@@ -265,11 +290,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
- if ((i915->display.opregion.swsci_sbcb_sub_functions &
+ if ((opregion->swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
- if ((i915->display.opregion.swsci_gbda_sub_functions &
+ if ((opregion->swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
@@ -280,7 +305,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
- struct opregion_swsci *swsci = dev_priv->display.opregion.swsci;
+ struct opregion_swsci *swsci;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 scic, dslp;
u16 swsci_val;
@@ -290,6 +315,8 @@ static int swsci(struct drm_i915_private *dev_priv,
if (ret)
return ret;
+ swsci = dev_priv->display.opregion->swsci;
+
/* Driver sleep timeout in ms. */
dslp = swsci->dslp;
if (!dslp) {
@@ -462,7 +489,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- struct opregion_asle *asle = dev_priv->display.opregion.asle;
+ struct opregion_asle *asle = dev_priv->display.opregion->asle;
drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
@@ -584,9 +611,8 @@ static void asle_work(struct work_struct *work)
{
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
- struct drm_i915_private *dev_priv =
- container_of(opregion, struct drm_i915_private, display.opregion);
- struct opregion_asle *asle = dev_priv->display.opregion.asle;
+ struct drm_i915_private *dev_priv = opregion->i915;
+ struct opregion_asle *asle = opregion->asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -632,11 +658,17 @@ static void asle_work(struct work_struct *work)
asle->aslc = aslc_stat;
}
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+bool intel_opregion_asle_present(struct drm_i915_private *i915)
+{
+ return i915->display.opregion && i915->display.opregion->asle;
+}
+
+void intel_opregion_asle_intr(struct drm_i915_private *i915)
{
- if (dev_priv->display.opregion.asle)
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.opregion.asle_work);
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (opregion && opregion->asle)
+ queue_work(i915->unordered_wq, &opregion->asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -692,7 +724,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@@ -731,7 +763,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@@ -761,7 +793,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -839,7 +871,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
const struct firmware *fw = NULL;
const char *name = dev_priv->display.params.vbt_firmware;
int ret;
@@ -879,7 +911,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
@@ -902,11 +934,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
return -ENOTSUPP;
}
+ opregion = kzalloc(sizeof(*opregion), GFP_KERNEL);
+ if (!opregion)
+ return -ENOMEM;
+
+ opregion->i915 = dev_priv;
+ dev_priv->display.opregion = opregion;
+
INIT_WORK(&opregion->asle_work, asle_work);
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
- if (!base)
- return -ENOMEM;
+ if (!base) {
+ err = -ENOMEM;
+ goto err_memremap;
+ }
memcpy(buf, base, sizeof(buf));
@@ -916,7 +957,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
goto err_out;
}
opregion->header = base;
- opregion->lid_state = base + ACPI_CLID;
drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
opregion->header->over.major,
@@ -1034,6 +1074,10 @@ out:
err_out:
memunmap(base);
+err_memremap:
+ kfree(opregion);
+ dev_priv->display.opregion = NULL;
+
return err;
}
@@ -1106,12 +1150,12 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
const struct drm_edid *drm_edid;
const void *edid;
int len;
- if (!opregion->asle_ext)
+ if (!opregion || !opregion->asle_ext)
return NULL;
edid = opregion->asle_ext->bddc;
@@ -1132,10 +1176,28 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
return drm_edid;
}
+const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+{
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (!opregion || !opregion->vbt)
+ return NULL;
+
+ if (size)
+ *size = opregion->vbt_size;
+
+ return opregion->vbt;
+}
+
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
- struct opregion_header *header = opregion->header;
+ struct intel_opregion *opregion = i915->display.opregion;
+ struct opregion_header *header;
+
+ if (!opregion)
+ return false;
+
+ header = opregion->header;
if (!header || header->over.major < 2 ||
(header->over.major == 2 && header->over.minor < 3))
@@ -1146,9 +1208,9 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
void intel_opregion_register(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
if (opregion->acpi) {
@@ -1162,7 +1224,7 @@ void intel_opregion_register(struct drm_i915_private *i915)
static void intel_opregion_resume_display(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
if (opregion->acpi) {
intel_didl_outputs(i915);
@@ -1188,9 +1250,9 @@ static void intel_opregion_resume_display(struct drm_i915_private *i915)
void intel_opregion_resume(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
if (HAS_DISPLAY(i915))
@@ -1201,12 +1263,12 @@ void intel_opregion_resume(struct drm_i915_private *i915)
static void intel_opregion_suspend_display(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
- cancel_work_sync(&i915->display.opregion.asle_work);
+ cancel_work_sync(&opregion->asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
@@ -1214,9 +1276,9 @@ static void intel_opregion_suspend_display(struct drm_i915_private *i915)
void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
intel_opregion_notify_adapter(i915, state);
@@ -1227,11 +1289,11 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
void intel_opregion_unregister(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
intel_opregion_suspend(i915, PCI_D1);
- if (!opregion->header)
+ if (!opregion)
return;
if (opregion->acpi_notifier.notifier_call) {
@@ -1242,26 +1304,36 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
void intel_opregion_cleanup(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
- /* just clear all opregion memory pointers now */
memunmap(opregion->header);
- if (opregion->rvda) {
+ if (opregion->rvda)
memunmap(opregion->rvda);
- opregion->rvda = NULL;
- }
- if (opregion->vbt_firmware) {
- kfree(opregion->vbt_firmware);
- opregion->vbt_firmware = NULL;
- }
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
- opregion->asle_ext = NULL;
- opregion->vbt = NULL;
- opregion->lid_state = NULL;
+ kfree(opregion->vbt_firmware);
+ kfree(opregion);
+ i915->display.opregion = NULL;
+}
+
+static int intel_opregion_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (opregion)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_opregion);
+
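+/*
+ * Note: this exposes the full 8 KiB OpRegion (OPREGION_SIZE) read-only,
+ * e.g. at <debugfs>/dri/<minor>/i915_opregion.
+ */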
+void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
+ i915, &intel_opregion_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index fd2ea8ef0fa2..0bec224f711f 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -25,38 +25,13 @@
#ifndef _INTEL_OPREGION_H_
#define _INTEL_OPREGION_H_
-#include <linux/workqueue.h>
#include <linux/pci.h>
+#include <linux/types.h>
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-struct opregion_asle_ext;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- u32 swsci_gbda_sub_functions;
- u32 swsci_sbcb_sub_functions;
- struct opregion_asle *asle;
- struct opregion_asle_ext *asle_ext;
- void *rvda;
- void *vbt_firmware;
- const void *vbt;
- u32 vbt_size;
- u32 *lid_state;
- struct work_struct asle_work;
- struct notifier_block acpi_notifier;
-};
-
-#define OPREGION_SIZE (8 * 1024)
-
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
@@ -69,6 +44,7 @@ void intel_opregion_resume(struct drm_i915_private *dev_priv);
void intel_opregion_suspend(struct drm_i915_private *dev_priv,
pci_power_t state);
+bool intel_opregion_asle_present(struct drm_i915_private *i915);
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
@@ -77,8 +53,12 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
+const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
+
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
+void intel_opregion_debugfs_register(struct drm_i915_private *i915);
+
#else /* CONFIG_ACPI*/
static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
@@ -107,6 +87,11 @@ static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
{
}
+static inline bool intel_opregion_asle_present(struct drm_i915_private *i915)
+{
+ return false;
+}
+
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
@@ -134,11 +119,21 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
+static inline const void *
+intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+{
+ return NULL;
+}
+
static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
return false;
}
+static inline void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+{
+}
+
#endif /* CONFIG_ACPI */
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 0d8e5320a4f8..073ea3166c36 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -37,6 +37,7 @@
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_lvds_regs.h"
@@ -683,6 +684,9 @@ intel_panel_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
return connector_status_connected;
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index a55c09cbd0e4..ada1792df5b3 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -3,9 +3,11 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -13,20 +15,21 @@
#include "intel_plane_initial.h"
static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
+intel_reuse_initial_plane_obj(struct intel_crtc *this,
+ const struct intel_initial_plane_config plane_configs[],
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
+ struct drm_i915_private *i915 = to_i915(this->base.dev);
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
+ const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
continue;
@@ -34,7 +37,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
if (!plane_state->ggtt_vma)
continue;
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
*fb = plane_state->hw.fb;
*vma = plane_state->ggtt_vma;
return true;
@@ -44,12 +47,100 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
return false;
}
+static bool
+initial_plane_phys_lmem(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ struct intel_memory_region *mem;
+ dma_addr_t dma_addr;
+ gen8_pte_t pte;
+ u32 base;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
+
+ gte += base / I915_GTT_PAGE_SIZE;
+
+ pte = ioread64(gte);
+ if (!(pte & GEN12_GGTT_PTE_LM)) {
+ drm_err(&i915->drm,
+ "Initial plane programming missing PTE_LM bit\n");
+ return false;
+ }
+
+ dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
+
+ if (IS_DGFX(i915))
+ mem = i915->mm.regions[INTEL_REGION_LMEM_0];
+ else
+ mem = i915->mm.stolen_region;
+ if (!mem) {
+ drm_dbg_kms(&i915->drm,
+ "Initial plane memory region not initialized\n");
+ return false;
+ }
+
+ /*
+ * On lmem we don't currently expect this to
+ * ever be placed in the stolen portion.
+ */
+ if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
+ drm_err(&i915->drm,
+ "Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
+ &dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
+ return false;
+ }
+
+ drm_dbg(&i915->drm,
+ "Using dma_addr=%pa, based on initial plane programming\n",
+ &dma_addr);
+
+ plane_config->phys_base = dma_addr - mem->region.start;
+ plane_config->mem = mem;
+
+ return true;
+}
+
+static bool
+initial_plane_phys_smem(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct intel_memory_region *mem;
+ u32 base;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
+
+ mem = i915->mm.stolen_region;
+ if (!mem) {
+ drm_dbg_kms(&i915->drm,
+ "Initial plane memory region not initialized\n");
+ return false;
+ }
+
+ /* FIXME get and validate the dma_addr from the PTE */
+ plane_config->phys_base = base;
+ plane_config->mem = mem;
+
+ return true;
+}
+
+static bool
+initial_plane_phys(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
+ return initial_plane_phys_lmem(i915, plane_config);
+ else
+ return initial_plane_phys_smem(i915, plane_config);
+}
+
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
+ struct drm_mm_node orig_mm = {};
struct i915_vma *vma;
resource_size_t phys_base;
u32 base, size;
@@ -58,45 +149,13 @@ initial_plane_vma(struct drm_i915_private *i915,
if (plane_config->size == 0)
return NULL;
- base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
- if (IS_DGFX(i915)) {
- gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
- gen8_pte_t pte;
-
- gte += base / I915_GTT_PAGE_SIZE;
-
- pte = ioread64(gte);
- if (!(pte & GEN12_GGTT_PTE_LM)) {
- drm_err(&i915->drm,
- "Initial plane programming missing PTE_LM bit\n");
- return NULL;
- }
-
- phys_base = pte & I915_GTT_PAGE_MASK;
- mem = i915->mm.regions[INTEL_REGION_LMEM_0];
-
- /*
- * We don't currently expect this to ever be placed in the
- * stolen portion.
- */
- if (phys_base >= resource_size(&mem->region)) {
- drm_err(&i915->drm,
- "Initial plane programming using invalid range, phys_base=%pa\n",
- &phys_base);
- return NULL;
- }
-
- drm_dbg(&i915->drm,
- "Using phys_base=%pa, based on initial plane programming\n",
- &phys_base);
- } else {
- phys_base = base;
- mem = i915->mm.stolen_region;
- }
-
- if (!mem)
+ if (!initial_plane_phys(i915, plane_config))
return NULL;
+ phys_base = plane_config->phys_base;
+ mem = plane_config->mem;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
mem->min_page_size);
size -= base;
@@ -108,14 +167,19 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
- size * 2 > i915->dsm.usable_size)
+ size * 2 > i915->dsm.usable_size) {
+ drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
+ }
obj = i915_gem_object_create_region_at(mem, phys_base, size,
I915_BO_ALLOC_USER |
I915_BO_PREALLOC);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
+ mem->region.name);
return NULL;
+ }
/*
* Mark it WT ahead of time to avoid changing the
@@ -139,23 +203,66 @@ initial_plane_vma(struct drm_i915_private *i915,
goto err_obj;
}
+ /*
+ * MTL GOP likes to place the framebuffer high up in ggtt,
+ * which can cause problems for ggtt_reserve_guc_top().
+ * Try to pin it to a low ggtt address instead to avoid that.
+ */
+ base = 0;
+
+ if (base != plane_config->base) {
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ int ret;
+
+ /*
+ * Make sure the original and new locations
+ * can't overlap. That would corrupt the original
+ * PTEs which are still being used for scanout.
+ */
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &orig_mm,
+ size, plane_config->base,
+ I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
+ if (ret)
+ goto err_obj;
+ }
+
vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err_obj;
+retry:
pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
- if (HAS_GMCH(i915))
+ if (!i915_gem_object_is_lmem(obj))
pinctl |= PIN_MAPPABLE;
- if (i915_vma_pin(vma, 0, 0, pinctl))
+ if (i915_vma_pin(vma, 0, 0, pinctl)) {
+ if (drm_mm_node_allocated(&orig_mm)) {
+ drm_mm_remove_node(&orig_mm);
+ /*
+ * Try again, but this time pin
+ * it to its original location.
+ */
+ base = plane_config->base;
+ goto retry;
+ }
goto err_obj;
+ }
if (i915_gem_object_is_tiled(obj) &&
!i915_vma_is_map_and_fenceable(vma))
goto err_obj;
+ if (drm_mm_node_allocated(&orig_mm))
+ drm_mm_remove_node(&orig_mm);
+
+ drm_dbg_kms(&i915->drm,
+ "Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
+ i915_ggtt_offset(vma), plane_config->base);
+
return vma;
err_obj:
+ if (drm_mm_node_allocated(&orig_mm))
+ drm_mm_remove_node(&orig_mm);
i915_gem_object_put(obj);
return NULL;
}
@@ -210,10 +317,11 @@ err_vma:
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+ struct intel_initial_plane_config plane_configs[])
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@@ -239,7 +347,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
- if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+ if (intel_reuse_initial_plane_obj(crtc, plane_configs, &fb, &vma))
goto valid_fb;
/*
@@ -302,25 +410,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
-void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+void intel_initial_plane_config(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_initial_plane_config plane_config = {};
+ struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
+ if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
+ continue;
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
- plane_config_fini(&plane_config);
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, plane_configs);
+
+ if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ plane_config_fini(plane_config);
+ }
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
index c7e35ab3182b..64ab95239cd4 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.h
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
-struct intel_crtc;
+struct drm_i915_private;
-void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
+void intel_initial_plane_config(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index a8fa3a20990e..2d65a538f83e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -366,7 +366,7 @@ static bool intel_pps_is_valid(struct intel_dp *intel_dp)
if (intel_dp->pps.pps_idx == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) < PCH_MTP)
+ INTEL_PCH_TYPE(i915) <= PCH_ADP)
return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 57bbf3e3af92..72cadad09db5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -173,6 +173,12 @@
* irrelevant for normal operation.
*/
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -528,7 +534,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp_get_sink_sync_latency(intel_dp);
if (DISPLAY_VER(i915) >= 9 &&
- intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
+ intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
bool alpm = intel_dp_get_alpm_status(intel_dp);
@@ -560,11 +566,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->psr_dpcd[0])
_psr_init_dpcd(intel_dp);
- if (intel_dp->psr.sink_psr2_support) {
- intel_dp->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
+ if (intel_dp->psr.sink_psr2_support)
intel_dp_get_su_granularity(intel_dp);
- }
}
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
@@ -604,6 +607,18 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_ctl);
}
+static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (DISPLAY_VER(i915) >= 20 &&
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
+ !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
+ return true;
+
+ return false;
+}
+
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -619,6 +634,8 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+ if (psr2_su_region_et_valid(intel_dp))
+ dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
} else {
if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
@@ -762,8 +779,8 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
- return intel_dp->psr.io_wake_lines < 9 &&
- intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
+ return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
+ intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
}
static int psr2_block_count(struct intel_dp *intel_dp)
@@ -800,6 +817,7 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
@@ -841,17 +859,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
int tmp;
- tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = map[psr->alpm_parameters.io_wake_lines -
+ TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
- tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
} else if (DISPLAY_VER(dev_priv) >= 12) {
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
- val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
- val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -869,6 +888,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
}
+ if (psr2_su_region_et_valid(intel_dp))
+ val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
+
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
@@ -1031,6 +1053,9 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (psr2_su_region_et_valid(intel_dp))
+ crtc_state->enable_psr2_su_region_et = true;
+
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -1101,10 +1126,34 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
-static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int check_entry_lines;
+
+ if (DISPLAY_VER(i915) < 20)
+ return true;
+
+ /* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
+ check_entry_lines = 2 +
+ intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
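+ /* e.g. a 1080p60 mode has tline ~= 14.8us, so this yields 2 + 1 = 3 lines */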
+
+ if (check_entry_lines > 15)
+ return false;
+
+ if (i915->display.params.psr_safest_params)
+ check_entry_lines = 15;
+
+ intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
+
+ return true;
+}
+
+static bool _compute_alpm_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
u8 max_wake_lines;
@@ -1115,6 +1164,8 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
* it is not enough -> use 45 us.
*/
fast_wake_time = 45;
+
+ /* TODO: Check how we can use ALPM_CTL fast wake extended field */
max_wake_lines = 12;
} else {
io_wake_time = 50;
@@ -1131,12 +1182,15 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
fast_wake_lines > max_wake_lines)
return false;
+ if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
+ return false;
+
if (i915->display.params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
- intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
- intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+ intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
return true;
}
@@ -1268,7 +1322,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ if (!_compute_alpm_params(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, Unable to use long enough wake times\n");
return false;
@@ -1377,10 +1431,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
-
- crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
- intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
- &crtc_state->psr_vsc);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1504,6 +1554,21 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
wa_16013835468_bit_get(intel_dp), 0);
}
+static void lnl_alpm_configure(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (DISPLAY_VER(dev_priv) < 20)
+ return;
+
+ intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
+ ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
+ ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
+}
+
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1569,6 +1634,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
+ lnl_alpm_configure(intel_dp);
+
/*
* Wa_16013835468
* Wa_14015648006
@@ -1634,7 +1701,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- struct intel_encoder *encoder = &dig_port->base;
u32 val;
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
@@ -1662,7 +1728,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
@@ -1951,7 +2016,7 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
- struct drm_rect *clip, bool full_update)
+ bool full_update)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1966,17 +2031,21 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
goto exit;
}
- if (clip->y1 == -1)
+ if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
- val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
- val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
- drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+ drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ crtc_state->psr2_su_area.y1 % 4 ||
+ crtc_state->psr2_su_area.y2 % 4);
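+ /* older hw tracks the SU region in 4-line blocks, numbered from 1 */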
- val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
- val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
+ crtc_state->psr2_su_area.y1 / 4 + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
+ crtc_state->psr2_su_area.y2 / 4 + 1);
}
exit:
crtc_state->psr2_man_track_ctl = val;
@@ -2002,8 +2071,7 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
-static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
- struct drm_rect *pipe_clip)
+static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
@@ -2016,9 +2084,32 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
else
y_alignment = crtc_state->su_y_granularity;
- pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
- if (pipe_clip->y2 % y_alignment)
- pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
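+ /* e.g. with y_alignment == 4: y1 = 5 rounds down to 4, y2 = 10 rounds up to 12 */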
+ crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
+ if (crtc_state->psr2_su_area.y2 % y_alignment)
+ crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
+ y_alignment) + 1) * y_alignment;
+}
+
+/*
+ * When early transport is in use, the SU area must be extended to
+ * cover the cursor fully whenever the cursor lies within the SU area.
+ */
+static void
+intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *cursor_state)
+{
+ struct drm_rect inter;
+
+ if (!crtc_state->enable_psr2_su_region_et ||
+ !cursor_state->uapi.visible)
+ return;
+
+ inter = crtc_state->psr2_su_area;
+ if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
+ return;
+
+ clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
+ &crtc_state->pipe_src);
}
/*
@@ -2061,8 +2152,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
- struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane_state *new_plane_state, *old_plane_state,
+ *cursor_plane_state = NULL;
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -2075,6 +2166,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
goto skip_sel_fetch_set_loop;
}
+ crtc_state->psr2_su_area.x1 = 0;
+ crtc_state->psr2_su_area.y1 = -1;
+ crtc_state->psr2_su_area.x2 = INT_MAX;
+ crtc_state->psr2_su_area.y2 = -1;
+
/*
* Calculate minimal selective fetch area of each plane and calculate
* the pipe damaged area.
@@ -2109,14 +2205,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (old_plane_state->uapi.visible) {
damaged_area.y1 = old_plane_state->uapi.dst.y1;
damaged_area.y2 = old_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area,
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
}
if (new_plane_state->uapi.visible) {
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area,
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
}
continue;
@@ -2124,7 +2220,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
/* If alpha changed mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area,
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
continue;
}
@@ -2141,7 +2237,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
- clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
+
+ /*
+ * The new cursor plane state is stored so the SU area can later be
+ * extended to cover the cursor fully.
+ */
+ if (plane->id == PLANE_CURSOR)
+ cursor_plane_state = new_plane_state;
}
/*
@@ -2150,7 +2253,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
* should identify cases where this happens and fix the area
* calculation for those.
*/
- if (pipe_clip.y1 == -1) {
+ if (crtc_state->psr2_su_area.y1 == -1) {
drm_info_once(&dev_priv->drm,
"Selective fetch area calculation failed in pipe %c\n",
pipe_name(crtc->pipe));
@@ -2164,13 +2267,17 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
crtc_state->splitter.enable)
- pipe_clip.y1 = 0;
+ crtc_state->psr2_su_area.y1 = 0;
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
return ret;
- intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
+ /* Adjust the SU area to cover the cursor fully, if necessary */
+ if (cursor_plane_state)
+ intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
+
+ intel_psr2_sel_fetch_pipe_alignment(crtc_state);
/*
* Now that we have the pipe damaged area check if it intersect with
@@ -2185,7 +2292,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
!new_plane_state->uapi.visible)
continue;
- inter = pipe_clip;
+ inter = crtc_state->psr2_su_area;
sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
sel_fetch_area->y1 = -1;
@@ -2230,7 +2337,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
skip_sel_fetch_set_loop:
- psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
+ psr2_man_trk_ctl_calc(crtc_state, full_update);
return 0;
}
@@ -2799,6 +2906,9 @@ void intel_psr_init(struct intel_dp *intel_dp)
else
intel_dp->psr.source_support = true;
+ /* Disable early transport for now */
+ intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
+
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 143e0595c097..cde781df84d5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,12 +21,6 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
-#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
- (intel_dp)->psr.source_support)
-
-#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
- (intel_dp)->psr.source_panel_replay_support)
-
bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index efe4306b37e0..8427a736f639 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -159,6 +159,7 @@
#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 0)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 1)
+#define LNL_EDP_PSR2_SU_REGION_ET_ENABLE REG_BIT(27)
#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */
#define EDP_MAX_SU_DISABLE_TIME_MASK REG_GENMASK(24, 20)
@@ -245,6 +246,11 @@
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
+/* PSR2 Early transport */
+#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074
+
+#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A)
+
#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
#define _SEL_FETCH_PLANE_BASE_3_A 0x708D0
@@ -290,4 +296,61 @@
_SEL_FETCH_PLANE_OFFSET_1_A - \
_SEL_FETCH_PLANE_BASE_1_A)
+#define _ALPM_CTL_A 0x60950
+#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A)
+#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
+#define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30)
+#define ALPM_CTL_LOBF_ENABLE REG_BIT(29)
+#define ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE REG_BIT(28)
+#define ALPM_CTL_KEEP_FEC_ENABLE_FOR_AUX_WAKE_SLEEP REG_BIT(27)
+#define ALPM_CTL_RESTORE_OCCURED REG_BIT(26)
+#define ALPM_CTL_RESTORE_TO_SLEEP REG_BIT(25)
+#define ALPM_CTL_RESTORE_TO_DEEP_SLEEP REG_BIT(24)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(23, 21)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 0)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_128_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 1)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_256_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 2)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_512_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 3)
+#define ALPM_CTL_AUX_WAKE_SLEEP_HOLD_ENABLE REG_BIT(20)
+#define ALPM_CTL_ALPM_ENTRY_CHECK_MASK REG_GENMASK(19, 16)
+#define ALPM_CTL_ALPM_ENTRY_CHECK(val) REG_FIELD_PREP(ALPM_CTL_ALPM_ENTRY_CHECK_MASK, val)
+#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK REG_GENMASK(13, 8)
+#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5
+#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES)
+#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0)
+#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
+
+#define _ALPM_CTL2_A 0x60954
+#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A)
+#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24)
+#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val)
+#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16)
+#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION(val) REG_FIELD_PREP(ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK, val)
+#define ALPM_CTL2_NUMBER_OF_LTTPR_MASK REG_GENMASK(15, 12)
+#define ALPM_CTL2_NUMBER_OF_LTTPR(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_OF_LTTPR_MASK, val)
+#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(10, 8)
+#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME(val) REG_FIELD_PREP(ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK, val)
+#define ALPM_CTL2_FEC_DECODE_EN_POSITION_AFTER_WAKE_SR REG_BIT(4)
+#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK REG_GENMASK(2, 0)
+#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val)
+
+#define _PORT_ALPM_CTL_A 0x16fa2c
+#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A)
+#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK REG_GENMASK(19, 16)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK, val)
+#define PORT_ALPM_CTL_SILENCE_PERIOD_MASK REG_GENMASK(7, 0)
+#define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val)
+
+#define _PORT_ALPM_LFPS_CTL_A 0x16fa30
+#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A)
+#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
+
#endif /* __INTEL_PSR_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 2915d7afe5cc..093106c1e101 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -44,6 +44,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@@ -2140,6 +2141,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
if (!intel_sdvo_set_target_output(intel_sdvo,
intel_sdvo_connector->output_flag))
return connector_status_unknown;
@@ -2805,6 +2809,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
+ intel_connector->base.polled = intel_connector->polled;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2880,6 +2885,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ intel_connector->base.polled = intel_connector->polled;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index dcf05e00e505..6b374d481cd9 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -122,6 +122,15 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
+bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
+}
+
/*
* The display power domains used for TC ports depending on the
* platform and TC mode (legacy, DP-alt, TBT):
@@ -986,10 +995,11 @@ xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
+ return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
@@ -1012,16 +1022,17 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ val = intel_de_read(i915, reg);
if (enable)
val |= XELPDP_TCSS_POWER_REQUEST;
else
val &= ~XELPDP_TCSS_POWER_REQUEST;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+ intel_de_write(i915, reg, val);
}
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
@@ -1055,26 +1066,28 @@ static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ val = intel_de_read(i915, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+ intel_de_write(i915, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
+ return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
@@ -1590,7 +1603,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
-bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
+bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -1605,19 +1618,6 @@ bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
return tc_phy_hpd_live_status(tc) & mask;
}
-bool intel_tc_port_connected(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_tc_port *tc = to_tc_port(dig_port);
- bool is_connected;
-
- mutex_lock(&tc->lock);
- is_connected = intel_tc_port_connected_locked(encoder);
- mutex_unlock(&tc->lock);
-
- return is_connected;
-}
-
static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
bool ret;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 80a61e52850e..26c4265368c1 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -15,9 +15,9 @@ struct intel_encoder;
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
-bool intel_tc_port_connected_locked(struct intel_encoder *encoder);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 992a725de751..2b77d399f1a1 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -40,6 +40,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@@ -1327,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
* the active portion. Hence following this formula seems
* more trouble than it's worth.
*
- * if (GRAPHICS_VER(dev_priv) == 4) {
+ * if (DISPLAY_VER(dev_priv) == 4) {
* num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
* den = tv_mode->clock;
* } else {
@@ -1723,6 +1724,9 @@ intel_tv_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
if (force) {
struct drm_atomic_state *state;
@@ -1990,6 +1994,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ intel_connector->base.polled = intel_connector->polled;
drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index fe256bf7b485..baf7354cb6e2 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
@@ -581,3 +582,132 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
intel_vblank_section_exit(i915);
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}
+
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
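+ /* interlaced modes scan out in field lines, i.e. half the frame's lines */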
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
+
+void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state,
+ struct intel_vblank_evade_ctx *evade)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state;
+ const struct drm_display_mode *adjusted_mode;
+
+ evade->crtc = crtc;
+
+ evade->need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
+
+ /*
+ * During fastsets/etc. the transcoder is still
+ * running with the old timings at this point.
+ *
+ * TODO: maybe just use the active timings here?
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ crtc_state = new_crtc_state;
+ else
+ crtc_state = old_crtc_state;
+
+ adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ /* timing changes should happen with VRR disabled */
+ drm_WARN_ON(crtc->base.dev, intel_crtc_needs_modeset(new_crtc_state) ||
+ new_crtc_state->update_m_n || new_crtc_state->update_lrr);
+
+ if (intel_vrr_is_push_sent(crtc_state))
+ evade->vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ else
+ evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ } else {
+ evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
+ }
+
+ /* FIXME needs to be calibrated sensibly */
+ evade->min = evade->vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+ VBLANK_EVASION_TIME_US);
+ evade->max = evade->vblank_start - 1;
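+ /* e.g. VBLANK_EVASION_TIME_US (100us) at ~15us/line puts min ~7 lines before vblank_start */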
+
+ /*
+ * M/N and TRANS_VTOTAL are double buffered on the transcoder's
+ * undelayed vblank, so with seamless M/N and LRR we must evade
+ * both vblanks.
+ *
+ * DSB execution waits for the transcoder's undelayed vblank,
+ * hence we must kick off the commit before that.
+ */
+ if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr)
+ evade->min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+}
+
+/* must be called with vblank interrupt already enabled! */
+int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
+{
+ struct intel_crtc *crtc = evade->crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ long timeout = msecs_to_jiffies_timeout(1);
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ DEFINE_WAIT(wait);
+ int scanline;
+
+ if (evade->min <= 0 || evade->max <= 0)
+ return 0;
+
+ for (;;) {
+ /*
+ * prepare_to_wait() has a memory barrier, which guarantees
+ * other CPUs can see the task state update by the time we
+ * read the scanline.
+ */
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ scanline = intel_get_crtc_scanline(crtc);
+ if (scanline < evade->min || scanline > evade->max)
+ break;
+
+ if (!timeout) {
+ drm_err(&i915->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
+ break;
+ }
+
+ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
+
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (evade->need_vlv_dsi_wa && scanline == evade->vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
+ return scanline;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 17636f140c71..ec6c3da3eeac 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -13,6 +13,18 @@ struct drm_crtc;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_vblank_evade_ctx {
+ struct intel_crtc *crtc;
+ int min, max, vblank_start;
+ bool need_vlv_dsi_wa;
+};
+
+void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state,
+ struct intel_vblank_evade_ctx *evade);
+/* must be called with vblank interrupt already enabled! */
+int intel_vblank_evade(struct intel_vblank_evade_ctx *evade);
+
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 511dc1544854..392d93e97bf8 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2624,3 +2624,31 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
error:
kfree(intel_fb);
}
+
+bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = crtc->pipe;
+ u32 base;
+
+ if (!plane_state->uapi.visible)
+ return false;
+
+ base = intel_plane_ggtt_offset(plane_state);
+
+ /*
+ * We may have moved the surface to a different
+ * part of the ggtt; make the plane aware of that.
+ */
+ if (plane_config->base == base)
+ return false;
+
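+ /* PLANE_SURF is double buffered; the caller waits a vblank for the new base to latch */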
+ intel_de_write(i915, PLANE_SURF(pipe, plane_id), base);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h
index be64c201f9b3..e92e00c01b29 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h
@@ -22,6 +22,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
void skl_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
+bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 56588d6e24ae..614f319d754e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -443,12 +443,35 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+	 * normal (i.e. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ DISPLAY_VER(i915) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
if (intel_crtc_can_enable_sagv(new_crtc_state))
new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
else
@@ -478,21 +501,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return ret;
}
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case)
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_can_enable_sagv(i915, new_bw_state);
- }
-
return 0;
}
@@ -1367,7 +1375,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
u64 data_rate = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+ if (plane_id == PLANE_CURSOR)
continue;
data_rate += crtc_state->rel_data_rate[plane_id];
@@ -1514,12 +1522,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
return 0;
/* Allocate fixed number of blocks for cursor. */
- if (DISPLAY_VER(i915) < 20) {
- cursor_size = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= cursor_size;
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
- alloc->end - cursor_size, alloc->end);
- }
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
iter.data_rate = skl_total_relative_data_rate(crtc_state);
@@ -1533,7 +1539,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) {
+ if (plane_id == PLANE_CURSOR) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1581,7 +1587,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+ if (plane_id == PLANE_CURSOR)
continue;
if (DISPLAY_VER(i915) < 11 &&
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 555022c0652c..d3a771afb083 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2160,12 +2160,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
#ifdef CONFIG_MMU_NOTIFIER
if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
- read_lock(&eb->i915->mm.notifier_lock);
-
- /*
- * count is always at least 1, otherwise __EXEC_USERPTR_USED
- * could not have been set
- */
for (i = 0; i < count; i++) {
struct eb_vma *ev = &eb->vma[i];
struct drm_i915_gem_object *obj = ev->vma->obj;
@@ -2177,8 +2171,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (err)
break;
}
-
- read_unlock(&eb->i915->mm.notifier_lock);
}
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 0d812f4d787d..3b27218aabe2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -28,6 +28,13 @@ void i915_gem_suspend(struct drm_i915_private *i915)
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
+ /*
+	 * On rare occasions, we've observed that fence completion triggers
+	 * free_engines asynchronously via call_rcu(). Ensure those are done.
+ * This path is only called on suspend, so it's an acceptable cost.
+ */
+ rcu_barrier();
+
flush_workqueue(i915->wq);
/*
@@ -160,6 +167,9 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
+	/* As in i915_gem_suspend(), flush tasks staged by fence completion */
+ rcu_barrier();
+
for_each_gt(gt, i915, i)
intel_gt_suspend_late(gt);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index a4fb577eceb4..b09b74a2448b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem,
return ERR_PTR(-EINVAL);
if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
- offset + size > mem->io_size &&
+ offset + size > resource_size(&mem->io) &&
!i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
return ERR_PTR(-ENOSPC);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 8c88075eeab2..ad6dd7f3259b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -541,7 +541,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Exclude the reserved region from driver use */
mem->region.end = i915->dsm.reserved.start - 1;
- mem->io_size = min(mem->io_size, resource_size(&mem->region));
+ mem->io = DEFINE_RES_MEM(mem->io.start,
+ min(resource_size(&mem->io),
+ resource_size(&mem->region)));
i915->dsm.usable_size = resource_size(&mem->region);
@@ -752,7 +754,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
* With discrete devices, where we lack a mappable aperture there is no
* possible way to ever access this memory on the CPU side.
*/
- if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
+ if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
!(flags & I915_BO_ALLOC_GPU_ONLY))
return -ENOSPC;
@@ -826,7 +828,6 @@ static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
static int init_stolen_lmem(struct intel_memory_region *mem)
{
- struct drm_i915_private *i915 = mem->i915;
int err;
if (GEM_WARN_ON(resource_size(&mem->region) == 0))
@@ -838,14 +839,10 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
return 0;
}
- if (mem->io_size &&
- !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
+ if (resource_size(&mem->io) &&
+ !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
goto err_cleanup;
- drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
- &mem->io_start);
- drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);
-
return 0;
err_cleanup:
@@ -855,7 +852,7 @@ err_cleanup:
static int release_stolen_lmem(struct intel_memory_region *mem)
{
- if (mem->io_size)
+ if (resource_size(&mem->io))
io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
return 0;
@@ -938,13 +935,17 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
} else {
/* Use DSM base address instead for stolen memory */
- dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
+ dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
- if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
+ if (i915_direct_stolen_access(i915)) {
+ drm_dbg(&i915->drm, "Using direct DSM access\n");
+ io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
+ io_size = dsm_size;
+ } else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 9227f8146a58..27dcfd8a34bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -65,8 +65,6 @@ static const struct ttm_place sys_placement_flags = {
static struct ttm_placement i915_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
};
/**
@@ -144,45 +142,41 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
place->fpfn = offset >> PAGE_SHIFT;
WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
- } else if (mr->io_size && mr->io_size < mr->total) {
+ } else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
- WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
- place->lpfn = mr->io_size >> PAGE_SHIFT;
+ WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
+ place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
}
}
}
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
- struct ttm_place *requested,
- struct ttm_place *busy,
+ struct ttm_place *places,
struct ttm_placement *placement)
{
unsigned int num_allowed = obj->mm.n_placements;
unsigned int flags = obj->flags;
unsigned int i;
- placement->num_placement = 1;
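+	/*
+	 * With the busy list gone, TTM first tries the non-FALLBACK
+	 * placements without evicting anything, and then retries the
+	 * non-DESIRED ones with eviction allowed; places[0] is therefore
+	 * preferred and the rest act as the old "busy" list.
+	 */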
+ places[0].flags |= TTM_PL_FLAG_DESIRED;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, obj->bo_offset,
+ obj->mm.region, &places[0], obj->bo_offset,
obj->base.size, flags);
/* Cache this on object? */
- placement->num_busy_placement = num_allowed;
- for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
- obj->bo_offset, obj->base.size, flags);
-
- if (num_allowed == 0) {
- *busy = *requested;
- placement->num_busy_placement = 1;
+ for (i = 0; i < num_allowed; ++i) {
+ i915_ttm_place_from_region(obj->mm.placements[i],
+ &places[i + 1], obj->bo_offset,
+ obj->base.size, flags);
+ places[i + 1].flags |= TTM_PL_FLAG_FALLBACK;
}
- placement->placement = requested;
- placement->busy_placement = busy;
+ placement->num_placement = num_allowed + 1;
+ placement->placement = places;
}
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
@@ -789,7 +783,8 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
int ret;
/* First try only the requested placement. No eviction. */
- real_num_busy = fetch_and_zero(&placement->num_busy_placement);
+ real_num_busy = placement->num_placement;
+ placement->num_placement = 1;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret) {
ret = i915_ttm_err_to_gem(ret);
@@ -805,7 +800,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
*/
- placement->num_busy_placement = real_num_busy;
+ placement->num_placement = real_num_busy;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret)
return i915_ttm_err_to_gem(ret);
@@ -839,7 +834,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
- struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
+ struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
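+	/* places[0] holds the requested placement, the rest the fallbacks, hence the + 1 */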
struct ttm_placement placement;
/* restricted by sg_alloc_table */
@@ -849,7 +844,7 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
/* Move to the requested placement. */
- i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
+ i915_ttm_placement_from_obj(obj, places, &placement);
return __i915_ttm_get_pages(obj, &placement);
}
@@ -879,9 +874,7 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
obj->base.size, flags);
placement.num_placement = 1;
- placement.num_busy_placement = 1;
placement.placement = &requested;
- placement.busy_placement = &requested;
ret = __i915_ttm_get_pages(obj, &placement);
if (ret)
@@ -1090,7 +1083,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags;
- if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
+ if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue;
flags = obj->flags;
@@ -1101,8 +1094,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
}
if (err) {
- drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n",
- ERR_PTR(err));
+ drm_dbg_ratelimited(dev,
+				    "Unable to make resource CPU accessible (err = %pe)\n",
+ ERR_PTR(err));
dma_resv_unlock(bo->base.resv);
ret = VM_FAULT_SIGBUS;
goto out_rpm;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 1d3ebdf4069b..0e21ce9d3e5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -42,7 +42,6 @@
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-#include "i915_gem_userptr.h"
#include "i915_scatterlist.h"
#ifdef CONFIG_MMU_NOTIFIER
@@ -61,36 +60,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- long r;
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- write_lock(&i915->mm.notifier_lock);
-
mmu_interval_set_seq(mni, cur_seq);
-
- write_unlock(&i915->mm.notifier_lock);
-
- /*
- * We don't wait when the process is exiting. This is valid
- * because the object will be cleaned up anyway.
- *
- * This is also temporarily required as a hack, because we
- * cannot currently force non-consistent batch buffers to preempt
- * and reschedule by waiting on it, hanging processes on exit.
- */
- if (current->flags & PF_EXITING)
- return true;
-
- /* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
-
return true;
}
@@ -580,15 +550,3 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
#endif
}
-int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
-{
-#ifdef CONFIG_MMU_NOTIFIER
- rwlock_init(&dev_priv->mm.notifier_lock);
-#endif
-
- return 0;
-}
-
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
-{
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h
deleted file mode 100644
index 8dadb2f8436d..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __I915_GEM_USERPTR_H__
-#define __I915_GEM_USERPTR_H__
-
-struct drm_i915_private;
-
-int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-
-#endif /* __I915_GEM_USERPTR_H__ */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 2c51a2c452fc..99a9ade73956 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1054,7 +1054,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr,
int err;
total = 0;
- size = mr->io_size;
+ size = resource_size(&mr->io);
do {
struct drm_i915_gem_object *obj;
@@ -1315,28 +1315,28 @@ static int igt_mmap_migrate(void *arg)
struct intel_memory_region *mixed[] = { mr, system };
struct intel_memory_region *single[] = { mr };
struct ttm_resource_manager *man = mr->region_private;
- resource_size_t saved_io_size;
+ struct resource saved_io;
int err;
if (mr->private)
continue;
- if (!mr->io_size)
+ if (!resource_size(&mr->io))
continue;
/*
* For testing purposes let's force small BAR, if not already
* present.
*/
- saved_io_size = mr->io_size;
- if (mr->io_size == mr->total) {
- resource_size_t io_size = mr->io_size;
+ saved_io = mr->io;
+ if (resource_size(&mr->io) == mr->total) {
+ resource_size_t io_size = resource_size(&mr->io);
io_size = rounddown_pow_of_two(io_size >> 1);
if (io_size < PAGE_SIZE)
continue;
- mr->io_size = io_size;
+ mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
i915_ttm_buddy_man_force_visible_size(man,
io_size >> PAGE_SHIFT);
}
@@ -1396,9 +1396,9 @@ static int igt_mmap_migrate(void *arg)
IGT_MMAP_MIGRATE_FAIL_GPU |
IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
- mr->io_size = saved_io_size;
+ mr->io = saved_io;
i915_ttm_buddy_man_force_visible_size(man,
- mr->io_size >> PAGE_SHIFT);
+ resource_size(&mr->io) >> PAGE_SHIFT);
if (err)
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 86a04afff64b..e1bf13e3d307 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -226,7 +226,7 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 71)) ||
+ if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
IS_DG2(rq->i915)) {
u32 *cs;
@@ -822,7 +822,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
flags |= PIPE_CONTROL_FLUSH_L3;
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
/* dummy PIPE_CONTROL + depth flush */
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 40687806d22a..1ade568ffbfa 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1190,7 +1190,8 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
num = ARRAY_SIZE(xelpmp_regs);
}
} else {
- if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
+ if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 1a8e2b7db013..5f8d86e25993 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -96,7 +96,8 @@ static void heartbeat_commit(struct i915_request *rq,
static void show_heartbeat(const struct i915_request *rq,
struct intel_engine_cs *engine)
{
- struct drm_printer p = drm_debug_printer("heartbeat");
+ struct drm_printer p = drm_dbg_printer(&rq->i915->drm, DRM_UT_DRIVER,
+ "heartbeat");
if (!rq) {
intel_engine_dump(engine, &p,
@@ -290,6 +291,9 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
heartbeat_commit(rq, &attr);
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+ /* Ensure the forced pulse gets a full period to execute */
+ next_heartbeat(engine);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 21a7e3191c18..ec1cbe229f0e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -24,6 +24,7 @@
#include "intel_ring.h"
#include "i915_drv.h"
#include "i915_pci.h"
+#include "i915_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
@@ -1152,13 +1153,20 @@ static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
u32 pte_flags;
int ret;
GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+
+ if (i915_direct_stolen_access(i915)) {
+ drm_dbg(&i915->drm, "Using direct GSM access\n");
+ phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
+ } else {
+ phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+ }
if (needs_wc_ggtt_mapping(i915))
ggtt->gsm = ioremap_wc(phys_addr, size);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index f0dea54880af..c0b202223940 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -176,27 +176,13 @@ static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id)
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
-static u8 get_rc6_mask(struct intel_gt *gt)
-{
- u8 mask = 0;
-
- if (HAS_RC6(gt->i915))
- mask |= BIT(0);
- if (HAS_RC6p(gt->i915))
- mask |= BIT(1);
- if (HAS_RC6pp(gt->i915))
- mask |= BIT(2);
-
- return mask;
-}
-
static ssize_t rc6_enable_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+ return sysfs_emit(buff, "%x\n", gt->rc6.enabled);
}
static ssize_t rc6_enable_dev_show(struct device *dev,
@@ -205,7 +191,7 @@ static ssize_t rc6_enable_dev_show(struct device *dev,
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name);
- return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+ return sysfs_emit(buff, "%x\n", gt->rc6.enabled);
}
static u32 __rc6_residency_ms_show(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 86f73fe558ca..7811a8c9da06 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -24,7 +24,8 @@
bool i915_ggtt_require_binder(struct drm_i915_private *i915)
{
/* Wa_13010847436 & Wa_14019519902 */
- return MEDIA_VER_FULL(i915) == IP_VER(13, 0);
+ return !i915_direct_stolen_access(i915) &&
+ MEDIA_VER_FULL(i915) == IP_VER(13, 0);
}
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 353f93baaca0..25c1023eb5f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -495,7 +495,7 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915,
memset(table, 0, sizeof(struct drm_i915_mocs_table));
table->unused_entries_index = I915_MOCS_PTE;
- if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) {
table->size = ARRAY_SIZE(mtl_mocs_table);
table->table = mtl_mocs_table;
table->n_entries = MTL_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 7090e4be29cb..8f4b3c8af09c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -123,7 +123,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
* temporary wa and should be removed after fixing real cause
* of forcewake timeouts.
*/
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
pg_enable =
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f8512aee58a8..51bb27e10a4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -144,8 +144,8 @@ region_lmem_init(struct intel_memory_region *mem)
int ret;
if (!io_mapping_init_wc(&mem->iomap,
- mem->io_start,
- mem->io_size))
+ mem->io.start,
+ resource_size(&mem->io)))
return -EIO;
ret = intel_region_ttm_init(mem);
@@ -240,7 +240,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size -= tile_stolen;
} else {
/* Stolen starts from GSMBASE without CCS */
- lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
+ lmem_size = intel_uncore_read64(&i915->uncore, GEN6_GSMBASE);
}
i915_resize_lmem_bar(i915, lmem_size);
@@ -273,14 +273,6 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (err)
goto err_region_put;
- drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
- drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
- &mem->io_start);
- drm_info(&i915->drm, "Local memory IO size: %pa\n",
- &mem->io_size);
- drm_info(&i915->drm, "Local memory available: %pa\n",
- &lmem_size);
-
if (io_size < lmem_size)
drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
(u64)io_size >> 20);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 6801f8b95c53..c8e9aa41fdea 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1015,7 +1015,8 @@ void intel_gt_set_wedged(struct intel_gt *gt)
mutex_lock(&gt->reset.mutex);
if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer(__func__);
+ struct drm_printer p = drm_dbg_printer(&gt->i915->drm,
+ DRM_UT_DRIVER, __func__);
struct intel_engine_cs *engine;
enum intel_engine_id id;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 3eacbc50caf8..d67d44611c28 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -789,8 +789,13 @@ static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
dg2_ctx_gt_tuning_init(engine, wal);
- if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ /*
+ * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
+ * gen12_emit_indirect_ctx_rcs() rather than here on some early
+ * steppings.
+ */
+ if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}
@@ -820,6 +825,9 @@ static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_18019271663 */
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+
+ /* Wa_14019877138 */
+ wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
@@ -908,7 +916,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_ctx_workarounds_init(engine, wal);
else if (IS_PONTEVECCHIO(i915))
; /* noop; none at this time */
@@ -1233,7 +1241,8 @@ static void __set_mcr_steering(struct i915_wa_list *wal,
static void debug_dump_steering(struct intel_gt *gt)
{
- struct drm_printer p = drm_debug_printer("MCR Steering:");
+ struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER,
+ "MCR Steering:");
if (drm_debug_enabled(DRM_UT_DRIVER))
intel_gt_mcr_report_steering(&p, gt, false);
@@ -1643,7 +1652,7 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* Wa_14018778641 / Wa_18018781329 */
+ /* Wa_14018575942 / Wa_18018781329 */
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1710,7 +1719,7 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
@@ -1743,7 +1752,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
return;
}
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_gt_workarounds_init(gt, wal);
else if (IS_PONTEVECCHIO(i915))
pvc_gt_workarounds_init(gt, wal);
@@ -2216,7 +2225,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
if (engine->gt->type == GT_MEDIA)
; /* none yet */
- else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
@@ -2828,7 +2837,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
/*
@@ -2881,7 +2890,8 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
}
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
+ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 47070cba7eb1..12eca750f7d0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -285,7 +285,8 @@ out_engine:
intel_engine_pm_flush(engine);
if (intel_engine_pm_is_awake(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
+ struct drm_printer p = drm_dbg_printer(&engine->i915->drm,
+ DRM_UT_DRIVER, __func__);
intel_engine_dump(engine, &p,
"%s is still awake:%d after idle-barriers\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index bc441ce7b380..ef014df4c4fc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -122,9 +122,9 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
if (engine_sync_barrier(engine)) {
- struct drm_printer m = drm_err_printer("pulse");
+ struct drm_printer m = drm_err_printer(&engine->i915->drm, "pulse");
- pr_err("%s: no heartbeat pulse?\n", engine->name);
+ drm_printf(&m, "%s: no heartbeat pulse?\n", engine->name);
intel_engine_dump(engine, &m, "%s", engine->name);
err = -ETIME;
@@ -136,10 +136,10 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
pulse_unlock_wait(p); /* synchronize with the retirement callback */
if (!i915_active_is_idle(&p->active)) {
- struct drm_printer m = drm_err_printer("pulse");
+ struct drm_printer m = drm_err_printer(&engine->i915->drm, "pulse");
- pr_err("%s: heartbeat pulse did not flush idle tasks\n",
- engine->name);
+ drm_printf(&m, "%s: heartbeat pulse did not flush idle tasks\n",
+ engine->name);
i915_active_print(&p->active, &m);
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index a7189c2d660c..1aa1446c8fb0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -62,12 +62,12 @@ int live_rc6_manual(void *arg)
dt = ktime_get();
rc0_power = librapl_energy_uJ();
- msleep(250);
+ msleep(1000);
rc0_power = librapl_energy_uJ() - rc0_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
- pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
+ pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n",
(res[1] - res[0]) >> 10);
err = -EINVAL;
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 00b872b6380b..3941f2d6fa47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -206,8 +206,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
* of pages. To succeed with both allocations, especially in case of Small
* BAR, try to allocate no more than quarter of mappable memory.
*/
- if (mr && size > mr->io_size / 4)
- size = mr->io_size / 4;
+ if (mr && size > resource_size(&mr->io) / 4)
+ size = resource_size(&mr->io) / 4;
return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 63724e17829a..f7372f736a77 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -377,8 +377,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
CCS_MASK(engine->gt))
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);
+ /*
+	 * Some of the WA registers are MCR registers. As it is safe to
+	 * use the MCR form for non-MCR registers too, all WA registers
+	 * are added in MCR form for code simplicity.
+ */
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);
+ ret |= GUC_MCR_REG_ADD(gt, regset, wa->mcr_reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
@@ -394,13 +399,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
if (GRAPHICS_VER(engine->i915) >= 12) {
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false);
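+	/* As above, the MCR form is safe even for registers that are not multicast */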
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL0)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL1)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL2)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL3)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL4)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL5)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL6)), false);
}
return ret ? -1 : 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 0f79cb658518..52332bb14339 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -184,7 +184,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
* in the seconds range. However, there is a limit on how long an
* individual wait_for() can wait. So wrap it in a loop.
*/
- before_freq = intel_rps_read_actual_frequency(&uncore->gt->rps);
+ before_freq = intel_rps_read_actual_frequency(&gt->rps);
before = ktime_get();
for (count = 0; count < GUC_LOAD_RETRY_LIMIT; count++) {
ret = wait_for(guc_load_done(uncore, &status, &success), 1000);
@@ -192,7 +192,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
break;
guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n",
- count, intel_rps_read_actual_frequency(&uncore->gt->rps), status,
+ count, intel_rps_read_actual_frequency(&gt->rps), status,
REG_FIELD_GET(GS_BOOTROM_MASK, status),
REG_FIELD_GET(GS_UKERNEL_MASK, status));
}
@@ -204,7 +204,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n",
- status, delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), ret);
+ status, delta_ms, intel_rps_read_actual_frequency(&gt->rps), ret);
guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
bootrom, ukernel,
@@ -254,11 +254,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq,
+ intel_rps_read_actual_frequency(&gt->rps), before_freq,
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
+ delta_ms, intel_rps_read_actual_frequency(&gt->rps),
before_freq, status, count, ret);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a259f1118c5a..f3dcae4b9d45 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -236,6 +236,13 @@ set_context_destroyed(struct intel_context *ce)
ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
}
+static inline void
+clr_context_destroyed(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED;
+}
+
static inline bool context_pending_disable(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
@@ -613,6 +620,8 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
u32 g2h_len_dw,
bool loop)
{
+ int ret;
+
/*
* We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
* so we don't handle the case where we don't get a reply because we
@@ -623,7 +632,11 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
if (g2h_len_dw)
atomic_inc(&guc->outstanding_submission_g2h);
- return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+ ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
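+	/* A failed send means no G2H will arrive for it, so balance the increment above */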
+ if (ret)
+ atomic_dec(&guc->outstanding_submission_g2h);
+
+ return ret;
}
int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
@@ -1362,7 +1375,45 @@ static void guc_enable_busyness_worker(struct intel_guc *guc)
static void guc_cancel_busyness_worker(struct intel_guc *guc)
{
- cancel_delayed_work_sync(&guc->timestamp.work);
+ /*
+ * There are many different call stacks that can get here. Some of them
+ * hold the reset mutex. The busyness worker also attempts to acquire the
+ * reset mutex. Synchronously flushing a worker thread requires acquiring
+ * the worker mutex. Lockdep sees this as a conflict. It thinks that the
+ * flush can deadlock because it holds the worker mutex while waiting for
+ * the reset mutex, but another thread is holding the reset mutex and might
+ * attempt to use other worker functions.
+ *
+ * In practice, this scenario does not exist because the busyness worker
+ * does not block waiting for the reset mutex. It does a try-lock on it and
+	 * immediately exits if the lock is already held. Unfortunately, the mutex
+	 * in question (I915_RESET_BACKOFF) is an i915-specific implementation whose
+	 * lockdep annotation is not detailed enough to explain that this
+	 * 'might lock' is really a 'does not need to lock'. So one option would be
+	 * to add more complex lockdep annotations to ignore the issue (if at all
+	 * possible). A simpler option is to just not flush synchronously when a
+	 * reset is in progress. Given that the worker will just early exit and
+	 * re-schedule itself anyway, there is no advantage to running it
+	 * immediately.
+ *
+ * If a reset is not in progress, then the synchronous flush may be required.
+	 * As noted, many call stacks lead here, some during suspend and driver
+	 * unload, which do require a synchronous flush to make sure the worker
+	 * is stopped before memory is freed.
+ *
+ * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
+ * every possible call stack is unfeasible. It would be too intrusive to many
+ * areas that really don't care about the GuC backend. However, there is the
+ * 'reset_in_progress' flag available, so just use that.
+ *
+ * And note that in the case of a reset occurring during driver unload
+	 * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag
+	 * is set) is fine because there is another cancel in _finish (when the
+	 * reset flag is not).
+ */
+ if (guc_to_gt(guc)->uc.reset_in_progress)
+ cancel_delayed_work(&guc->timestamp.work);
+ else
+ cancel_delayed_work_sync(&guc->timestamp.work);
}
static void __reset_guc_busyness_stats(struct intel_guc *guc)
@@ -1613,6 +1664,11 @@ static void guc_flush_submissions(struct intel_guc *guc)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
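+/* Flush the deferred context-deregistration worker, e.g. ahead of suspend */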
+void intel_guc_submission_flush_work(struct intel_guc *guc)
+{
+ flush_work(&guc->submission_state.destroyed_worker);
+}
+
static void guc_flush_destroyed_contexts(struct intel_guc *guc);
void intel_guc_submission_reset_prepare(struct intel_guc *guc)
@@ -1948,8 +2004,16 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
+ /*
+ * Ensure the busyness worker gets cancelled even on a fatal wedge.
+	 * Note that reset_prepare is not allowed to do this because it confuses
+	 * lockdep.
+ */
+ if (guc_submission_initialized(guc))
+ guc_cancel_busyness_worker(guc);
+
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
+ !intel_guc_is_fw_running(guc) ||
intel_gt_is_wedged(guc_to_gt(guc)))) {
return;
}
@@ -3283,12 +3347,13 @@ static void guc_context_close(struct intel_context *ce)
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
-static inline void guc_lrc_desc_unpin(struct intel_context *ce)
+static inline int guc_lrc_desc_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
struct intel_gt *gt = guc_to_gt(guc);
unsigned long flags;
bool disabled;
+ int ret;
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
@@ -3299,18 +3364,41 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
spin_lock_irqsave(&ce->guc_state.lock, flags);
disabled = submission_disabled(guc);
if (likely(!disabled)) {
+ /*
+ * Take a gt-pm ref and change context state to be destroyed.
+ * NOTE: a G2H IRQ that comes after will put this gt-pm ref back
+ */
__intel_gt_pm_get(gt);
set_context_destroyed(ce);
clr_context_registered(ce);
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
if (unlikely(disabled)) {
release_guc_id(guc, ce);
__guc_context_destroy(ce);
- return;
+ return 0;
}
- deregister_context(ce, ce->guc_id.id);
+ /*
+	 * GuC is active, so let's destroy this context. At this point we can still
+	 * be racing with suspend, so we undo everything if the H2G fails in
+	 * deregister_context so that GuC reset will find this context during
+	 * clean up.
+ */
+ ret = deregister_context(ce, ce->guc_id.id);
+ if (ret) {
+ spin_lock(&ce->guc_state.lock);
+ set_context_registered(ce);
+ clr_context_destroyed(ce);
+ spin_unlock(&ce->guc_state.lock);
+ /*
+		 * As gt-pm is awake at function entry, intel_wakeref_put_async()
+		 * merely decrements the wakeref immediately, but per the function's
+		 * usage spec it must be called after the unlock.
+ */
+ intel_wakeref_put_async(&gt->wakeref);
+ }
+
+ return ret;
}
static void __guc_context_destroy(struct intel_context *ce)
@@ -3378,7 +3466,22 @@ static void deregister_destroyed_contexts(struct intel_guc *guc)
if (!ce)
break;
- guc_lrc_desc_unpin(ce);
+ if (guc_lrc_desc_unpin(ce)) {
+ /*
+			 * This means GuC's CT link was severed mid-way, which could happen
+ * in suspend-resume corner cases. In this case, put the
+ * context back into the destroyed_contexts list which will
+ * get picked up on the next context deregistration event or
+ * purged in a GuC sanitization event (reset/unload/wedged/...).
+ */
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ list_add_tail(&ce->destroyed_link,
+ &guc->submission_state.destroyed_contexts);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+			/* Bail now since the list might never be emptied if H2Gs keep failing */
+ break;
+ }
+
}
}
@@ -3389,6 +3492,17 @@ static void destroyed_worker_func(struct work_struct *w)
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
+ /*
+	 * In rare cases we can get here via async context-free fence-signals that
+	 * come very late in the suspend flow or very early in the resume flow. In
+	 * these cases, GuC won't be ready, but just skipping it here is fine as
+	 * these pending-destroy contexts either get destroyed completely at GuC
+	 * reset time at the end of suspend, or this worker is picked up again on
+	 * the next context destruction trigger after resume completes.
+ */
+ if (!intel_guc_is_ready(guc))
+ return;
+
with_intel_gt_pm(gt, wakeref)
deregister_destroyed_contexts(guc);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index c57b29cdb1a6..b6df75622d3b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -38,6 +38,8 @@ int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
bool interruptible,
long timeout);
+void intel_guc_submission_flush_work(struct intel_guc *guc);
+
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
{
return guc->submission_supported;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index ba9e07fc2b57..0945b177d5f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_rps.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
@@ -447,17 +448,68 @@ static const char *auth_mode_string(struct intel_huc *huc,
return partial ? "clear media" : "all workloads";
}
+/*
+ * Use a longer timeout for debug builds so that problems can be detected
+ * and analysed. But a shorter timeout for releases so that user's don't
+ * wait forever to find out there is a problem. Note that the only reason
+ * an end user should hit the timeout is in case of extreme thermal throttling.
+ * And a system that is that hot during boot is probably dead anyway!
+ */
+#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define HUC_LOAD_RETRY_LIMIT 20
+#else
+#define HUC_LOAD_RETRY_LIMIT 3
+#endif
+
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
- int ret;
+ struct intel_uncore *uncore = gt->uncore;
+ ktime_t before, after, delta;
+ int ret, count;
+ u64 delta_ms;
+ u32 before_freq;
- ret = __intel_wait_for_register(gt->uncore,
- huc->status[type].reg,
- huc->status[type].mask,
- huc->status[type].value,
- 2, 50, NULL);
+ /*
+	 * The KMD requests maximum frequency during driver load; however, thermal
+ * throttling can force the frequency down to minimum (although the board
+ * really should never get that hot in real life!). IFWI issues have been
+ * seen to cause sporadic failures to grant the higher frequency. And at
+ * minimum frequency, the authentication time can be in the seconds range.
+ * Note that there is a limit on how long an individual wait_for() can wait.
+ * So wrap it in a loop.
+ */
+ before_freq = intel_rps_read_actual_frequency(&gt->rps);
+ before = ktime_get();
+ for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) {
+ ret = __intel_wait_for_register(gt->uncore,
+ huc->status[type].reg,
+ huc->status[type].mask,
+ huc->status[type].value,
+ 2, 1000, NULL);
+ if (!ret)
+ break;
+
+ huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n",
+ count, intel_rps_read_actual_frequency(&gt->rps),
+ huc->status[type].reg.reg);
+ }
+ after = ktime_get();
+ delta = ktime_sub(after, before);
+ delta_ms = ktime_to_ms(delta);
+
+ if (delta_ms > 50) {
+ huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
+ delta_ms, huc->status[type].reg.reg, count, ret);
+ huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
+ intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
+ } else {
+ huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, intel_rps_read_actual_frequency(&gt->rps),
+ before_freq, huc->status[type].reg.reg, count, ret);
+ }
/* mark the load process as complete even if the wait failed */
delayed_huc_load_complete(huc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 3872d309ed31..6dfe5d9456c6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -640,7 +640,7 @@ void intel_uc_reset_finish(struct intel_uc *uc)
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
- if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_reset_finish(guc);
}
@@ -690,6 +690,8 @@ void intel_uc_suspend(struct intel_uc *uc)
return;
}
+ intel_guc_submission_flush_work(guc);
+
with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
err = intel_guc_suspend(guc);
if (err)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index faf21be724c3..4f74d867fe1a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -574,7 +574,7 @@ int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
ret = intel_vgpu_register_reg(vgpu,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
- &intel_vgpu_regops_opregion, OPREGION_SIZE,
+ &intel_vgpu_regops_opregion, INTEL_GVT_OPREGION_SIZE,
VFIO_REGION_INFO_FLAG_READ, base);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index db99c2ef66db..990eaa029d9c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -147,7 +147,7 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
- if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) {
switch (obj->pat_index) {
case 0: return " WB";
case 1: return " WT";
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c7d7c3b7ecc6..9ee902d5b72c 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -681,7 +681,8 @@ i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
if (drm_debug_enabled(DRM_UT_DRIVER)) {
- struct drm_printer p = drm_debug_printer("i915 device info:");
+ struct drm_printer p = drm_dbg_printer(&dev_priv->drm, DRM_UT_DRIVER,
+ "device info:");
struct intel_gt *gt;
unsigned int i;
@@ -1003,8 +1004,10 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(i915);
+ intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
+ intel_display_driver_disable_user_access(i915);
drm_atomic_helper_shutdown(&i915->drm);
}
@@ -1014,6 +1017,9 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable_interrupts(i915);
intel_hpd_cancel_work(i915);
+ if (HAS_DISPLAY(i915))
+ intel_display_driver_suspend_access(i915);
+
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
@@ -1080,8 +1086,11 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+ if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
+ intel_display_driver_disable_user_access(dev_priv);
+ }
pci_save_state(pdev);
@@ -1092,6 +1101,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
+ if (HAS_DISPLAY(dev_priv))
+ intel_display_driver_suspend_access(dev_priv);
+
intel_suspend_encoders(dev_priv);
/* Must be called before GGTT is suspended. */
@@ -1103,8 +1115,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_suspend(dev_priv, opregion_target_state);
- intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
-
dev_priv->suspend_count++;
intel_dmc_suspend(dev_priv);
@@ -1243,15 +1253,21 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_init_hw(dev_priv);
intel_clock_gating_init(dev_priv);
+
+ if (HAS_DISPLAY(dev_priv))
+ intel_display_driver_resume_access(dev_priv);
+
intel_hpd_init(dev_priv);
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(dev_priv);
intel_display_driver_resume(dev_priv);
- intel_hpd_poll_disable(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (HAS_DISPLAY(dev_priv)) {
+ intel_display_driver_enable_user_access(dev_priv);
drm_kms_helper_poll_enable(dev);
+ }
+ intel_hpd_poll_disable(dev_priv);
intel_opregion_resume(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index fa6852713bee..f58682505491 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -53,7 +53,7 @@ obj_meminfo(struct drm_i915_gem_object *obj,
obj->mm.region->id : INTEL_REGION_SMEM;
const u64 sz = obj->base.size;
- if (obj->base.handle_count > 1)
+ if (drm_gem_object_is_shared_for_memory_stats(&obj->base))
stats[id].shared += sz;
else
stats[id].private += sz;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 861567362abd..e81b3b2858ac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -165,14 +165,6 @@ struct i915_gem_mm {
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
-#ifdef CONFIG_MMU_NOTIFIER
- /**
- * notifier_lock for mmu notifiers, memory may not be allocated
- * while holding this lock.
- */
- rwlock_t notifier_lock;
-#endif
-
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;
u32 shrink_count;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 92758b6b41f0..1391c01d7663 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -48,7 +48,6 @@
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
-#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -1165,10 +1164,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
- ret = i915_gem_init_userptr(dev_priv);
- if (ret)
- return ret;
-
for_each_gt(gt, dev_priv, i) {
intel_uc_fetch_firmwares(&gt->uc);
intel_wopcm_init(&gt->wopcm);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d04660b60046..a0b784ebaddd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1157,7 +1157,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
dma_addr_t offset = dma - mem->region.start;
void __iomem *s;
- if (offset + PAGE_SIZE > mem->io_size) {
+ if (offset + PAGE_SIZE > resource_size(&mem->io)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2d695818f006..bd9d812b1afa 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3225,7 +3225,7 @@ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
struct intel_gt *gt = to_gt(i915);
/* Wa_18013179988 */
- if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
intel_wakeref_t wakeref;
u32 reg, shift;
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 00871ef99792..3baa2f54a86e 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -502,7 +502,7 @@ static int query_memregion_info(struct drm_i915_private *i915,
info.probed_size = mr->total;
if (mr->type == INTEL_MEMORY_LOCAL)
- info.probed_cpu_visible_size = mr->io_size;
+ info.probed_cpu_visible_size = resource_size(&mr->io);
else
info.probed_cpu_visible_size = mr->total;
@@ -551,6 +551,38 @@ static int query_hwconfig_blob(struct drm_i915_private *i915,
return hwconfig->size;
}
+static int
+query_guc_submission_version(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query)
+{
+ struct drm_i915_query_guc_submission_version __user *query_ptr =
+ u64_to_user_ptr(query->data_ptr);
+ struct drm_i915_query_guc_submission_version ver;
+ struct intel_guc *guc = &to_gt(i915)->uc.guc;
+ const size_t size = sizeof(ver);
+ int ret;
+
+ if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ return -ENODEV;
+
+ ret = copy_query_item(&ver, size, size, query);
+ if (ret != 0)
+ return ret;
+
+ if (ver.branch || ver.major || ver.minor || ver.patch)
+ return -EINVAL;
+
+ ver.branch = 0;
+ ver.major = guc->submission_version.major;
+ ver.minor = guc->submission_version.minor;
+ ver.patch = guc->submission_version.patch;
+
+ if (copy_to_user(query_ptr, &ver, size))
+ return -EFAULT;
+
+ return 0;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
@@ -559,6 +591,7 @@ static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
query_memregion_info,
query_hwconfig_blob,
query_geometry_subslices,
+ query_guc_submission_version,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
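From userspace, the new item follows the usual i915 query contract. A hedged sketch, assuming the DRM_I915_QUERY_GUC_SUBMISSION_VERSION id and struct layout from the matching uapi change (not part of this diff); the kernel side above rejects any non-zero input field with -EINVAL, so the output struct must be zeroed first:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_guc_version(int fd,
			     struct drm_i915_query_guc_submission_version *ver)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION,
		.length = sizeof(*ver),
		.data_ptr = (__u64)(uintptr_t)ver,
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (__u64)(uintptr_t)&item,
	};

	memset(ver, 0, sizeof(*ver));	/* non-zero input fields -> -EINVAL */
	return ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
}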
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 27dc903f0553..e00557e1a57f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3059,6 +3059,7 @@
#define MCURSOR_MODE_64_ARGB_AX (0x20 | MCURSOR_MODE_64_32B_AX)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
+#define _CURAPOS_ERLY_TPT 0x7008c
#define CURSOR_POS_Y_SIGN REG_BIT(31)
#define CURSOR_POS_Y_MASK REG_GENMASK(30, 16)
#define CURSOR_POS_Y(y) REG_FIELD_PREP(CURSOR_POS_Y_MASK, (y))
@@ -3087,6 +3088,7 @@
#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
+#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT)
#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A)
@@ -5412,6 +5414,9 @@
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
+#define MTL_PCODE_STOLEN_ACCESS _MMIO(0x138914)
+#define STOLEN_ACCESS_ALLOWED 0x1
+
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)
@@ -5652,6 +5657,10 @@ enum skl_power_gate {
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A (0 << 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B (1 << 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C (2 << 19)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
@@ -5684,6 +5693,8 @@ enum skl_power_gate {
/* Known as DDI_CTL_DE in MTL+ */
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1 << 31)
+#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
+#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf << 24)
#define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20)
@@ -6314,9 +6325,10 @@ enum skl_power_gate {
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
-#define GEN12_GSMBASE _MMIO(0x108100)
-#define GEN12_DSMBASE _MMIO(0x1080C0)
-#define GEN12_BDSM_MASK REG_GENMASK64(63, 20)
+#define GEN6_GSMBASE _MMIO(0x108100)
+#define GEN6_DSMBASE _MMIO(0x1080C0)
+#define GEN6_BDSM_MASK REG_GENMASK64(31, 20)
+#define GEN11_BDSM_MASK REG_GENMASK64(63, 20)
#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014)
#define SGSI_SIDECLK_DIS REG_BIT(17)
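The new register definitions use i915's typed REG_* helpers (REG_BIT, REG_GENMASK, REG_FIELD_PREP), compile-time-checked wrappers around BIT/GENMASK/FIELD_PREP. A hypothetical value composed from the cursor masks added above, where a negative Y position is encoded as sign bit plus magnitude:

	u32 curpos = CURSOR_POS_Y_SIGN | CURSOR_POS_Y(64);	/* y = -64 */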
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 60404dbb2e9f..df6437c37373 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -75,13 +75,10 @@ struct i915_syncmap {
unsigned int height;
unsigned int bitmap;
struct i915_syncmap *parent;
- /*
- * Following this header is an array of either seqno or child pointers:
- * union {
- * u32 seqno[KSYNCMAP];
- * struct i915_syncmap *child[KSYNCMAP];
- * };
- */
+ union {
+ DECLARE_FLEX_ARRAY(u32, seqno);
+ DECLARE_FLEX_ARRAY(struct i915_syncmap *, child);
+ };
};
/**
@@ -99,13 +96,13 @@ void i915_syncmap_init(struct i915_syncmap **root)
static inline u32 *__sync_seqno(struct i915_syncmap *p)
{
GEM_BUG_ON(p->height);
- return (u32 *)(p + 1);
+ return p->seqno;
}
static inline struct i915_syncmap **__sync_child(struct i915_syncmap *p)
{
GEM_BUG_ON(!p->height);
- return (struct i915_syncmap **)(p + 1);
+ return p->child;
}
static inline unsigned int
@@ -200,7 +197,7 @@ __sync_alloc_leaf(struct i915_syncmap *parent, u64 id)
{
struct i915_syncmap *p;
- p = kmalloc(sizeof(*p) + KSYNCMAP * sizeof(u32), GFP_KERNEL);
+ p = kmalloc(struct_size(p, seqno, KSYNCMAP), GFP_KERNEL);
if (unlikely(!p))
return NULL;
@@ -282,7 +279,7 @@ static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno)
unsigned int above;
/* Insert a join above the current layer */
- next = kzalloc(sizeof(*next) + KSYNCMAP * sizeof(next),
+ next = kzalloc(struct_size(next, child, KSYNCMAP),
GFP_KERNEL);
if (unlikely(!next))
return -ENOMEM;
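The syncmap change is an instance of the kernel-wide flexible-array conversion: DECLARE_FLEX_ARRAY() lets a flexible array live inside a union, and struct_size() computes the allocation size with overflow saturation. A self-contained sketch with hypothetical names:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>
#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY() */
#include <linux/types.h>

struct node {
	unsigned int height;
	union {				/* overlaid trailing arrays */
		DECLARE_FLEX_ARRAY(u32, seqno);
		DECLARE_FLEX_ARRAY(struct node *, child);
	};
};

static struct node *node_alloc(unsigned int count)
{
	struct node *p;

	/* sizeof(*p) + count * sizeof(p->seqno[0]), saturating on overflow */
	p = kzalloc(struct_size(p, seqno, count), GFP_KERNEL);
	return p;
}

Naming the array in struct_size() also removes the old ambiguity of KSYNCMAP * sizeof(next), which was correct only because a pointer and a child-array element happen to be the same size.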
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 29fd02bf5ea8..6f9e7b354b54 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include "i915_drv.h"
+#include "i915_reg.h"
#include "i915_utils.h"
#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
@@ -125,3 +126,19 @@ bool i915_vtd_active(struct drm_i915_private *i915)
/* Running as a guest, we assume the host is enforcing VT'd */
return i915_run_as_guest();
}
+
+bool i915_direct_stolen_access(struct drm_i915_private *i915)
+{
+ /*
+ * Wa_22018444074
+ *
+ * Access via the BAR can hang MTL; go directly to GSM/DSM,
+ * except for VM guests, which won't have access to it.
+ *
+ * Normally this would not work, but on MTL the system firmware
+ * should have relaxed the access permissions sufficiently.
+ * 0x138914==0x1 indicates that the firmware has done its job.
+ */
+ return IS_METEORLAKE(i915) && !i915_run_as_guest() &&
+ intel_uncore_read(&i915->uncore, MTL_PCODE_STOLEN_ACCESS) == STOLEN_ACCESS_ALLOWED;
+}
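i915_gem_stolen is the intended consumer. A hedged caller sketch, where dsm_base_from_bar() is a made-up placeholder and GEN6_DSMBASE/GEN11_BDSM_MASK are the definitions added to i915_reg.h above:

static resource_size_t probe_dsm_base(struct drm_i915_private *i915)
{
	/* Wa_22018444074: prefer the direct register read when firmware
	 * has opened up access; otherwise fall back to the BAR. */
	if (i915_direct_stolen_access(i915))
		return intel_uncore_read64(&i915->uncore, GEN6_DSMBASE) &
		       GEN11_BDSM_MASK;

	return dsm_base_from_bar(i915);	/* placeholder */
}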
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index f98577967b7f..b45ef0560611 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -391,4 +391,6 @@ static inline bool i915_run_as_guest(void)
bool i915_vtd_active(struct drm_i915_private *i915);
+bool i915_direct_stolen_access(struct drm_i915_private *i915);
+
#endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 60a03340bbd4..52d998e5c21a 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem,
if (memchr_inv(result, value, sizeof(result))) {
dev_err(mem->i915->drm.dev,
"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
- &mem->region, &mem->io_start, &offset, caller,
+ &mem->region, &mem->io.start, &offset, caller,
value, result[0], result[1], result[2]);
return -EINVAL;
}
@@ -67,11 +67,11 @@ static int iopagetest(struct intel_memory_region *mem,
int err;
int i;
- va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
+ va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
if (!va) {
dev_err(mem->i915->drm.dev,
"Failed to ioremap memory region [%pa + %pa] for %ps\n",
- &mem->io_start, &offset, caller);
+ &mem->io.start, &offset, caller);
return -EFAULT;
}
@@ -102,10 +102,10 @@ static int iomemtest(struct intel_memory_region *mem,
resource_size_t last, page;
int err;
- if (mem->io_size < PAGE_SIZE)
+ if (resource_size(&mem->io) < PAGE_SIZE)
return 0;
- last = mem->io_size - PAGE_SIZE;
+ last = resource_size(&mem->io) - PAGE_SIZE;
/*
* Quick test to check read/write access to the iomap (backing store).
@@ -207,7 +207,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
int err = 0;
- if (!mem->io_start)
+ if (!mem->io.start)
return 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
@@ -252,8 +252,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->i915 = i915;
mem->region = DEFINE_RES_MEM(start, size);
- mem->io_start = io_start;
- mem->io_size = io_size;
+ mem->io = DEFINE_RES_MEM(io_start, io_size);
mem->min_page_size = min_page_size;
mem->ops = ops;
mem->total = size;
@@ -373,6 +372,24 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
i915->mm.regions[i] = mem;
}
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *mem = i915->mm.regions[i];
+ u64 region_size, io_size;
+
+ if (!mem)
+ continue;
+
+ region_size = resource_size(&mem->region) >> 20;
+ io_size = resource_size(&mem->io) >> 20;
+
+ if (resource_size(&mem->io))
+ drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
+ mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
+ else
+ drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
+ mem->id, mem->name, region_size, &mem->region);
+ }
+
return 0;
out_cleanup:
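Folding io_start/io_size into one struct resource is the pattern throughout this series. A minimal sketch of the invariants it buys, assuming only <linux/ioport.h> and <linux/sizes.h>:

#include <linux/ioport.h>
#include <linux/sizes.h>

/* DEFINE_RES_MEM(start, size) spans [start, start + size - 1];
 * resource_size() recovers size, so the two can't drift apart. */
static const struct resource example_io = DEFINE_RES_MEM(0x80000000, SZ_256M);

static bool fits_one_page(const struct resource *io, resource_size_t offset)
{
	return resource_size(io) >= PAGE_SIZE &&
	       offset <= resource_size(io) - PAGE_SIZE;
}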
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 9ba36454e51b..40810cfb3fd9 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -71,8 +71,7 @@ struct intel_memory_region {
struct io_mapping iomap;
struct resource region;
- resource_size_t io_start;
- resource_size_t io_size;
+ struct resource io;
resource_size_t min_page_size;
resource_size_t total;
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index bf6097e7433d..04525d92bec5 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -87,7 +87,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
resource_size(&mem->region),
- mem->io_size,
+ resource_size(&mem->io),
mem->min_page_size, PAGE_SIZE);
if (ret)
return ret;
@@ -219,16 +219,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
goto out;
}
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
- } else if (mem->io_size && mem->io_size < mem->total) {
+ } else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place.fpfn = 0;
- if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
+ if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
ret = -E2BIG;
goto out;
}
- place.lpfn = mem->io_size >> PAGE_SHIFT;
+ place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
}
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index dfefad5a5fec..76400e9c40f0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1800,7 +1800,10 @@ static const struct intel_forcewake_range __mtl_fw_ranges[] = {
GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
0x24000 - 0x2407f: always on
0x24080 - 0x2ffff: reserved */
- GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT)
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x40000, 0x1901ef, 0),
+ GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
+ /* FIXME: WA to wake GT while triggering H2G */
};
/*
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index b61fe850e924..0d89d70b9c36 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -156,9 +156,9 @@ static int live_active_wait(void *arg)
__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(active->retired)) {
- struct drm_printer p = drm_err_printer(__func__);
+ struct drm_printer p = drm_err_printer(&i915->drm, __func__);
- pr_err("i915_active not retired after waiting!\n");
+ drm_printf(&p, "i915_active not retired after waiting!\n");
i915_active_print(&active->base, &p);
err = -EINVAL;
@@ -189,9 +189,9 @@ static int live_active_retire(void *arg)
err = -EIO;
if (!READ_ONCE(active->retired)) {
- struct drm_printer p = drm_err_printer(__func__);
+ struct drm_printer p = drm_err_printer(&i915->drm, __func__);
- pr_err("i915_active not retired after flushing!\n");
+ drm_printf(&p, "i915_active not retired after flushing!\n");
i915_active_print(&active->base, &p);
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index d985d9bae2e8..ae6070b5bf07 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
u64 start = drm_buddy_block_offset(block);
u64 end = start + drm_buddy_block_size(mm, block);
- if (start < mr->io_size)
- total += min_t(u64, end, mr->io_size) - start;
+ if (start < resource_size(&mr->io))
+ total += min_t(u64, end, resource_size(&mr->io)) - start;
}
return total;
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index 240beafb38ed..3cad6dac06b0 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -140,11 +140,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
return PCH_ADP;
- case INTEL_PCH_MTP_DEVICE_ID_TYPE:
- case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv));
- return PCH_MTP;
default:
return PCH_NONE;
}
@@ -173,9 +168,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_METEORLAKE(dev_priv))
- id = INTEL_PCH_MTP_DEVICE_ID_TYPE;
- else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
@@ -225,6 +218,13 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 20) {
dev_priv->pch_type = PCH_LNL;
return;
+ } else if (IS_METEORLAKE(dev_priv)) {
+ /*
+ * Both north display and south display are on the SoC die.
+ * The real PCH is uninvolved in display.
+ */
+ dev_priv->pch_type = PCH_MTL;
+ return;
} else if (IS_DG2(dev_priv)) {
dev_priv->pch_type = PCH_DG2;
return;
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
index 1b03ea60a7a8..89e89ede265d 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.h
+++ b/drivers/gpu/drm/i915/soc/intel_pch.h
@@ -25,11 +25,11 @@ enum intel_pch {
PCH_ICP, /* Ice Lake/Jasper Lake PCH */
PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
- PCH_MTP, /* Meteor Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
PCH_DG2,
+ PCH_MTL,
PCH_LNL,
};
@@ -59,16 +59,12 @@ enum intel_pch {
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
-#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00
-#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
-#define HAS_PCH_LNL(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LNL)
-#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP)
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
index c9b54bb2692d..803e3fcdb50f 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
@@ -42,14 +42,13 @@ int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
{
struct dcss_blkctl *blkctl;
- blkctl = kzalloc(sizeof(*blkctl), GFP_KERNEL);
+ blkctl = devm_kzalloc(dcss->dev, sizeof(*blkctl), GFP_KERNEL);
if (!blkctl)
return -ENOMEM;
- blkctl->base_reg = ioremap(blkctl_base, SZ_4K);
+ blkctl->base_reg = devm_ioremap(dcss->dev, blkctl_base, SZ_4K);
if (!blkctl->base_reg) {
dev_err(dcss->dev, "unable to remap BLK CTRL base\n");
- kfree(blkctl);
return -ENOMEM;
}
@@ -60,11 +59,3 @@ int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
return 0;
}
-
-void dcss_blkctl_exit(struct dcss_blkctl *blkctl)
-{
- if (blkctl->base_reg)
- iounmap(blkctl->base_reg);
-
- kfree(blkctl);
-}
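The DCSS conversion below repeats one devres pattern per submodule. A condensed sketch with hypothetical names, showing why each *_exit() and error-path iounmap()/kfree() can be deleted outright:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/slab.h>

struct example_block {			/* hypothetical submodule state */
	void __iomem *base_reg;
};

static int example_block_init(struct device *dev, unsigned long base)
{
	struct example_block *blk;

	blk = devm_kzalloc(dev, sizeof(*blk), GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	blk->base_reg = devm_ioremap(dev, base, SZ_4K);
	if (!blk->base_reg)
		return -ENOMEM;		/* blk is released with the device */

	return 0;
}
/* No example_block_exit(): devres unwinds both on unbind. */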
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
index 3a84cb3209c4..e41d5c2a3ea4 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
@@ -202,7 +202,7 @@ int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
struct dcss_ctxld *ctxld;
int ret;
- ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
+ ctxld = devm_kzalloc(dcss->dev, sizeof(*ctxld), GFP_KERNEL);
if (!ctxld)
return -ENOMEM;
@@ -217,7 +217,7 @@ int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
goto err;
}
- ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
+ ctxld->ctxld_reg = devm_ioremap(dcss->dev, ctxld_base, SZ_4K);
if (!ctxld->ctxld_reg) {
dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
ret = -ENOMEM;
@@ -226,18 +226,14 @@ int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
if (ret)
- goto err_irq;
+ goto err;
dcss_ctxld_hw_cfg(ctxld);
return 0;
-err_irq:
- iounmap(ctxld->ctxld_reg);
-
err:
dcss_ctxld_free_ctx(ctxld);
- kfree(ctxld);
return ret;
}
@@ -246,11 +242,7 @@ void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
free_irq(ctxld->irq, ctxld);
- if (ctxld->ctxld_reg)
- iounmap(ctxld->ctxld_reg);
-
dcss_ctxld_free_ctx(ctxld);
- kfree(ctxld);
}
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index 4f3af0dfb344..597e9b7bd4bf 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -109,8 +109,6 @@ dtg_err:
dcss_ctxld_exit(dcss->ctxld);
ctxld_err:
- dcss_blkctl_exit(dcss->blkctl);
-
dcss_clocks_disable(dcss);
return ret;
@@ -124,7 +122,6 @@ static void dcss_submodules_stop(struct dcss_dev *dcss)
dcss_ss_exit(dcss->ss);
dcss_dtg_exit(dcss->dtg);
dcss_ctxld_exit(dcss->ctxld);
- dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
}
@@ -170,6 +167,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
struct resource *res;
struct dcss_dev *dcss;
const struct dcss_type_data *devtype;
+ resource_size_t res_len;
devtype = of_device_get_match_data(dev);
if (!devtype) {
@@ -183,7 +181,13 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
return ERR_PTR(-EINVAL);
}
- dcss = kzalloc(sizeof(*dcss), GFP_KERNEL);
+ res_len = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, res_len, "dcss")) {
+ dev_err(dev, "cannot request memory region\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ dcss = devm_kzalloc(dev, sizeof(*dcss), GFP_KERNEL);
if (!dcss)
return ERR_PTR(-ENOMEM);
@@ -194,7 +198,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_clks_init(dcss);
if (ret) {
dev_err(dev, "clocks initialization failed\n");
- goto err;
+ return ERR_PTR(ret);
}
dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
@@ -226,9 +230,6 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
clks_err:
dcss_clks_release(dcss);
-err:
- kfree(dcss);
-
return ERR_PTR(ret);
}
@@ -246,8 +247,6 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
dcss_submodules_stop(dcss);
dcss_clks_release(dcss);
-
- kfree(dcss);
}
static int dcss_dev_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
index f27b87c09599..b032e873d227 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.h
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -104,7 +104,6 @@ extern const struct dev_pm_ops dcss_dev_pm_ops;
/* BLKCTL */
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
-void dcss_blkctl_exit(struct dcss_blkctl *blkctl);
/* CTXLD */
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dpr.c b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
index df9dab949bf2..072eb209249f 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dpr.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
@@ -135,7 +135,7 @@ static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base)
ch->base_ofs = dpr_base + i * 0x1000;
- ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
+ ch->base_reg = devm_ioremap(dpr->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(dpr->dev, "dpr: unable to remap ch %d base\n",
i);
@@ -155,7 +155,7 @@ int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
{
struct dcss_dpr *dpr;
- dpr = kzalloc(sizeof(*dpr), GFP_KERNEL);
+ dpr = devm_kzalloc(dcss->dev, sizeof(*dpr), GFP_KERNEL);
if (!dpr)
return -ENOMEM;
@@ -164,18 +164,8 @@ int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
dpr->ctxld = dcss->ctxld;
dpr->ctx_id = CTX_SB_HP;
- if (dcss_dpr_ch_init_all(dpr, dpr_base)) {
- int i;
-
- for (i = 0; i < 3; i++) {
- if (dpr->ch[i].base_reg)
- iounmap(dpr->ch[i].base_reg);
- }
-
- kfree(dpr);
-
+ if (dcss_dpr_ch_init_all(dpr, dpr_base))
return -ENOMEM;
- }
return 0;
}
@@ -189,12 +179,7 @@ void dcss_dpr_exit(struct dcss_dpr *dpr)
struct dcss_dpr_ch *ch = &dpr->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0);
-
- if (ch->base_reg)
- iounmap(ch->base_reg);
}
-
- kfree(dpr);
}
static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide,
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index ad5f29ea8f6a..d881f5a34760 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -51,15 +51,13 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
of_node_put(remote);
- mdrv = kzalloc(sizeof(*mdrv), GFP_KERNEL);
+ mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
if (!mdrv)
return -ENOMEM;
mdrv->dcss = dcss_dev_create(dev, hdmi_output);
- if (IS_ERR(mdrv->dcss)) {
- err = PTR_ERR(mdrv->dcss);
- goto err;
- }
+ if (IS_ERR(mdrv->dcss))
+ return PTR_ERR(mdrv->dcss);
dev_set_drvdata(dev, mdrv);
@@ -75,8 +73,6 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
dcss_shutoff:
dcss_dev_destroy(mdrv->dcss);
-err:
- kfree(mdrv);
return err;
}
@@ -86,8 +82,6 @@ static void dcss_drv_platform_remove(struct platform_device *pdev)
dcss_kms_detach(mdrv->kms);
dcss_dev_destroy(mdrv->dcss);
-
- kfree(mdrv);
}
static void dcss_drv_platform_shutdown(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
index 30de00540f63..2968f5d5bd41 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dtg.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
@@ -152,7 +152,7 @@ int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
int ret = 0;
struct dcss_dtg *dtg;
- dtg = kzalloc(sizeof(*dtg), GFP_KERNEL);
+ dtg = devm_kzalloc(dcss->dev, sizeof(*dtg), GFP_KERNEL);
if (!dtg)
return -ENOMEM;
@@ -160,11 +160,10 @@ int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
dtg->dev = dcss->dev;
dtg->ctxld = dcss->ctxld;
- dtg->base_reg = ioremap(dtg_base, SZ_4K);
+ dtg->base_reg = devm_ioremap(dtg->dev, dtg_base, SZ_4K);
if (!dtg->base_reg) {
- dev_err(dcss->dev, "dtg: unable to remap dtg base\n");
- ret = -ENOMEM;
- goto err_ioremap;
+ dev_err(dtg->dev, "dtg: unable to remap dtg base\n");
+ return -ENOMEM;
}
dtg->base_ofs = dtg_base;
@@ -175,17 +174,7 @@ int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);
- ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev));
- if (ret)
- goto err_irq;
-
- return 0;
-
-err_irq:
- iounmap(dtg->base_reg);
-
-err_ioremap:
- kfree(dtg);
+ ret = dcss_dtg_irq_config(dtg, to_platform_device(dtg->dev));
return ret;
}
@@ -193,11 +182,6 @@ err_ioremap:
void dcss_dtg_exit(struct dcss_dtg *dtg)
{
free_irq(dtg->ctxld_kick_irq, dtg);
-
- if (dtg->base_reg)
- iounmap(dtg->base_reg);
-
- kfree(dtg);
}
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
index 47852b9dd5ea..825728c356ff 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
@@ -302,7 +302,7 @@ static int dcss_scaler_ch_init_all(struct dcss_scaler *scl,
ch->base_ofs = scaler_base + i * 0x400;
- ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
+ ch->base_reg = devm_ioremap(scl->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(scl->dev, "scaler: unable to remap ch base\n");
return -ENOMEM;
@@ -318,7 +318,7 @@ int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
{
struct dcss_scaler *scaler;
- scaler = kzalloc(sizeof(*scaler), GFP_KERNEL);
+ scaler = devm_kzalloc(dcss->dev, sizeof(*scaler), GFP_KERNEL);
if (!scaler)
return -ENOMEM;
@@ -327,18 +327,8 @@ int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
scaler->ctxld = dcss->ctxld;
scaler->ctx_id = CTX_SB_HP;
- if (dcss_scaler_ch_init_all(scaler, scaler_base)) {
- int i;
-
- for (i = 0; i < 3; i++) {
- if (scaler->ch[i].base_reg)
- iounmap(scaler->ch[i].base_reg);
- }
-
- kfree(scaler);
-
+ if (dcss_scaler_ch_init_all(scaler, scaler_base))
return -ENOMEM;
- }
return 0;
}
@@ -351,12 +341,7 @@ void dcss_scaler_exit(struct dcss_scaler *scl)
struct dcss_scaler_ch *ch = &scl->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL);
-
- if (ch->base_reg)
- iounmap(ch->base_reg);
}
-
- kfree(scl);
}
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ss.c b/drivers/gpu/drm/imx/dcss/dcss-ss.c
index 8ddf08da911b..0df81866fb7b 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-ss.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-ss.c
@@ -83,7 +83,7 @@ int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
{
struct dcss_ss *ss;
- ss = kzalloc(sizeof(*ss), GFP_KERNEL);
+ ss = devm_kzalloc(dcss->dev, sizeof(*ss), GFP_KERNEL);
if (!ss)
return -ENOMEM;
@@ -91,10 +91,9 @@ int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
ss->dev = dcss->dev;
ss->ctxld = dcss->ctxld;
- ss->base_reg = ioremap(ss_base, SZ_4K);
+ ss->base_reg = devm_ioremap(ss->dev, ss_base, SZ_4K);
if (!ss->base_reg) {
- dev_err(dcss->dev, "ss: unable to remap ss base\n");
- kfree(ss);
+ dev_err(ss->dev, "ss: unable to remap ss base\n");
return -ENOMEM;
}
@@ -108,11 +107,6 @@ void dcss_ss_exit(struct dcss_ss *ss)
{
/* stop SS */
dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
-
- if (ss->base_reg)
- iounmap(ss->base_reg);
-
- kfree(ss);
}
void dcss_ss_subsam_set(struct dcss_ss *ss)
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 53840ab054c7..71d70194fcbd 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -655,7 +655,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
for (i = 0; i < 4; i++) {
char clkname[16];
- sprintf(clkname, "di%d_sel", i);
+ snprintf(clkname, sizeof(clkname), "di%d_sel", i);
imx_ldb->clk_sel[i] = devm_clk_get(imx_ldb->dev, clkname);
if (IS_ERR(imx_ldb->clk_sel[i])) {
ret = PTR_ERR(imx_ldb->clk_sel[i]);
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index b440e0cdc057..3db117c5edd9 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -11,7 +11,6 @@ config DRM_INGENIC
select DRM_GEM_DMA_HELPER
select REGMAP
select REGMAP_MMIO
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Ingenic SoCs.
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
index 8389f2d7d021..0e668fc1e0f9 100644
--- a/drivers/gpu/drm/lima/lima_ctx.c
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -19,7 +19,7 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
kref_init(&ctx->refcnt);
for (i = 0; i < lima_pipe_num; i++) {
- err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
+ err = lima_sched_context_init(dev->pipe + i, ctx->context + i);
if (err)
goto err_out0;
}
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
index 74e2be09090f..5b1063ce968b 100644
--- a/drivers/gpu/drm/lima/lima_ctx.h
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -13,7 +13,6 @@ struct lima_ctx {
struct kref refcnt;
struct lima_device *dev;
struct lima_sched_context context[lima_pipe_num];
- atomic_t guilty;
/* debug info */
char pname[TASK_COMM_LEN];
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 4f9736e5f929..7ea244d876ca 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -75,29 +75,34 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
} else {
bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
if (!bo->base.sgt) {
- sg_free_table(&sgt);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_out0;
}
}
ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
- if (ret) {
- sg_free_table(&sgt);
- kfree(bo->base.sgt);
- bo->base.sgt = NULL;
- return ret;
- }
+ if (ret)
+ goto err_out1;
*bo->base.sgt = sgt;
if (vm) {
ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
if (ret)
- return ret;
+ goto err_out2;
}
bo->heap_size = new_size;
return 0;
+
+err_out2:
+ dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+err_out1:
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+err_out0:
+ sg_free_table(&sgt);
+ return ret;
}
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
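The reworked lima_heap_alloc() error handling is the standard goto unwind ladder. A generic sketch, where step_*/undo_* are placeholders: each label undoes exactly the steps completed before the failure, falling through in reverse order.

int step_a(void), step_b(void), step_c(void);	/* placeholders */
void undo_a(void), undo_b(void);

static int setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto err_undo_a;

	ret = step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;
}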
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index 8dd501b7a3d0..6b354e2fb61d 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -34,11 +34,11 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
if (state & LIMA_GP_IRQ_MASK_ERROR) {
if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
- dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
- status);
+ dev_dbg(dev->dev, "%s out of heap irq status=%x\n",
+ lima_ip_name(ip), status);
} else {
- dev_err(dev->dev, "gp error irq state=%x status=%x\n",
- state, status);
+ dev_err(dev->dev, "%s error irq state=%x status=%x\n",
+ lima_ip_name(ip), state, status);
if (task)
task->recoverable = false;
}
@@ -89,7 +89,8 @@ static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
v & LIMA_GP_IRQ_RESET_COMPLETED,
0, 100);
if (err) {
- dev_err(dev->dev, "gp soft reset time out\n");
+ dev_err(dev->dev, "%s soft reset time out\n",
+ lima_ip_name(ip));
return err;
}
@@ -166,6 +167,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
gp_write(LIMA_GP_CMD, cmd);
}
+static int lima_gp_bus_stop_poll(struct lima_ip *ip)
+{
+ return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED);
+}
+
static int lima_gp_hard_reset_poll(struct lima_ip *ip)
{
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
@@ -179,16 +185,30 @@ static int lima_gp_hard_reset(struct lima_ip *ip)
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
gp_write(LIMA_GP_INT_MASK, 0);
+
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS);
+ ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
+ return ret;
+ }
gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
if (ret) {
- dev_err(dev->dev, "gp hard reset timeout\n");
+ dev_err(dev->dev, "%s hard reset timeout\n", lima_ip_name(ip));
return ret;
}
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+
+ /*
+ * if there was an async soft reset queued,
+ * don't wait for it in the next job
+ */
+ ip->data.async_reset = false;
+
return 0;
}
@@ -201,8 +221,9 @@ static void lima_gp_task_error(struct lima_sched_pipe *pipe)
{
struct lima_ip *ip = pipe->processor[0];
- dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
- gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));
+ dev_err(ip->dev->dev, "%s task error int_state=%x status=%x\n",
+ lima_ip_name(ip), gp_read(LIMA_GP_INT_STAT),
+ gp_read(LIMA_GP_STATUS));
lima_gp_hard_reset(ip);
}
@@ -305,7 +326,7 @@ int lima_gp_init(struct lima_ip *ip)
err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "gp %s fail to request irq\n",
+ dev_err(dev->dev, "%s fail to request irq\n",
lima_ip_name(ip));
return err;
}
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
index c4080a02957b..184106ce55f8 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.c
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -21,7 +21,8 @@ static int lima_l2_cache_wait_idle(struct lima_ip *ip)
!(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY),
0, 1000);
if (err) {
- dev_err(dev->dev, "l2 cache wait command timeout\n");
+ dev_err(dev->dev, "%s wait command timeout\n",
+ lima_ip_name(ip));
return err;
}
return 0;
@@ -83,7 +84,8 @@ int lima_l2_cache_init(struct lima_ip *ip)
spin_lock_init(&ip->data.lock);
size = l2_cache_read(LIMA_L2_CACHE_SIZE);
- dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+ dev_info(dev->dev, "%s %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+ lima_ip_name(ip),
1 << (((size >> 16) & 0xff) - 10),
1 << ((size >> 8) & 0xff),
1 << (size & 0xff),
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index a1ae6c252dc2..e18317c5ca8c 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -22,7 +22,8 @@
cond, 0, 100); \
if (__ret) \
dev_err(dev->dev, \
- "mmu command %x timeout\n", cmd); \
+ "%s command %x timeout\n", \
+ lima_ip_name(ip), cmd); \
__ret; \
})
@@ -40,14 +41,13 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
if (status & LIMA_MMU_INT_PAGE_FAULT) {
u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);
- dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
- fault, LIMA_MMU_STATUS_BUS_ID(status),
- status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
- lima_ip_name(ip));
+ dev_err(dev->dev, "%s page fault at 0x%x from bus id %d of type %s\n",
+ lima_ip_name(ip), fault, LIMA_MMU_STATUS_BUS_ID(status),
+ status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read");
}
if (status & LIMA_MMU_INT_READ_BUS_ERROR)
- dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s irq bus error\n", lima_ip_name(ip));
/* mask all interrupts before resume */
mmu_write(LIMA_MMU_INT_MASK, 0);
@@ -102,14 +102,14 @@ int lima_mmu_init(struct lima_ip *ip)
mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
- dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s dte write test fail\n", lima_ip_name(ip));
return -EIO;
}
err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s fail to request irq\n", lima_ip_name(ip));
return err;
}
@@ -152,7 +152,7 @@ void lima_mmu_page_fault_resume(struct lima_ip *ip)
u32 v;
if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
- dev_info(dev->dev, "mmu resume\n");
+ dev_info(dev->dev, "%s resume\n", lima_ip_name(ip));
mmu_write(LIMA_MMU_INT_MASK, 0);
mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
index e397e1146e96..113cb9b215cd 100644
--- a/drivers/gpu/drm/lima/lima_pmu.c
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -21,7 +21,8 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
v, v & LIMA_PMU_INT_CMD_MASK,
100, 100000);
if (err) {
- dev_err(dev->dev, "timeout wait pmu cmd\n");
+ dev_err(dev->dev, "%s timeout wait pmu cmd\n",
+ lima_ip_name(ip));
return err;
}
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index a5c95bed08c0..d0d2db0ef1ce 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -26,8 +26,8 @@ static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
if (state & LIMA_PP_IRQ_MASK_ERROR) {
u32 status = pp_read(LIMA_PP_STATUS);
- dev_err(dev->dev, "pp error irq state=%x status=%x\n",
- state, status);
+ dev_err(dev->dev, "%s error irq state=%x status=%x\n",
+ lima_ip_name(ip), state, status);
pipe->error = true;
@@ -125,7 +125,7 @@ static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
if (ret) {
- dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s reset time out\n", lima_ip_name(ip));
return ret;
}
@@ -168,6 +168,11 @@ static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
}
}
+static int lima_pp_bus_stop_poll(struct lima_ip *ip)
+{
+ return !!(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_BUS_STOPPED);
+}
+
static int lima_pp_hard_reset_poll(struct lima_ip *ip)
{
pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
@@ -181,16 +186,31 @@ static int lima_pp_hard_reset(struct lima_ip *ip)
pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
pp_write(LIMA_PP_INT_MASK, 0);
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_STOP_BUS);
+ ret = lima_poll_timeout(ip, lima_pp_bus_stop_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
+ return ret;
+ }
+
pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
if (ret) {
- dev_err(dev->dev, "pp hard reset timeout\n");
+ dev_err(dev->dev, "%s hard reset timeout\n", lima_ip_name(ip));
return ret;
}
pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+
+ /*
+ * if there was an async soft reset queued,
+ * don't wait for it in the next job
+ */
+ ip->data.async_reset = false;
+
return 0;
}
@@ -254,7 +274,7 @@ int lima_pp_init(struct lima_ip *ip)
err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "pp %s fail to request irq\n",
+ dev_err(dev->dev, "%s fail to request irq\n",
lima_ip_name(ip));
return err;
}
@@ -289,7 +309,7 @@ int lima_pp_bcast_init(struct lima_ip *ip)
err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "pp %s fail to request irq\n",
+ dev_err(dev->dev, "%s fail to request irq\n",
lima_ip_name(ip));
return err;
}
@@ -403,8 +423,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
for (i = 0; i < pipe->num_processor; i++) {
struct lima_ip *ip = pipe->processor[i];
- dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
- i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS));
+ dev_err(ip->dev->dev, "%s task error %d int_state=%x status=%x\n",
+ lima_ip_name(ip), i, pp_read(LIMA_PP_INT_STATUS),
+ pp_read(LIMA_PP_STATUS));
lima_pp_hard_reset(ip);
}
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index c3bf8cda8498..00b19adfc888 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+#include <linux/hardirq.h>
#include <linux/iosys-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
@@ -153,13 +154,12 @@ void lima_sched_task_fini(struct lima_sched_task *task)
}
int lima_sched_context_init(struct lima_sched_pipe *pipe,
- struct lima_sched_context *context,
- atomic_t *guilty)
+ struct lima_sched_context *context)
{
struct drm_gpu_scheduler *sched = &pipe->base;
return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, guilty);
+ &sched, 1, NULL);
}
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
@@ -401,9 +401,35 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
struct lima_device *ldev = pipe->ldev;
+ struct lima_ip *ip = pipe->processor[0];
+ int i;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (dma_fence_is_signaled(task->fence)) {
+ DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+
+ /*
+ * Lima IRQ handler may take a long time to process an interrupt
+ * if there is another IRQ handler hogging the processing.
+ * In order to catch such cases and not report spurious Lima job
+ * timeouts, synchronize the IRQ handler and re-check the fence
+ * status.
+ */
+ for (i = 0; i < pipe->num_processor; i++)
+ synchronize_irq(pipe->processor[i]->irq);
+
+ if (dma_fence_is_signaled(task->fence)) {
+ DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
if (!pipe->error)
- DRM_ERROR("lima job timeout\n");
+ DRM_ERROR("%s job timeout\n", lima_ip_name(ip));
drm_sched_stop(&pipe->base, &task->base);
@@ -417,8 +443,6 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
if (pipe->bcast_mmu)
lima_mmu_page_fault_resume(pipe->bcast_mmu);
else {
- int i;
-
for (i = 0; i < pipe->num_mmu; i++)
lima_mmu_page_fault_resume(pipe->mmu[i]);
}
@@ -481,7 +505,7 @@ static void lima_sched_recover_work(struct work_struct *work)
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
- lima_sched_timeout_ms : 500;
+ lima_sched_timeout_ms : 10000;
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
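The spurious-timeout guard generalizes beyond lima. A distilled sketch of the double-check, using only dma_fence_is_signaled() and synchronize_irq():

#include <linux/dma-fence.h>
#include <linux/interrupt.h>

/* A timeout is only treated as real if the fence is still unsignaled
 * after synchronize_irq() has flushed any completion handler that was
 * running or pending when the timer fired. */
static bool job_timeout_is_real(struct dma_fence *fence, unsigned int irq)
{
	if (dma_fence_is_signaled(fence))
		return false;		/* spurious: job finished first */

	synchronize_irq(irq);		/* wait out a late completion IRQ */

	return !dma_fence_is_signaled(fence);
}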
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 6a11764d87b3..6bd4f3b70109 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -91,8 +91,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
void lima_sched_task_fini(struct lima_sched_task *task);
int lima_sched_context_init(struct lima_sched_pipe *pipe,
- struct lima_sched_context *context,
- atomic_t *guilty);
+ struct lima_sched_context *context);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index 89ccc0c43169..d8ff60b46abe 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -184,7 +184,7 @@ static int lsdc_get_dedicated_vram(struct lsdc_device *ldev,
drm_info(ddev, "Dedicated vram start: 0x%llx, size: %uMiB\n",
(u64)base, (u32)(size >> 20));
- return 0;
+ return (size > SZ_1M) ? 0 : -ENODEV;
}
static struct lsdc_device *
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
index bf79dc55afa4..465f622ac05d 100644
--- a/drivers/gpu/drm/loongson/lsdc_ttm.c
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -54,7 +54,6 @@ static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
pflags |= TTM_PL_FLAG_TOPDOWN;
lbo->placement.placement = lbo->placements;
- lbo->placement.busy_placement = lbo->placements;
if (domain & LSDC_GEM_DOMAIN_VRAM) {
lbo->placements[c].mem_type = TTM_PL_VRAM;
@@ -77,7 +76,6 @@ static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
}
lbo->placement.num_placement = c;
- lbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
lbo->placements[i].fpfn = 0;
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index 4f3d68e11bc1..907460b69d4f 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -11,7 +11,6 @@ config DRM_MCDE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the ST-Ericsson MCDE
Multi-Channel Display Engine.
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 2136a596efa1..0ba72102636a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2042,12 +2042,12 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
return ret;
}
-static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *mtk_dp_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
bool enabled = mtk_dp->enabled;
- struct edid *new_edid = NULL;
+ const struct drm_edid *drm_edid;
struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
if (!enabled) {
@@ -2055,7 +2055,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
mtk_dp_aux_panel_poweron(mtk_dp, true);
}
- new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
+ drm_edid = drm_edid_read_ddc(connector, &mtk_dp->aux.ddc);
/*
* Parse capability here to let atomic_get_input_bus_fmts and
@@ -2063,17 +2063,26 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
*/
if (mtk_dp_parse_capabilities(mtk_dp)) {
drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
- kfree(new_edid);
- new_edid = NULL;
+ drm_edid_free(drm_edid);
+ drm_edid = NULL;
}
- if (new_edid) {
+ if (drm_edid) {
+ /*
+ * FIXME: get rid of drm_edid_raw()
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
struct cea_sad *sads;
- audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
+ audio_caps->sad_count = drm_edid_to_sad(edid, &sads);
kfree(sads);
- audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ /*
+ * FIXME: This should use connector->display_info.has_audio from
+ * a path that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ audio_caps->detect_monitor = drm_detect_monitor_audio(edid);
}
if (!enabled) {
@@ -2081,7 +2090,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
}
- return new_edid;
+ return drm_edid;
}
static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
@@ -2433,7 +2442,7 @@ static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
.atomic_enable = mtk_dp_bridge_atomic_enable,
.atomic_disable = mtk_dp_bridge_atomic_disable,
.mode_valid = mtk_dp_bridge_mode_valid,
- .get_edid = mtk_dp_get_edid,
+ .edid_read = mtk_dp_edid_read,
.detect = mtk_dp_bdg_detect,
};
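The mtk_dp conversion tracks the drm_bridge move from .get_edid to .edid_read. A hedged sketch of the new contract, with my_bridge/bridge_to_my_bridge as placeholders; drm_edid_read_ddc() and drm_edid_free() are the drm_edid.h API the conversion targets:

static const struct drm_edid *
my_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
	struct my_bridge *ctx = bridge_to_my_bridge(bridge);	/* placeholder */

	return drm_edid_read_ddc(connector, ctx->ddc);
}

The caller owns the returned drm_edid and releases it with drm_edid_free(); drm_edid_raw(), as the FIXMEs above note, is only a transitional escape hatch.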
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 86133bf16326..c6bdc565e4a9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1265,19 +1265,27 @@ static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridg
return mtk_hdmi_detect(hdmi);
}
-static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
if (!hdmi->ddc_adpt)
return NULL;
- edid = drm_get_edid(connector, hdmi->ddc_adpt);
- if (!edid)
- return NULL;
- hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
- return edid;
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc_adpt);
+ if (drm_edid) {
+ /*
+ * FIXME: This should use !connector->display_info.has_audio (or
+ * !connector->display_info.is_hdmi) from a path that has read
+ * the EDID and called drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
+
+ hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
+ }
+
+ return drm_edid;
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
@@ -1417,7 +1425,7 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
.atomic_enable = mtk_hdmi_bridge_atomic_enable,
.detect = mtk_hdmi_bridge_detect,
- .get_edid = mtk_hdmi_bridge_get_edid,
+ .edid_read = mtk_hdmi_bridge_edid_read,
};
static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index cb674966e9ac..17a5cca007e2 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -312,7 +312,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
/* Encoder Initialization */
- ret = meson_encoder_cvbs_init(priv);
+ ret = meson_encoder_cvbs_probe(priv);
if (ret)
goto exit_afbcd;
@@ -326,12 +326,12 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
}
- ret = meson_encoder_hdmi_init(priv);
+ ret = meson_encoder_hdmi_probe(priv);
if (ret)
goto exit_afbcd;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- ret = meson_encoder_dsi_init(priv);
+ ret = meson_encoder_dsi_probe(priv);
if (ret)
goto exit_afbcd;
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 3407450435e2..d1191de855d9 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -219,7 +219,7 @@ static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
};
-int meson_encoder_cvbs_init(struct meson_drm *priv)
+int meson_encoder_cvbs_probe(struct meson_drm *priv)
{
struct drm_device *drm = priv->drm;
struct meson_encoder_cvbs *meson_encoder_cvbs;
@@ -240,10 +240,9 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);
- if (!meson_encoder_cvbs->next_bridge) {
- dev_err(priv->dev, "Failed to find CVBS Connector bridge\n");
- return -EPROBE_DEFER;
- }
+ if (!meson_encoder_cvbs->next_bridge)
+ return dev_err_probe(priv->dev, -EPROBE_DEFER,
+ "Failed to find CVBS Connector bridge\n");
/* CVBS Encoder Bridge */
meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
@@ -259,10 +258,9 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
/* Encoder */
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder,
DRM_MODE_ENCODER_TVDAC);
- if (ret) {
- dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to init CVBS encoder\n");
meson_encoder_cvbs->encoder.possible_crtcs = BIT(0);
@@ -276,10 +274,10 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
/* Initialize & attach Bridge Connector */
connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder);
- if (IS_ERR(connector)) {
- dev_err(priv->dev, "Unable to create CVBS bridge connector\n");
- return PTR_ERR(connector);
- }
+ if (IS_ERR(connector))
+ return dev_err_probe(priv->dev, PTR_ERR(connector),
+ "Unable to create CVBS bridge connector\n");
+
drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs;
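The dev_err_probe() conversions in the meson encoders collapse the log-then-return idiom. A small sketch with a placeholder helper: dev_err_probe() logs at error level for real errors, logs at debug level and records the reason in debugfs devices_deferred for -EPROBE_DEFER, and returns the error unchanged, so it can sit directly in a return statement.

static int attach_next_bridge(struct device *dev, struct drm_bridge *next)
{
	if (!next)
		return dev_err_probe(dev, -EPROBE_DEFER,
				     "Failed to find downstream bridge\n");

	return 0;
}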
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
index 09710fec3c66..7b7bc85c03f7 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
@@ -24,7 +24,7 @@ struct meson_cvbs_mode {
/* Modes supported by the CVBS output */
extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
-int meson_encoder_cvbs_init(struct meson_drm *priv);
+int meson_encoder_cvbs_probe(struct meson_drm *priv);
void meson_encoder_cvbs_remove(struct meson_drm *priv);
#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 311b91630fbe..7816902f5907 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -100,7 +100,7 @@ static const struct drm_bridge_funcs meson_encoder_dsi_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
};
-int meson_encoder_dsi_init(struct meson_drm *priv)
+int meson_encoder_dsi_probe(struct meson_drm *priv)
{
struct meson_encoder_dsi *meson_encoder_dsi;
struct device_node *remote;
@@ -118,10 +118,9 @@ int meson_encoder_dsi_init(struct meson_drm *priv)
}
meson_encoder_dsi->next_bridge = of_drm_find_bridge(remote);
- if (!meson_encoder_dsi->next_bridge) {
- dev_dbg(priv->dev, "Failed to find DSI transceiver bridge\n");
- return -EPROBE_DEFER;
- }
+ if (!meson_encoder_dsi->next_bridge)
+ return dev_err_probe(priv->dev, -EPROBE_DEFER,
+ "Failed to find DSI transceiver bridge\n");
/* DSI Encoder Bridge */
meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
@@ -135,19 +134,17 @@ int meson_encoder_dsi_init(struct meson_drm *priv)
/* Encoder */
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_dsi->encoder,
DRM_MODE_ENCODER_DSI);
- if (ret) {
- dev_err(priv->dev, "Failed to init DSI encoder: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to init DSI encoder\n");
meson_encoder_dsi->encoder.possible_crtcs = BIT(0);
/* Attach DSI Encoder Bridge to Encoder */
ret = drm_bridge_attach(&meson_encoder_dsi->encoder, &meson_encoder_dsi->bridge, NULL, 0);
- if (ret) {
- dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to attach bridge\n");
/*
* We should have now in place:
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.h b/drivers/gpu/drm/meson/meson_encoder_dsi.h
index 9277d7015193..85d5b61805f2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.h
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.h
@@ -7,7 +7,7 @@
#ifndef __MESON_ENCODER_DSI_H
#define __MESON_ENCODER_DSI_H
-int meson_encoder_dsi_init(struct meson_drm *priv);
+int meson_encoder_dsi_probe(struct meson_drm *priv);
void meson_encoder_dsi_remove(struct meson_drm *priv);
#endif /* __MESON_ENCODER_DSI_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index c4686568c9ca..0593a1cde906 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -323,19 +323,31 @@ static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- struct edid *edid;
if (!encoder_hdmi->cec_notifier)
return;
if (status == connector_status_connected) {
- edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector);
- if (!edid)
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
+
+ drm_edid = drm_bridge_edid_read(encoder_hdmi->next_bridge,
+ encoder_hdmi->connector);
+ if (!drm_edid)
return;
+ /*
+ * FIXME: The CEC physical address should be set using
+ * cec_notifier_set_phys_addr(encoder_hdmi->cec_notifier,
+ * connector->display_info.source_physical_address) from a path
+ * that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ edid = drm_edid_raw(drm_edid);
+
cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
- kfree(edid);
+ drm_edid_free(drm_edid);
} else
cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
}
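
The hunk above moves from the raw EDID API to the opaque struct drm_edid
one. A sketch of the ownership rules it relies on (inferred from the
calls used here): drm_edid_raw() returns a borrowed view into the
drm_edid object, so only drm_edid_free() is called and the old
kfree(edid) disappears. The same conversion recurs in the msm and
omapdrm hunks further down.

	drm_edid = drm_bridge_edid_read(bridge, connector);
	if (drm_edid) {
		/* borrowed pointer, owned by drm_edid -- never kfree() it */
		const struct edid *edid = drm_edid_raw(drm_edid);

		/* ... consume edid, e.g. for the CEC physical address ... */
		drm_edid_free(drm_edid);	/* releases the raw view too */
	}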
@@ -354,7 +366,7 @@ static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
};
-int meson_encoder_hdmi_init(struct meson_drm *priv)
+int meson_encoder_hdmi_probe(struct meson_drm *priv)
{
struct meson_encoder_hdmi *meson_encoder_hdmi;
struct platform_device *pdev;
@@ -374,8 +386,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote);
if (!meson_encoder_hdmi->next_bridge) {
- dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n");
- ret = -EPROBE_DEFER;
+ ret = dev_err_probe(priv->dev, -EPROBE_DEFER,
+ "Failed to find HDMI transceiver bridge\n");
goto err_put_node;
}
@@ -393,7 +405,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
- dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret);
+ dev_err_probe(priv->dev, ret, "Failed to init HDMI encoder\n");
goto err_put_node;
}
@@ -403,7 +415,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
- dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
+ dev_err_probe(priv->dev, ret, "Failed to attach bridge\n");
goto err_put_node;
}
@@ -411,8 +423,9 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm,
&meson_encoder_hdmi->encoder);
if (IS_ERR(meson_encoder_hdmi->connector)) {
- dev_err(priv->dev, "Unable to create HDMI bridge connector\n");
- ret = PTR_ERR(meson_encoder_hdmi->connector);
+ ret = dev_err_probe(priv->dev,
+ PTR_ERR(meson_encoder_hdmi->connector),
+ "Unable to create HDMI bridge connector\n");
goto err_put_node;
}
drm_connector_attach_encoder(meson_encoder_hdmi->connector,
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
index a6cd38eb5f71..fd5485875db8 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
@@ -7,7 +7,7 @@
#ifndef __MESON_ENCODER_HDMI_H
#define __MESON_ENCODER_HDMI_H
-int meson_encoder_hdmi_init(struct meson_drm *priv);
+int meson_encoder_hdmi_probe(struct meson_drm *priv);
void meson_encoder_hdmi_remove(struct meson_drm *priv);
#endif /* __MESON_ENCODER_HDMI_H */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 2fb18b782b05..54fce00e2136 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -146,14 +146,13 @@ int mgag200_device_preinit(struct mga_device *mdev)
}
mdev->vram_res = res;
- /* Don't fail on errors, but performance might be reduced. */
- devm_arch_io_reserve_memtype_wc(dev->dev, res->start, resource_size(res));
- devm_arch_phys_wc_add(dev->dev, res->start, resource_size(res));
-
- mdev->vram = devm_ioremap(dev->dev, res->start, resource_size(res));
+ mdev->vram = devm_ioremap_wc(dev->dev, res->start, resource_size(res));
if (!mdev->vram)
return -ENOMEM;
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_phys_wc_add(dev->dev, res->start, resource_size(res));
+
return 0;
}
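
The reordering above follows from devm_ioremap_wc() establishing the
write-combined mapping itself: the explicit memtype reservation becomes
redundant, and devm_arch_phys_wc_add() remains only as a best-effort
MTRR hint for pre-PAT hardware, now placed after the mapping it
describes. In sketch form (assumptions as stated):

	vram = devm_ioremap_wc(dev, res->start, resource_size(res));
	if (!vram)
		return -ENOMEM;
	/* best effort; failure only costs performance */
	devm_arch_phys_wc_add(dev, res->start, resource_size(res));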
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 0f0d59938c3a..0eb769dd76ce 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -14,13 +14,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
@@ -717,17 +717,23 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
{
struct mga_device *mdev = to_mga_device(connector->dev);
- int ret;
+ const struct drm_edid *drm_edid;
+ int count;
/*
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
mutex_lock(&mdev->rmmio_lock);
- ret = drm_connector_helper_get_modes_from_ddc(connector);
+
+ drm_edid = drm_edid_read(connector);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+
mutex_unlock(&mdev->rmmio_lock);
- return ret;
+ return count;
}
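
The open-coded sequence above is the standard struct drm_edid read path,
spelled out here as part of the drm_edid conversion. It tolerates a
failed read because each helper is NULL-safe (an assumption based on the
contract of these helpers, not spelled out in the patch):

	drm_edid = drm_edid_read(connector);		/* NULL on failure */
	drm_edid_connector_update(connector, drm_edid);	/* NULL clears stale info */
	count = drm_edid_connector_add_modes(connector);/* 0 modes without EDID */
	drm_edid_free(drm_edid);			/* NULL-safe */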
/*
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index d37d599aec27..c8e1bbebdffe 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -11,6 +11,7 @@
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <drm/display/drm_dp_aux_bus.h>
+#include <drm/drm_edid.h>
#include "msm_drv.h"
#include "msm_kms.h"
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f5e01471b0b0..4a5b5112227f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -236,24 +236,33 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
}
-static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
uint32_t hdmi_ctrl;
hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
- edid = drm_get_edid(connector, hdmi->i2c);
+ drm_edid = drm_edid_read_ddc(connector, hdmi->i2c);
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+ if (drm_edid) {
+ /*
+ * FIXME: This should use connector->display_info.is_hdmi from a
+ * path that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
- return edid;
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+ }
+
+ return drm_edid;
}
static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
@@ -290,12 +299,12 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
}
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
- .pre_enable = msm_hdmi_bridge_pre_enable,
- .post_disable = msm_hdmi_bridge_post_disable,
- .mode_set = msm_hdmi_bridge_mode_set,
- .mode_valid = msm_hdmi_bridge_mode_valid,
- .get_edid = msm_hdmi_bridge_get_edid,
- .detect = msm_hdmi_bridge_detect,
+ .pre_enable = msm_hdmi_bridge_pre_enable,
+ .post_disable = msm_hdmi_bridge_post_disable,
+ .mode_set = msm_hdmi_bridge_mode_set,
+ .mode_valid = msm_hdmi_bridge_mode_valid,
+ .edid_read = msm_hdmi_bridge_edid_read,
+ .detect = msm_hdmi_bridge_detect,
};
static void
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a34917b048f9..4310ad71870b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -449,7 +449,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
}
-/**
+/*
* Sets up registers for the given mode/adjusted_mode pair.
*
* The clocks, CRTCs and outputs attached to this CRTC must be off.
@@ -625,7 +625,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
return ret;
}
-/**
+/*
* Sets up registers for the given mode/adjusted_mode pair.
*
* The clocks, CRTCs and outputs attached to this CRTC must be off.
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 5f490fbf1877..83355dbc15ee 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -32,6 +32,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 00cc7d1abaa3..56dcd25db1ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -405,27 +405,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
-{
- *n = 0;
-
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
- pl[*n].mem_type = TTM_PL_VRAM;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_GART) {
- pl[*n].mem_type = TTM_PL_TT;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
- pl[*n].mem_type = TTM_PL_SYSTEM;
- pl[(*n)++].flags = 0;
- }
-}
-
-static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
@@ -452,10 +431,6 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
nvbo->placements[i].fpfn = fpfn;
nvbo->placements[i].lpfn = lpfn;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = fpfn;
- nvbo->busy_placements[i].lpfn = lpfn;
- }
}
}
@@ -463,15 +438,32 @@ void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
uint32_t busy)
{
- struct ttm_placement *pl = &nvbo->placement;
+ unsigned int *n = &nvbo->placement.num_placement;
+ struct ttm_place *pl = nvbo->placements;
- pl->placement = nvbo->placements;
- set_placement_list(nvbo->placements, &pl->num_placement, domain);
+ domain |= busy;
- pl->busy_placement = nvbo->busy_placements;
- set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- domain | busy);
+ *n = 0;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+ pl[*n].mem_type = TTM_PL_VRAM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+ pl[*n].mem_type = TTM_PL_TT;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+ pl[*n].mem_type = TTM_PL_SYSTEM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ nvbo->placement.placement = nvbo->placements;
set_placement_range(nvbo, domain);
}
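
A worked example of the single-list conversion above, assuming
domain = VRAM and busy = GART. After "domain |= busy" both placements
are emitted and only the busy-derived one is flagged:

	/* placements[0]: TTM_PL_VRAM, flags = 0                    (preferred) */
	/* placements[1]: TTM_PL_TT,   flags = TTM_PL_FLAG_FALLBACK (busy)      */

TTM skips TTM_PL_FLAG_FALLBACK entries on the initial allocation attempt
and tries them only when the preferred placements cannot be satisfied,
which is what the separate busy_placements[] array used to express.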
@@ -1314,11 +1306,6 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placements[i].lpfn = mappable;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = 0;
- nvbo->busy_placements[i].lpfn = mappable;
- }
-
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 70c551921a9e..e9dfab6a8156 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -15,7 +15,6 @@ struct nouveau_bo {
struct ttm_placement placement;
u32 valid_domains;
struct ttm_place placements[3];
- struct ttm_place busy_placements[3];
bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index a2df4918340c..0608cabed058 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -35,7 +35,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_util.h>
@@ -44,6 +43,7 @@
struct nvkm_i2c_port;
struct dcb_output;
+struct edid;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_backlight {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index adf01ca9e035..2af3615c5205 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file nouveau_ioc32.c
*
* 32-bit ioctl compatibility routines for the Nouveau DRM.

@@ -38,7 +38,7 @@
#include "nouveau_ioctl.h"
-/**
+/*
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 5e4565c5011a..b4da82ddbb6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -112,7 +112,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
- unsigned target, cmd, priority;
+ unsigned target, cmd;
unsigned long addr, end;
struct mm_struct *mm;
@@ -136,9 +136,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
}
- priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
- priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
-
/* FIXME support CPU target ie all target value < GPU_VRAM */
target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
target &= NOUVEAU_SVM_BIND_TARGET_MASK;
@@ -926,15 +923,14 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
unsigned long addr, u64 *pfns, unsigned long npages)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
- int ret;
args->p.addr = addr;
args->p.size = npages << PAGE_SHIFT;
mutex_lock(&svmm->mutex);
- ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args,
- struct_size(args, p.phys, npages), NULL);
+ nvif_object_ioctl(&svmm->vmm->vmm.object, args,
+ struct_size(args, p.phys, npages), NULL);
mutex_unlock(&svmm->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 5d3190c05250..6daeb7f0b09b 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -452,13 +452,12 @@ nvif_outp_edid_get(struct nvif_outp *outp, u8 **pedid)
if (ret)
goto done;
- *pedid = kmalloc(args->size, GFP_KERNEL);
+ *pedid = kmemdup(args->data, args->size, GFP_KERNEL);
if (!*pedid) {
ret = -ENOMEM;
goto done;
}
- memcpy(*pedid, args->data, args->size);
ret = args->size;
done:
kfree(args);
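
kmemdup() is exactly the kmalloc() + memcpy() pair it replaces here; a
sketch of the equivalence (illustrative only):

	void *kmemdup_equiv(const void *src, size_t len, gfp_t gfp)
	{
		void *p = kmalloc(len, gfp);

		if (p)
			memcpy(p, src, len);
		return p;
	}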
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c494a1ff2d57..986e8d547c94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1040,7 +1040,7 @@ gf100_gr_zbc_init(struct gf100_gr *gr)
}
}
-/**
+/*
* Wait until GR goes idle. GR is considered idle if it is disabled by the
* MC (0x200) register, or GR is not busy and a context switch is not in
* progress.
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
index f36a359d4531..bd104a030243 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -218,7 +218,7 @@ nvkm_acr_lsfw_load_sig_image_desc_v2(struct nvkm_subdev *subdev,
const struct firmware *hsbl;
const struct nvfw_ls_hsbl_bin_hdr *hdr;
const struct nvfw_ls_hsbl_hdr *hshdr;
- u32 loc, sig, cnt, *meta;
+ u32 sig, cnt, *meta;
ret = nvkm_firmware_load_name(subdev, path, "hs_bl_sig", ver, &hsbl);
if (ret)
@@ -227,7 +227,6 @@ nvkm_acr_lsfw_load_sig_image_desc_v2(struct nvkm_subdev *subdev,
hdr = nvfw_ls_hsbl_bin_hdr(subdev, hsbl->data);
hshdr = nvfw_ls_hsbl_hdr(subdev, hsbl->data + hdr->header_offset);
meta = (u32 *)(hsbl->data + hshdr->meta_data_offset);
- loc = *(u32 *)(hsbl->data + hshdr->patch_loc);
sig = *(u32 *)(hsbl->data + hshdr->patch_sig);
cnt = *(u32 *)(hsbl->data + hshdr->num_sig);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index 142079403864..b54f044c4483 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -575,7 +575,7 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
* init opcode handlers
*****************************************************************************/
-/**
+/*
* init_reserved - stub for various unknown/unused single-byte opcodes
*
*/
@@ -602,7 +602,7 @@ init_reserved(struct nvbios_init *init)
init->offset += length;
}
-/**
+/*
* INIT_DONE - opcode 0x71
*
*/
@@ -613,7 +613,7 @@ init_done(struct nvbios_init *init)
init->offset = 0x0000;
}
-/**
+/*
* INIT_IO_RESTRICT_PROG - opcode 0x32
*
*/
@@ -650,7 +650,7 @@ init_io_restrict_prog(struct nvbios_init *init)
trace("}]\n");
}
-/**
+/*
* INIT_REPEAT - opcode 0x33
*
*/
@@ -676,7 +676,7 @@ init_repeat(struct nvbios_init *init)
init->repeat = repeat;
}
-/**
+/*
* INIT_IO_RESTRICT_PLL - opcode 0x34
*
*/
@@ -716,7 +716,7 @@ init_io_restrict_pll(struct nvbios_init *init)
trace("}]\n");
}
-/**
+/*
* INIT_END_REPEAT - opcode 0x36
*
*/
@@ -732,7 +732,7 @@ init_end_repeat(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_COPY - opcode 0x37
*
*/
@@ -759,7 +759,7 @@ init_copy(struct nvbios_init *init)
init_wrvgai(init, port, index, data);
}
-/**
+/*
* INIT_NOT - opcode 0x38
*
*/
@@ -771,7 +771,7 @@ init_not(struct nvbios_init *init)
init_exec_inv(init);
}
-/**
+/*
* INIT_IO_FLAG_CONDITION - opcode 0x39
*
*/
@@ -788,7 +788,7 @@ init_io_flag_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_GENERIC_CONDITION - opcode 0x3a
*
*/
@@ -840,7 +840,7 @@ init_generic_condition(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_IO_MASK_OR - opcode 0x3b
*
*/
@@ -859,7 +859,7 @@ init_io_mask_or(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, index, data &= ~(1 << or));
}
-/**
+/*
* INIT_IO_OR - opcode 0x3c
*
*/
@@ -878,7 +878,7 @@ init_io_or(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, index, data | (1 << or));
}
-/**
+/*
* INIT_ANDN_REG - opcode 0x47
*
*/
@@ -895,7 +895,7 @@ init_andn_reg(struct nvbios_init *init)
init_mask(init, reg, mask, 0);
}
-/**
+/*
* INIT_OR_REG - opcode 0x48
*
*/
@@ -912,7 +912,7 @@ init_or_reg(struct nvbios_init *init)
init_mask(init, reg, 0, mask);
}
-/**
+/*
* INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
*
*/
@@ -942,7 +942,7 @@ init_idx_addr_latched(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_IO_RESTRICT_PLL2 - opcode 0x4a
*
*/
@@ -977,7 +977,7 @@ init_io_restrict_pll2(struct nvbios_init *init)
trace("}]\n");
}
-/**
+/*
* INIT_PLL2 - opcode 0x4b
*
*/
@@ -994,7 +994,7 @@ init_pll2(struct nvbios_init *init)
init_prog_pll(init, reg, freq);
}
-/**
+/*
* INIT_I2C_BYTE - opcode 0x4c
*
*/
@@ -1025,7 +1025,7 @@ init_i2c_byte(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_ZM_I2C_BYTE - opcode 0x4d
*
*/
@@ -1051,7 +1051,7 @@ init_zm_i2c_byte(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_ZM_I2C - opcode 0x4e
*
*/
@@ -1085,7 +1085,7 @@ init_zm_i2c(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_TMDS - opcode 0x4f
*
*/
@@ -1111,7 +1111,7 @@ init_tmds(struct nvbios_init *init)
init_wr32(init, reg + 0, addr);
}
-/**
+/*
* INIT_ZM_TMDS_GROUP - opcode 0x50
*
*/
@@ -1138,7 +1138,7 @@ init_zm_tmds_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51
*
*/
@@ -1168,7 +1168,7 @@ init_cr_idx_adr_latch(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, addr0, save0);
}
-/**
+/*
* INIT_CR - opcode 0x52
*
*/
@@ -1188,7 +1188,7 @@ init_cr(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, addr, val | data);
}
-/**
+/*
* INIT_ZM_CR - opcode 0x53
*
*/
@@ -1205,7 +1205,7 @@ init_zm_cr(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, addr, data);
}
-/**
+/*
* INIT_ZM_CR_GROUP - opcode 0x54
*
*/
@@ -1229,7 +1229,7 @@ init_zm_cr_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_CONDITION_TIME - opcode 0x56
*
*/
@@ -1256,7 +1256,7 @@ init_condition_time(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_LTIME - opcode 0x57
*
*/
@@ -1273,7 +1273,7 @@ init_ltime(struct nvbios_init *init)
mdelay(msec);
}
-/**
+/*
* INIT_ZM_REG_SEQUENCE - opcode 0x58
*
*/
@@ -1298,7 +1298,7 @@ init_zm_reg_sequence(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_PLL_INDIRECT - opcode 0x59
*
*/
@@ -1317,7 +1317,7 @@ init_pll_indirect(struct nvbios_init *init)
init_prog_pll(init, reg, freq);
}
-/**
+/*
* INIT_ZM_REG_INDIRECT - opcode 0x5a
*
*/
@@ -1336,7 +1336,7 @@ init_zm_reg_indirect(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
* INIT_SUB_DIRECT - opcode 0x5b
*
*/
@@ -1362,7 +1362,7 @@ init_sub_direct(struct nvbios_init *init)
init->offset += 3;
}
-/**
+/*
* INIT_JUMP - opcode 0x5c
*
*/
@@ -1380,7 +1380,7 @@ init_jump(struct nvbios_init *init)
init->offset += 3;
}
-/**
+/*
* INIT_I2C_IF - opcode 0x5e
*
*/
@@ -1407,7 +1407,7 @@ init_i2c_if(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_COPY_NV_REG - opcode 0x5f
*
*/
@@ -1433,7 +1433,7 @@ init_copy_nv_reg(struct nvbios_init *init)
init_mask(init, dreg, ~dmask, (data & smask) ^ sxor);
}
-/**
+/*
* INIT_ZM_INDEX_IO - opcode 0x62
*
*/
@@ -1451,7 +1451,7 @@ init_zm_index_io(struct nvbios_init *init)
init_wrvgai(init, port, index, data);
}
-/**
+/*
* INIT_COMPUTE_MEM - opcode 0x63
*
*/
@@ -1469,7 +1469,7 @@ init_compute_mem(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_RESET - opcode 0x65
*
*/
@@ -1496,7 +1496,7 @@ init_reset(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_CONFIGURE_MEM - opcode 0x66
*
*/
@@ -1555,7 +1555,7 @@ init_configure_mem(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_CONFIGURE_CLK - opcode 0x67
*
*/
@@ -1589,7 +1589,7 @@ init_configure_clk(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_CONFIGURE_PREINIT - opcode 0x68
*
*/
@@ -1615,7 +1615,7 @@ init_configure_preinit(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_IO - opcode 0x69
*
*/
@@ -1655,7 +1655,7 @@ init_io(struct nvbios_init *init)
init_wrport(init, port, data | value);
}
-/**
+/*
* INIT_SUB - opcode 0x6b
*
*/
@@ -1682,7 +1682,7 @@ init_sub(struct nvbios_init *init)
init->offset += 2;
}
-/**
+/*
* INIT_RAM_CONDITION - opcode 0x6d
*
*/
@@ -1701,7 +1701,7 @@ init_ram_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_NV_REG - opcode 0x6e
*
*/
@@ -1719,7 +1719,7 @@ init_nv_reg(struct nvbios_init *init)
init_mask(init, reg, ~mask, data);
}
-/**
+/*
* INIT_MACRO - opcode 0x6f
*
*/
@@ -1743,7 +1743,7 @@ init_macro(struct nvbios_init *init)
init->offset += 2;
}
-/**
+/*
* INIT_RESUME - opcode 0x72
*
*/
@@ -1755,7 +1755,7 @@ init_resume(struct nvbios_init *init)
init_exec_set(init, true);
}
-/**
+/*
* INIT_STRAP_CONDITION - opcode 0x73
*
*/
@@ -1773,7 +1773,7 @@ init_strap_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_TIME - opcode 0x74
*
*/
@@ -1794,7 +1794,7 @@ init_time(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_CONDITION - opcode 0x75
*
*/
@@ -1811,7 +1811,7 @@ init_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_IO_CONDITION - opcode 0x76
*
*/
@@ -1828,7 +1828,7 @@ init_io_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_ZM_REG16 - opcode 0x77
*
*/
@@ -1845,7 +1845,7 @@ init_zm_reg16(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
* INIT_INDEX_IO - opcode 0x78
*
*/
@@ -1867,7 +1867,7 @@ init_index_io(struct nvbios_init *init)
init_wrvgai(init, port, index, data | value);
}
-/**
+/*
* INIT_PLL - opcode 0x79
*
*/
@@ -1884,7 +1884,7 @@ init_pll(struct nvbios_init *init)
init_prog_pll(init, reg, freq);
}
-/**
+/*
* INIT_ZM_REG - opcode 0x7a
*
*/
@@ -1904,7 +1904,7 @@ init_zm_reg(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
* INIT_RAM_RESTRICT_PLL - opcode 0x87
*
*/
@@ -1934,7 +1934,7 @@ init_ram_restrict_pll(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_RESET_BEGUN - opcode 0x8c
*
*/
@@ -1945,7 +1945,7 @@ init_reset_begun(struct nvbios_init *init)
init->offset += 1;
}
-/**
+/*
* INIT_RESET_END - opcode 0x8d
*
*/
@@ -1956,7 +1956,7 @@ init_reset_end(struct nvbios_init *init)
init->offset += 1;
}
-/**
+/*
* INIT_GPIO - opcode 0x8e
*
*/
@@ -1972,7 +1972,7 @@ init_gpio(struct nvbios_init *init)
nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
}
-/**
+/*
* INIT_RAM_RESTRICT_ZM_GROUP - opcode 0x8f
*
*/
@@ -2010,7 +2010,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_COPY_ZM_REG - opcode 0x90
*
*/
@@ -2027,7 +2027,7 @@ init_copy_zm_reg(struct nvbios_init *init)
init_wr32(init, dreg, init_rd32(init, sreg));
}
-/**
+/*
* INIT_ZM_REG_GROUP - opcode 0x91
*
*/
@@ -2049,7 +2049,7 @@ init_zm_reg_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_XLAT - opcode 0x96
*
*/
@@ -2077,7 +2077,7 @@ init_xlat(struct nvbios_init *init)
init_mask(init, daddr, ~dmask, data);
}
-/**
+/*
* INIT_ZM_MASK_ADD - opcode 0x97
*
*/
@@ -2098,7 +2098,7 @@ init_zm_mask_add(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
* INIT_AUXCH - opcode 0x98
*
*/
@@ -2122,7 +2122,7 @@ init_auxch(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_ZM_AUXCH - opcode 0x99
*
*/
@@ -2144,7 +2144,7 @@ init_zm_auxch(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_I2C_LONG_IF - opcode 0x9a
*
*/
@@ -2183,7 +2183,7 @@ init_i2c_long_if(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_GPIO_NE - opcode 0xa9
*
*/
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 8c2faa964511..ccac88da8864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -45,7 +45,7 @@ static const struct cvb_coef gk20a_cvb_coef[] = {
/* 852 */ { 1608418, -21643, -269, 0, 763, -48},
};
-/**
+/*
* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
*/
static inline int
@@ -58,7 +58,7 @@ gk20a_volt_get_cvb_voltage(int speedo, int s_scale, const struct cvb_coef *coef)
return mv;
}
-/**
+/*
* cvb_t_mv =
* ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) +
* ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a26b77d99d52..9b8747d83ee8 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -436,11 +436,11 @@ static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
-static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *hdmi4_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct edid *edid = NULL;
+ const struct drm_edid *drm_edid = NULL;
unsigned int cec_addr;
bool need_enable;
int r;
@@ -461,13 +461,21 @@ static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
if (r)
goto done;
- edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
+ drm_edid = drm_edid_read_custom(connector, hdmi4_core_ddc_read, &hdmi->core);
done:
hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
- if (edid && edid->extensions) {
+ if (drm_edid) {
+ /*
+ * FIXME: The CEC physical address should be set using
+ * hdmi4_cec_set_phys_addr(&hdmi->core,
+ * connector->display_info.source_physical_address) from a path
+ * that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
@@ -480,7 +488,7 @@ done:
if (need_enable)
hdmi4_core_disable(&hdmi->core);
- return edid;
+ return drm_edid;
}
static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
@@ -492,7 +500,7 @@ static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
.atomic_enable = hdmi4_bridge_enable,
.atomic_disable = hdmi4_bridge_disable,
.hpd_notify = hdmi4_bridge_hpd_notify,
- .get_edid = hdmi4_bridge_get_edid,
+ .edid_read = hdmi4_bridge_edid_read,
};
static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index e6611c683857..c7ae2235ae99 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -425,11 +425,11 @@ static void hdmi5_bridge_disable(struct drm_bridge *bridge,
mutex_unlock(&hdmi->lock);
}
-static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *hdmi5_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
bool need_enable;
int idlemode;
int r;
@@ -452,7 +452,7 @@ static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
hdmi5_core_ddc_init(&hdmi->core);
- edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
+ drm_edid = drm_edid_read_custom(connector, hdmi5_core_ddc_read, &hdmi->core);
hdmi5_core_ddc_uninit(&hdmi->core);
@@ -464,7 +464,7 @@ static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
if (need_enable)
hdmi_core_disable(hdmi);
- return (struct edid *)edid;
+ return drm_edid;
}
static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
@@ -475,7 +475,7 @@ static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = hdmi5_bridge_enable,
.atomic_disable = hdmi5_bridge_disable,
- .get_edid = hdmi5_bridge_get_edid,
+ .edid_read = hdmi5_bridge_edid_read,
};
static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 8f3783742208..d037b3b8b999 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -67,61 +67,25 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
-config DRM_PANEL_BOE_TV101WUM_NL6
- tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
+config DRM_PANEL_BOE_TH101MB31UIG002_28A
+ tristate "Boe TH101MB31UIG002-28A panel"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
help
- Say Y here if you want to support for BOE TV101WUM and AUO KD101N80
- 45NA WUXGA PANEL DSI Video Mode panel
+ Say Y here if you want to enable support for Boe
+	  TH101MB31UIG002-28A TFT-LCD modules. The panel has an 800x1280
+ resolution and uses 24 bit RGB per pixel. It provides a MIPI DSI
+ interface to the host and has a built-in LED backlight.
-config DRM_PANEL_DSI_CM
- tristate "Generic DSI command mode panels"
+config DRM_PANEL_BOE_TV101WUM_NL6
+ tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
help
- DRM panel driver for DSI command mode panels with support for
- embedded and external backlights.
-
-config DRM_PANEL_LVDS
- tristate "Generic LVDS panel driver"
- depends on OF
- depends on BACKLIGHT_CLASS_DEVICE
- select VIDEOMODE_HELPERS
- help
- This driver supports LVDS panels that don't require device-specific
- handling of power supplies or control signals. It implements automatic
- backlight handling if the panel is attached to a backlight controller.
-
-config DRM_PANEL_SIMPLE
- tristate "support for simple panels (other than eDP ones)"
- depends on OF
- depends on BACKLIGHT_CLASS_DEVICE
- depends on PM
- select VIDEOMODE_HELPERS
- help
- DRM panel driver for dumb non-eDP panels that need at most a regulator
- and a GPIO to be powered up. Optionally a backlight can be attached so
- that it can be automatically turned off when the panel goes into a
- low power state.
-
-config DRM_PANEL_EDP
- tristate "support for simple Embedded DisplayPort panels"
- depends on OF
- depends on BACKLIGHT_CLASS_DEVICE
- depends on PM
- select VIDEOMODE_HELPERS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
- select DRM_KMS_HELPER
- help
- DRM panel driver for dumb eDP panels that need at most a regulator and
- a GPIO to be powered up. Optionally a backlight can be attached so
- that it can be automatically turned off when the panel goes into a
- low power state.
+	  Say Y here if you want to enable support for the BOE TV101WUM and
+	  AUO KD101N80 45NA WUXGA DSI video mode panels.
config DRM_PANEL_EBBG_FT8719
tristate "EBBG FT8719 panel driver"
@@ -162,6 +126,35 @@ config DRM_PANEL_FEIYANG_FY07024DI26A30D
Say Y if you want to enable support for panels based on the
Feiyang FY07024DI26A30-D MIPI-DSI interface.
+config DRM_PANEL_DSI_CM
+ tristate "Generic DSI command mode panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ DRM panel driver for DSI command mode panels with support for
+ embedded and external backlights.
+
+config DRM_PANEL_LVDS
+ tristate "Generic LVDS panel driver"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ This driver supports LVDS panels that don't require device-specific
+ handling of power supplies or control signals. It implements automatic
+ backlight handling if the panel is attached to a backlight controller.
+
+config DRM_PANEL_HIMAX_HX83112A
+ tristate "Himax HX83112A-based DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_KMS_HELPER
+ help
+ Say Y here if you want to enable support for Himax HX83112A-based
+ display panels, such as the one found in the Fairphone 4 smartphone.
+
config DRM_PANEL_HIMAX_HX8394
tristate "HIMAX HX8394 MIPI-DSI LCD panels"
depends on OF
@@ -251,17 +244,6 @@ config DRM_PANEL_JADARD_JD9365DA_H3
WXGA MIPI DSI panel. The panel support TFT dot matrix LCD with
800RGBx1280 dots at maximum.
-config DRM_PANEL_JDI_LT070ME05000
- tristate "JDI LT070ME05000 WUXGA DSI panel"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Say Y here if you want to enable support for JDI DSI video mode
- panel as found in Google Nexus 7 (2013) devices.
- The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
- 24 bit per pixel.
-
config DRM_PANEL_JDI_LPM102A188A
tristate "JDI LPM102A188A DSI panel"
depends on OF && GPIOLIB
@@ -273,6 +255,17 @@ config DRM_PANEL_JDI_LPM102A188A
The panel has a 2560×1800 resolution. It provides a MIPI DSI interface
to the host.
+config DRM_PANEL_JDI_LT070ME05000
+ tristate "JDI LT070ME05000 WUXGA DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for JDI DSI video mode
+ panel as found in Google Nexus 7 (2013) devices.
+ The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
+	  24 bits per pixel.
+
config DRM_PANEL_JDI_R63452
tristate "JDI R63452 Full HD DSI panel"
depends on OF
@@ -326,12 +319,6 @@ config DRM_PANEL_LEADTEK_LTK500HD1829
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
-config DRM_PANEL_SAMSUNG_LD9040
- tristate "Samsung LD9040 RGB/SPI panel"
- depends on OF && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select VIDEOMODE_HELPERS
-
config DRM_PANEL_LG_LB035Q02
tristate "LG LB035Q024573 RGB panel"
depends on GPIOLIB && OF && SPI
@@ -359,6 +346,17 @@ config DRM_PANEL_MAGNACHIP_D53E6EA8966
with the Magnachip D53E6EA8966 panel IC. This panel receives
video data via DSI but commands via 9-bit SPI using DBI.
+config DRM_PANEL_MANTIX_MLAF057WE51
+ tristate "Mantix MLAF057WE51-X MIPI-DSI LCD panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Mantix
+ MLAF057WE51-X MIPI DSI panel as e.g. used in the Librem 5. It
+	  has a resolution of 720x1440 pixels, a built-in backlight and a touch
+ controller.
+
config DRM_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 RGB panel"
depends on GPIOLIB && OF && SPI
@@ -438,6 +436,16 @@ config DRM_PANEL_NOVATEK_NT36672A
around the Novatek NT36672A display controller, such as some
Tianma panels used in a few Xiaomi Poco F1 mobile phones.
+config DRM_PANEL_NOVATEK_NT36672E
+ tristate "Novatek NT36672E DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the Novatek NT36672E
+	  DSI video mode LCD panel module. The panel has a resolution of
+	  1080x2408 and uses 24 bit RGB per pixel.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -447,17 +455,6 @@ config DRM_PANEL_NOVATEK_NT39016
Say Y here if you want to enable support for the panels built
around the Novatek NT39016 display controller.
-config DRM_PANEL_MANTIX_MLAF057WE51
- tristate "Mantix MLAF057WE51-X MIPI-DSI LCD panel"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Say Y here if you want to enable support for the Mantix
- MLAF057WE51-X MIPI DSI panel as e.g. used in the Librem 5. It
- has a resolution of 720x1440 pixels, a built in backlight and touch
- controller.
-
config DRM_PANEL_OLIMEX_LCD_OLINUXINO
tristate "Olimex LCD-OLinuXino panel"
depends on OF
@@ -554,6 +551,12 @@ config DRM_PANEL_RONBO_RB070D30
Say Y here if you want to enable support for Ronbo Electronics
RB070D30 1024x600 DSI panel.
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
+ tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
+ depends on OF
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_ATNA33XC20
tristate "Samsung ATNA33XC20 eDP panel"
depends on OF
@@ -577,6 +580,12 @@ config DRM_PANEL_SAMSUNG_DB7430
DB7430 DPI display controller used in such devices as the
LMS397KF04 480x800 DPI panel.
+config DRM_PANEL_SAMSUNG_LD9040
+ tristate "Samsung LD9040 RGB/SPI panel"
+ depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
@@ -642,12 +651,6 @@ config DRM_PANEL_SAMSUNG_S6E63M0_DSI
Say Y here if you want to be able to access the Samsung
S6E63M0 panel using DSI.
-config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
- tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
- depends on OF
- select DRM_MIPI_DSI
- select VIDEOMODE_HELPERS
-
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
@@ -746,15 +749,6 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
-config DRM_PANEL_SYNAPTICS_R63353
- tristate "Synaptics R63353-based panels"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Say Y if you want to enable support for panels based on the
- Synaptics R63353 controller.
-
config DRM_PANEL_SONY_ACX565AKM
tristate "Sony ACX565AKM panel"
depends on GPIOLIB && OF && SPI
@@ -794,6 +788,43 @@ config DRM_PANEL_STARTEK_KD070FHFID015
with a resolution of 1024 x 600 pixels. It provides a MIPI DSI interface to
the host, a built-in LED backlight and touch controller.
+config DRM_PANEL_EDP
+ tristate "support for simple Embedded DisplayPort panels"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
+ select VIDEOMODE_HELPERS
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_DP_AUX_BUS
+ select DRM_KMS_HELPER
+ help
+ DRM panel driver for dumb eDP panels that need at most a regulator and
+ a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+config DRM_PANEL_SIMPLE
+ tristate "support for simple panels (other than eDP ones)"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
+ select VIDEOMODE_HELPERS
+ help
+ DRM panel driver for dumb non-eDP panels that need at most a regulator
+ and a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+config DRM_PANEL_SYNAPTICS_R63353
+ tristate "Synaptics R63353-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Synaptics R63353 controller.
+
config DRM_PANEL_TDO_TL070WSH30
tristate "TDO TL070WSH30 DSI panel"
depends on OF
@@ -837,6 +868,17 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_R66451
+ tristate "Visionox R66451"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for Visionox
+ R66451 1080x2340 AMOLED DSI panel.
+
config DRM_PANEL_VISIONOX_RM69299
tristate "Visionox RM69299"
depends on OF
@@ -854,17 +896,6 @@ config DRM_PANEL_VISIONOX_VTDR6130
Say Y here if you want to enable support for Visionox
VTDR6130 1080x2400 AMOLED DSI panel.
-config DRM_PANEL_VISIONOX_R66451
- tristate "Visionox R66451"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- help
- Say Y here if you want to enable support for Visionox
- R66451 1080x2340 AMOLED DSI panel.
-
config DRM_PANEL_WIDECHIPS_WS2401
tristate "Widechips WS2401 DPI panel driver"
depends on SPI && GPIOLIB
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d94a644d0a6c..f156d7fa0bcc 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.
obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
@@ -41,6 +43,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35560) += panel-novatek-nt35560.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672E) += panel-novatek-nt36672e.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index 11b64acbe8a9..e225840b0d67 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -854,26 +854,20 @@ static int panel_add(struct panel_info *pinfo)
pinfo->pp18_gpio = devm_gpiod_get(dev, "pp18", GPIOD_OUT_HIGH);
if (IS_ERR(pinfo->pp18_gpio)) {
- ret = PTR_ERR(pinfo->pp18_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get pp18 gpio: %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(pinfo->pp18_gpio),
+ "failed to get pp18 gpio\n");
}
pinfo->pp33_gpio = devm_gpiod_get(dev, "pp33", GPIOD_OUT_HIGH);
if (IS_ERR(pinfo->pp33_gpio)) {
- ret = PTR_ERR(pinfo->pp33_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get pp33 gpio: %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(pinfo->pp33_gpio),
+ "failed to get pp33 gpio\n");
}
pinfo->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
if (IS_ERR(pinfo->enable_gpio)) {
- ret = PTR_ERR(pinfo->enable_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get enable gpio: %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(pinfo->enable_gpio),
+ "failed to get enable gpio\n");
}
drm_panel_init(&pinfo->base, dev, &panel_funcs,
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
new file mode 100644
index 000000000000..763e9f8342d3
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Alexander Warnecke <awarnecke002@hotmail.com>
+ * Copyright (c) 2023 Manuel Traut <manut@mecka.net>
+ * Copyright (c) 2023 Dang Huynh <danct12@riseup.net>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct boe_th101mb31ig002 {
+ struct drm_panel panel;
+
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *power;
+ struct gpio_desc *enable;
+ struct gpio_desc *reset;
+
+ enum drm_panel_orientation orientation;
+};
+
+static void boe_th101mb31ig002_reset(struct boe_th101mb31ig002 *ctx)
+{
+ gpiod_direction_output(ctx->reset, 0);
+ usleep_range(10, 100);
+ gpiod_direction_output(ctx->reset, 1);
+ usleep_range(10, 100);
+ gpiod_direction_output(ctx->reset, 0);
+ usleep_range(5000, 6000);
+}
+
+static int boe_th101mb31ig002_enable(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ mipi_dsi_dcs_write_seq(dsi, 0xE0, 0xAB, 0xBA);
+ mipi_dsi_dcs_write_seq(dsi, 0xE1, 0xBA, 0xAB);
+ mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x10, 0x01, 0x47, 0xFF);
+ mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xB6, 0xB0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x7C, 0x65, 0x55, 0x49, 0x46, 0x36,
+ 0x3B, 0x24, 0x3D, 0x3C, 0x3D, 0x5C, 0x4C,
+ 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7C,
+ 0x65, 0x55, 0x49, 0x46, 0x36, 0x3B, 0x24,
+ 0x3D, 0x3C, 0x3D, 0x5C, 0x4C, 0x55, 0x47,
+ 0x46, 0x39, 0x26, 0x06);
+	mipi_dsi_dcs_write_seq(dsi, 0xC0, 0xFF, 0x87, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0F,
+ 0x00, 0x00, 0xC1);
+ mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x54, 0x94, 0x02, 0x85, 0x9F, 0x00,
+ 0x7F, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xFF, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC3, 0x86, 0x46, 0x05, 0x05, 0x1C, 0x1C,
+ 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
+ 0x0F, 0x0F, 0x0D, 0x0D, 0x13, 0x13, 0x11,
+ 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC4, 0x07, 0x07, 0x04, 0x04, 0x1C, 0x1C,
+ 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
+ 0x0E, 0x0E, 0x0C, 0x0C, 0x12, 0x12, 0x10,
+ 0x10, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC6, 0x2A, 0x2A);
+ mipi_dsi_dcs_write_seq(dsi, 0xC8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq(dsi, 0xCA, 0xCB, 0x43);
+ mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x0E, 0x4B, 0x4B, 0x20, 0x19, 0x6B,
+ 0x06, 0xB3);
+ mipi_dsi_dcs_write_seq(dsi, 0xD2, 0xE3, 0x2B, 0x38, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x00, 0x01, 0x00, 0x0E, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xE6, 0x80, 0x01, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF);
+ mipi_dsi_dcs_write_seq(dsi, 0xF0, 0x12, 0x03, 0x20, 0x00, 0xFF);
+ mipi_dsi_dcs_write_seq(dsi, 0xF3, 0x00);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set panel on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_th101mb31ig002_disable(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ dev_err(dev, "Failed to set panel off: %d\n", ret);
+
+ msleep(120);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+
+ return 0;
+}
+
+static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ gpiod_set_value_cansleep(ctx->enable, 0);
+ regulator_disable(ctx->power);
+
+ return 0;
+}
+
+static int boe_th101mb31ig002_prepare(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_enable(ctx->power);
+ if (ret) {
+ dev_err(dev, "Failed to enable power supply: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ctx->enable, 1);
+ msleep(50);
+ boe_th101mb31ig002_reset(ctx);
+ boe_th101mb31ig002_enable(panel);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_th101mb31ig002_default_mode = {
+ .clock = 73500,
+ .hdisplay = 800,
+ .hsync_start = 800 + 64,
+ .hsync_end = 800 + 64 + 16,
+ .htotal = 800 + 64 + 16 + 64,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 2,
+ .vsync_end = 1280 + 2 + 4,
+ .vtotal = 1280 + 2 + 4 + 12,
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
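
A worked check of the timings in the mode above (editorial aside, not
part of the patch):

	/*
	 * The sync fields encode hdisplay + front porch / + sync /
	 * + back porch, so:
	 *   htotal  = 800 + 64 + 16 + 64 = 944
	 *   vtotal  = 1280 + 2 + 4 + 12  = 1298
	 *   refresh = clock * 1000 / (htotal * vtotal)
	 *           = 73500000 / (944 * 1298) ~= 60 Hz
	 */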
+
+static int boe_th101mb31ig002_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev,
+ &boe_th101mb31ig002_default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "Failed to add mode %ux%u@%u\n",
+ boe_th101mb31ig002_default_mode.hdisplay,
+ boe_th101mb31ig002_default_mode.vdisplay,
+ drm_mode_vrefresh(&boe_th101mb31ig002_default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
+ drm_connector_set_panel_orientation(connector, ctx->orientation);
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static enum drm_panel_orientation
+boe_th101mb31ig002_get_orientation(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+
+ return ctx->orientation;
+}
+
+static const struct drm_panel_funcs boe_th101mb31ig002_funcs = {
+ .prepare = boe_th101mb31ig002_prepare,
+ .unprepare = boe_th101mb31ig002_unprepare,
+ .disable = boe_th101mb31ig002_disable,
+ .get_modes = boe_th101mb31ig002_get_modes,
+ .get_orientation = boe_th101mb31ig002_get_orientation,
+};
+
+static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct boe_th101mb31ig002 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_MODE_LPM;
+
+ ctx->power = devm_regulator_get(&dsi->dev, "power");
+ if (IS_ERR(ctx->power))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power),
+ "Failed to get power regulator\n");
+
+ ctx->enable = devm_gpiod_get(&dsi->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->enable))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->enable),
+ "Failed to get enable GPIO\n");
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
+ "Failed to get reset GPIO\n");
+
+ ret = of_drm_get_panel_orientation(dsi->dev.of_node,
+ &ctx->orientation);
+ if (ret)
+ return dev_err_probe(&dsi->dev, ret,
+ "Failed to get orientation\n");
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err_probe(&dsi->dev, ret,
+ "Failed to attach panel to DSI host\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void boe_th101mb31ig002_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_th101mb31ig002 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_th101mb31ig002_of_match[] = {
+ { .compatible = "boe,th101mb31ig002-28a", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_th101mb31ig002_of_match);
+
+static struct mipi_dsi_driver boe_th101mb31ig002_driver = {
+ .driver = {
+ .name = "boe-th101mb31ig002-28a",
+ .of_match_table = boe_th101mb31ig002_of_match,
+ },
+ .probe = boe_th101mb31ig002_dsi_probe,
+ .remove = boe_th101mb31ig002_dsi_remove,
+};
+module_mipi_dsi_driver(boe_th101mb31ig002_driver);
+
+MODULE_AUTHOR("Alexander Warnecke <awarnecke002@hotmail.com>");
+MODULE_DESCRIPTION("BOE TH101MB31IG002-28A MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index c4c0f08e9202..bc08814954f9 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1871,6 +1871,8 @@ static int boe_panel_add(struct boe_panel *boe)
gpiod_set_value(boe->enable_gpio, 0);
+ boe->base.prepare_prev_first = true;
+
drm_panel_init(&boe->base, dev, &boe_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index a0b6f69b916f..bd71d239272a 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -71,6 +71,21 @@ struct panel_delay {
unsigned int hpd_absent;
/**
+ * @powered_on_to_enable: Time between panel powered on and enable.
+ *
+ * The minimum time, in milliseconds, that needs to have passed
+ * between when the panel was powered on and when enable may begin.
+ *
+ * This is (T3+T4+T5+T6+T8)-min on eDP timing diagrams, i.e. the time
+ * from when the power supply is enabled until we can turn the
+ * backlight on and see valid data.
+ *
+ * This doesn't normally need to be set if timings are already met by
+ * prepare_to_enable or enable.
+ */
+ unsigned int powered_on_to_enable;
+
+ /**
* @prepare_to_enable: Time between prepare and enable.
*
* The minimum time, in milliseconds, that needs to have passed
@@ -216,6 +231,7 @@ struct panel_edp {
bool prepared;
ktime_t prepared_time;
+ ktime_t powered_on_time;
ktime_t unprepared_time;
const struct panel_desc *desc;
@@ -413,8 +429,7 @@ static int panel_edp_unprepare(struct drm_panel *panel)
if (!p->prepared)
return 0;
- pm_runtime_mark_last_busy(panel->dev);
- ret = pm_runtime_put_autosuspend(panel->dev);
+ ret = pm_runtime_put_sync_suspend(panel->dev);
if (ret < 0)
return ret;
p->prepared = false;
@@ -455,6 +470,8 @@ static int panel_edp_prepare_once(struct panel_edp *p)
gpiod_set_value_cansleep(p->enable_gpio, 1);
+ p->powered_on_time = ktime_get_boottime();
+
delay = p->desc->delay.hpd_reliable;
if (p->no_hpd)
delay = max(delay, p->desc->delay.hpd_absent);
@@ -579,6 +596,8 @@ static int panel_edp_enable(struct drm_panel *panel)
panel_edp_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+ panel_edp_wait(p->powered_on_time, p->desc->delay.powered_on_to_enable);
+
p->enabled = true;
return 0;
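Both waits above go through the driver's panel_edp_wait() helper, which sleeps only for whatever portion of the minimum delay has not already elapsed since the recorded timestamp (powered_on_time is captured in panel_edp_prepare_once(), as shown earlier in this hunk series). A sketch of that logic, close to but not quoted from the helper in panel-edp.c:

static void panel_edp_wait(ktime_t start_ktime, unsigned int min_ms)
{
        ktime_t now_ktime, min_ktime;

        if (!min_ms)
                return;

        min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
        now_ktime = ktime_get_boottime();

        /* only sleep for the part of the delay that hasn't passed yet */
        if (ktime_before(now_ktime, min_ktime))
                msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
}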
@@ -983,19 +1002,6 @@ static const struct panel_desc auo_b101ean01 = {
},
};
-static const struct drm_display_mode auo_b116xa3_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -1837,6 +1843,13 @@ static const struct panel_delay delay_200_500_p2e80 = {
.prepare_to_enable = 80,
};
+static const struct panel_delay delay_200_500_e50_p2e80 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .prepare_to_enable = 80,
+};
+
static const struct panel_delay delay_200_500_p2e100 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1874,6 +1887,13 @@ static const struct panel_delay delay_200_500_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e200_d200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 200,
+};
+
static const struct panel_delay delay_200_500_e200_d10 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1887,6 +1907,13 @@ static const struct panel_delay delay_200_150_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e50_po2e200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .powered_on_to_enable = 200,
+};
+
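The delay struct names in this table encode their non-zero fields: delay_<hpd_absent>_<unprepare>, followed by optional e<enable>, d<disable>, p2e<prepare_to_enable> and po2e<powered_on_to_enable> suffixes. Decoding the entry just added:

/*
 * delay_200_500_e50_po2e200
 *   hpd_absent           = 200 ms
 *   unprepare            = 500 ms
 *   enable               =  50 ms
 *   powered_on_to_enable = 200 ms
 */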
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
@@ -1912,7 +1939,9 @@ static const struct panel_delay delay_200_150_e200 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x105c, &delay_200_500_e50, "B116XTN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x125c, &delay_200_500_e50, "Unknown"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
@@ -1921,56 +1950,91 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
- EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
- &auo_b116xa3_mode),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
- EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1",
- &auo_b116xa3_mode),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0607, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0608, &delay_200_500_e50, "NT116WHM-N11"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0668, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x068f, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x06e5, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0705, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0715, &delay_200_150_e200, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0717, &delay_200_500_e50_po2e200, "NV133FHM-N42"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0731, &delay_200_500_e80, "NT116WHM-N42"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0741, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0744, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x074c, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0751, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0754, &delay_200_500_e50_po2e200, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0771, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0797, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d3, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f6, &delay_200_500_e200, "NT140FHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f8, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0813, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0827, &delay_200_500_e50_p2e80, "NT140WHM-N44 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0843, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0848, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0849, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x08b2, &delay_200_500_e200, "NT140WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x096e, &delay_200_500_e50_po2e200, "NV116WHM-T07 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0993, &delay_200_500_e80, "NV116WHM-T14 V8.0"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ad, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1138, &innolux_n116bca_ea1.delay, "N116BCA-EA1-RC4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1141, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1145, &delay_200_500_e80_d50, "N116BCN-EB1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x114a, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"),
+
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"),
@@ -1979,11 +2043,25 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x8c4d, &delay_200_150_e200, "R140NWFM R1"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x044f, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x1118, &delay_200_500_e50, "KD116N29-30NK-A005"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0000, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x048d, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0497, &delay_200_500_e200_d200, "LP116WH7-SPB1"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x052c, &delay_200_500_e200_d200, "LP133WF2-SPL7"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0537, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x054a, &delay_200_500_e200_d200, "LP116WH8-SPC1"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0567, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"),
+
EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
new file mode 100644
index 000000000000..466c27012abf
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2024 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer specific DSI commands */
+#define HX83112A_SETPOWER1 0xb1
+#define HX83112A_SETDISP 0xb2
+#define HX83112A_SETDRV 0xb4
+#define HX83112A_SETEXTC 0xb9
+#define HX83112A_SETBANK 0xbd
+#define HX83112A_SETPTBA 0xbf
+#define HX83112A_SETDGCLUT 0xc1
+#define HX83112A_SETTCON 0xc7
+#define HX83112A_SETCLOCK 0xcb
+#define HX83112A_SETPANEL 0xcc
+#define HX83112A_SETPOWER2 0xd2
+#define HX83112A_SETGIP0 0xd3
+#define HX83112A_SETGIP1 0xd5
+#define HX83112A_SETGIP2 0xd6
+#define HX83112A_SETGIP3 0xd8
+#define HX83112A_SETTP1 0xe7
+#define HX83112A_UNKNOWN1 0xe9
+
+struct hx83112a_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[3];
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct hx83112a_panel *to_hx83112a_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx83112a_panel, panel);
+}
+
+static void hx83112a_reset(struct hx83112a_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(50);
+}
+
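Note that gpiod_set_value_cansleep() takes the logical line state, so the 0/1/0 sequence above means deassert, assert, deassert regardless of the physical polarity declared in the device tree (panel reset lines are commonly GPIO_ACTIVE_LOW). The same sequence, annotated:

        gpiod_set_value_cansleep(ctx->reset_gpio, 0);   /* release reset */
        msleep(20);
        gpiod_set_value_cansleep(ctx->reset_gpio, 1);   /* assert reset */
        msleep(20);
        gpiod_set_value_cansleep(ctx->reset_gpio, 0);   /* release, panel boots */
        msleep(50);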
+static int hx83112a_on(struct hx83112a_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETEXTC, 0x83, 0x11, 0x2a);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER1,
+ 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDISP,
+ 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19,
+ 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV,
+ 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0,
+ 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0x00, 0x14, 0x15, 0x00, 0x29, 0x11, 0x07,
+ 0x12, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV,
+ 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00,
+ 0x53);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTCON,
+ 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPANEL, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER2, 0x2b, 0x2b);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08,
+ 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07,
+ 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10,
+ 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31,
+ 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00,
+ 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0,
+ 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP1,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
+ 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03,
+ 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31,
+ 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP2,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
+ 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00,
+ 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31,
+ 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e,
+ 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff,
+ 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
+ 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50,
+ 0x20, 0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05,
+ 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
+ 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
+ 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc3);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETCLOCK, 0xd1, 0xd6);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc6);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPTBA, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(150);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(50);
+
+ return 0;
+}
+
+static int hx83112a_disable(struct drm_panel *panel)
+{
+ struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int hx83112a_prepare(struct drm_panel *panel)
+{
+ struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ hx83112a_reset(ctx);
+
+ ret = hx83112a_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx83112a_unprepare(struct drm_panel *panel)
+{
+ struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode hx83112a_mode = {
+ .clock = (1080 + 28 + 8 + 8) * (2340 + 27 + 5 + 5) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 28,
+ .hsync_end = 1080 + 28 + 8,
+ .htotal = 1080 + 28 + 8 + 8,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 27,
+ .vsync_end = 2340 + 27 + 5,
+ .vtotal = 2340 + 27 + 5 + 5,
+ .width_mm = 67,
+ .height_mm = 145,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
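The .clock field is in kHz and follows directly from the timings: htotal * vtotal * refresh / 1000. Checking the expression above:

/*
 * htotal = 1080 + 28 + 8 + 8 = 1124
 * vtotal = 2340 + 27 + 5 + 5 = 2377
 * clock  = 1124 * 2377 * 60 / 1000 = 160304 kHz (~160.3 MHz),
 * which drm_mode_vrefresh() maps back to ~60 Hz.
 */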
+static int hx83112a_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &hx83112a_mode);
+}
+
+static const struct drm_panel_funcs hx83112a_panel_funcs = {
+ .prepare = hx83112a_prepare,
+ .unprepare = hx83112a_unprepare,
+ .disable = hx83112a_disable,
+ .get_modes = hx83112a_get_modes,
+};
+
+static int hx83112a_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hx83112a_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd1";
+ ctx->supplies[1].supply = "vsn";
+ ctx->supplies[2].supply = "vsp";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &hx83112a_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hx83112a_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx83112a_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id hx83112a_of_match[] = {
+ { .compatible = "djn,9a-3r063-1102b" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx83112a_of_match);
+
+static struct mipi_dsi_driver hx83112a_driver = {
+ .probe = hx83112a_probe,
+ .remove = hx83112a_remove,
+ .driver = {
+ .name = "panel-himax-hx83112a",
+ .of_match_table = hx83112a_of_match,
+ },
+};
+module_mipi_dsi_driver(hx83112a_driver);
+
+MODULE_DESCRIPTION("DRM driver for hx83112a-equipped DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 30919c872ac8..9d87cc1a357e 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -646,26 +646,17 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
return -EINVAL;
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset_gpio)) {
- dev_err(dev, "cannot get reset gpio\n");
- return PTR_ERR(ctx->reset_gpio);
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "cannot get reset gpio\n");
ctx->vci = devm_regulator_get(dev, "vci");
- if (IS_ERR(ctx->vci)) {
- ret = PTR_ERR(ctx->vci);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request vci regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->vci))
+ return dev_err_probe(dev, PTR_ERR(ctx->vci), "Failed to request vci regulator\n");
ctx->iovcc = devm_regulator_get(dev, "iovcc");
- if (IS_ERR(ctx->iovcc)) {
- ret = PTR_ERR(ctx->iovcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
mipi_dsi_set_drvdata(dsi, ctx);
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 39e408c9f762..a4c9a5cb9811 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
@@ -21,25 +22,224 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct ltk500hd1829_cmd {
+ char cmd;
+ char data;
+};
+
+struct ltk500hd1829_desc {
+ const struct drm_display_mode *mode;
+ const struct ltk500hd1829_cmd *init;
+ unsigned int num_init;
+};
+
struct ltk500hd1829 {
struct device *dev;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
struct regulator *vcc;
struct regulator *iovcc;
+ const struct ltk500hd1829_desc *panel_desc;
bool prepared;
};
-struct ltk500hd1829_cmd {
- char cmd;
- char data;
+static const struct ltk500hd1829_cmd ltk101b4029w_init[] = {
+ /* Page0 */
+ { 0xE0, 0x00 },
+ /* PASSWORD */
+ { 0xE1, 0x93 },
+ { 0xE2, 0x65 },
+ { 0xE3, 0xF8 },
+ { 0x80, 0x03 }, /* 0X03:4-LANE; 0X02:3-LANE; 0X01:2-LANE */
+ /* Page1 */
+ { 0xE0, 0x01 },
+ /* Set VCOM */
+ { 0x00, 0x00 },
+ { 0x01, 0x6F },
+ /* Set Gamma Power, VGMP,VGMN,VGSP,VGSN */
+ { 0x17, 0x00 },
+ { 0x18, 0xAF }, /* 4.3V */
+ { 0x19, 0x01 }, /* 0.3V */
+ { 0x1A, 0x00 },
+ { 0x1B, 0xAF }, /* 4.3V */
+ { 0x1C, 0x01 }, /* 0.3V */
+ /* Set Gate Power */
+ { 0x1F, 0x3E }, /* VGH_R = 15V */
+ { 0x20, 0x28 }, /* VGL_R = -12V */
+ { 0x21, 0x28 }, /* VGL_R2 = -12V */
+ { 0x22, 0x7E },
+ /* SETPANEL */
+ { 0x35, 0x26 },
+ { 0x37, 0x09 },
+ /* SET RGBCYC */
+ { 0x38, 0x04 },
+ { 0x39, 0x00 },
+ { 0x3A, 0x01 },
+ { 0x3C, 0x7C },
+ { 0x3D, 0xFF },
+ { 0x3E, 0xFF },
+ { 0x3F, 0x7F },
+ /* Set TCON */
+ { 0x40, 0x06 }, /* RSO = 800 RGB */
+ { 0x41, 0xA0 }, /* LN = 640->1280 line */
+ { 0x42, 0x81 },
+ { 0x43, 0x08 }, /* VFP = 8 */
+ { 0x44, 0x0B }, /* VBP = 12 */
+ { 0x45, 0x28 }, /* HBP = 40 */
+ /* power voltage */
+ { 0x55, 0x0F }, /* DCDCM = 0001, JD PWR_IC */
+ { 0x57, 0x69 },
+ { 0x59, 0x0A }, /* VCL = -2.9V */
+ { 0x5A, 0x28 }, /* VGH = 15V */
+ { 0x5B, 0x14 }, /* VGL = -11V */
+ /* Gamma */
+ { 0x5D, 0x7C },
+ { 0x5E, 0x65 },
+ { 0x5F, 0x55 },
+ { 0x60, 0x47 },
+ { 0x61, 0x43 },
+ { 0x62, 0x32 },
+ { 0x63, 0x34 },
+ { 0x64, 0x1C },
+ { 0x65, 0x33 },
+ { 0x66, 0x31 },
+ { 0x67, 0x30 },
+ { 0x68, 0x4E },
+ { 0x69, 0x3C },
+ { 0x6A, 0x44 },
+ { 0x6B, 0x35 },
+ { 0x6C, 0x31 },
+ { 0x6D, 0x23 },
+ { 0x6E, 0x11 },
+ { 0x6F, 0x00 },
+ { 0x70, 0x7C },
+ { 0x71, 0x65 },
+ { 0x72, 0x55 },
+ { 0x73, 0x47 },
+ { 0x74, 0x43 },
+ { 0x75, 0x32 },
+ { 0x76, 0x34 },
+ { 0x77, 0x1C },
+ { 0x78, 0x33 },
+ { 0x79, 0x31 },
+ { 0x7A, 0x30 },
+ { 0x7B, 0x4E },
+ { 0x7C, 0x3C },
+ { 0x7D, 0x44 },
+ { 0x7E, 0x35 },
+ { 0x7F, 0x31 },
+ { 0x80, 0x23 },
+ { 0x81, 0x11 },
+ { 0x82, 0x00 },
+ /* Page2, for GIP */
+ { 0xE0, 0x02 },
+ /* GIP_L Pin mapping */
+ { 0x00, 0x1E },
+ { 0x01, 0x1E },
+ { 0x02, 0x41 },
+ { 0x03, 0x41 },
+ { 0x04, 0x43 },
+ { 0x05, 0x43 },
+ { 0x06, 0x1F },
+ { 0x07, 0x1F },
+ { 0x08, 0x35 },
+ { 0x09, 0x1F },
+ { 0x0A, 0x15 },
+ { 0x0B, 0x15 },
+ { 0x0C, 0x1F },
+ { 0x0D, 0x47 },
+ { 0x0E, 0x47 },
+ { 0x0F, 0x45 },
+ { 0x10, 0x45 },
+ { 0x11, 0x4B },
+ { 0x12, 0x4B },
+ { 0x13, 0x49 },
+ { 0x14, 0x49 },
+ { 0x15, 0x1F },
+ /* GIP_R Pin mapping */
+ { 0x16, 0x1E },
+ { 0x17, 0x1E },
+ { 0x18, 0x40 },
+ { 0x19, 0x40 },
+ { 0x1A, 0x42 },
+ { 0x1B, 0x42 },
+ { 0x1C, 0x1F },
+ { 0x1D, 0x1F },
+ { 0x1E, 0x35 },
+ { 0x1F, 0x1F },
+ { 0x20, 0x15 },
+ { 0x21, 0x15 },
+ { 0x22, 0x1f },
+ { 0x23, 0x46 },
+ { 0x24, 0x46 },
+ { 0x25, 0x44 },
+ { 0x26, 0x44 },
+ { 0x27, 0x4A },
+ { 0x28, 0x4A },
+ { 0x29, 0x48 },
+ { 0x2A, 0x48 },
+ { 0x2B, 0x1F },
+ /* GIP Timing */
+ { 0x58, 0x40 },
+ { 0x5B, 0x30 },
+ { 0x5C, 0x03 },
+ { 0x5D, 0x30 },
+ { 0x5E, 0x01 },
+ { 0x5F, 0x02 },
+ { 0x63, 0x14 },
+ { 0x64, 0x6A },
+ { 0x67, 0x73 },
+ { 0x68, 0x05 },
+ { 0x69, 0x14 },
+ { 0x6A, 0x6A },
+ { 0x6B, 0x08 },
+ { 0x6C, 0x00 },
+ { 0x6D, 0x00 },
+ { 0x6E, 0x00 },
+ { 0x6F, 0x88 },
+ { 0x77, 0xDD },
+ { 0x79, 0x0E },
+ { 0x7A, 0x03 },
+ { 0x7D, 0x14 },
+ { 0x7E, 0x6A },
+ /* Page4 */
+ { 0xE0, 0x04 },
+ { 0x09, 0x11 },
+ { 0x0E, 0x48 },
+ { 0x2B, 0x2B },
+ { 0x2D, 0x03 },
+ { 0x2E, 0x44 },
+ /* Page0 */
+ { 0xE0, 0x00 },
+ { 0xE6, 0x02 },
+ { 0xE7, 0x0C },
+};
+
+static const struct drm_display_mode ltk101b4029w_mode = {
+ .hdisplay = 800,
+ .hsync_start = 800 + 18,
+ .hsync_end = 800 + 18 + 18,
+ .htotal = 800 + 18 + 18 + 18,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 24,
+ .vsync_end = 1280 + 24 + 4,
+ .vtotal = 1280 + 24 + 4 + 8,
+ .clock = 67330,
+ .width_mm = 136,
+ .height_mm = 218,
+};
+
+static const struct ltk500hd1829_desc ltk101b4029w_data = {
+ .mode = &ltk101b4029w_mode,
+ .init = ltk101b4029w_init,
+ .num_init = ARRAY_SIZE(ltk101b4029w_init),
};
/*
* There is no description in the Reference Manual about these commands.
* We received them from the vendor, so just use them as is.
*/
-static const struct ltk500hd1829_cmd init_code[] = {
+static const struct ltk500hd1829_cmd ltk500hd1829_init[] = {
{ 0xE0, 0x00 },
{ 0xE1, 0x93 },
{ 0xE2, 0x65 },
@@ -260,6 +460,26 @@ static const struct ltk500hd1829_cmd init_code[] = {
{ 0x35, 0x00 },
};
+static const struct drm_display_mode ltk500hd1829_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 50,
+ .hsync_end = 720 + 50 + 50,
+ .htotal = 720 + 50 + 50 + 50,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 12,
+ .clock = 69217,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk500hd1829_desc ltk500hd1829_data = {
+ .mode = &ltk500hd1829_mode,
+ .init = ltk500hd1829_init,
+ .num_init = ARRAY_SIZE(ltk500hd1829_init),
+};
+
static inline
struct ltk500hd1829 *panel_to_ltk500hd1829(struct drm_panel *panel)
{
@@ -324,8 +544,8 @@ static int ltk500hd1829_prepare(struct drm_panel *panel)
/* tRT: >= 5ms */
usleep_range(5000, 6000);
- for (i = 0; i < ARRAY_SIZE(init_code); i++) {
- ret = mipi_dsi_generic_write(dsi, &init_code[i],
+ for (i = 0; i < ctx->panel_desc->num_init; i++) {
+ ret = mipi_dsi_generic_write(dsi, &ctx->panel_desc->init[i],
sizeof(struct ltk500hd1829_cmd));
if (ret < 0) {
dev_err(panel->dev, "failed to write init cmds: %d\n", ret);
@@ -359,31 +579,17 @@ disable_vcc:
return ret;
}
-static const struct drm_display_mode default_mode = {
- .hdisplay = 720,
- .hsync_start = 720 + 50,
- .hsync_end = 720 + 50 + 50,
- .htotal = 720 + 50 + 50 + 50,
- .vdisplay = 1280,
- .vsync_start = 1280 + 30,
- .vsync_end = 1280 + 30 + 4,
- .vtotal = 1280 + 30 + 4 + 12,
- .clock = 69217,
- .width_mm = 62,
- .height_mm = 110,
-};
-
static int ltk500hd1829_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode);
if (!mode) {
dev_err(ctx->dev, "failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ ctx->panel_desc->mode->hdisplay, ctx->panel_desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->panel_desc->mode));
return -ENOMEM;
}
@@ -413,6 +619,10 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
+ ctx->panel_desc = of_device_get_match_data(dev);
+ if (!ctx->panel_desc)
+ return -EINVAL;
+
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset gpio\n");
@@ -492,7 +702,14 @@ static void ltk500hd1829_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id ltk500hd1829_of_match[] = {
- { .compatible = "leadtek,ltk500hd1829", },
+ {
+ .compatible = "leadtek,ltk101b4029w",
+ .data = &ltk101b4029w_data,
+ },
+ {
+ .compatible = "leadtek,ltk500hd1829",
+ .data = &ltk500hd1829_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ltk500hd1829_of_match);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 83a9cf53d269..d3bfdfc9cff6 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -36,6 +36,9 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#define NT35510_CMD_CORRECT_GAMMA BIT(0)
+#define NT35510_CMD_CONTROL_DISPLAY BIT(1)
+
#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
#define MCS_CMD_READ_ID1 0xDA
#define MCS_CMD_READ_ID2 0xDB
@@ -112,18 +115,33 @@
/* AVDD and AVEE setting 3 bytes */
#define NT35510_P1_AVDD_LEN 3
#define NT35510_P1_AVEE_LEN 3
+#define NT35510_P1_VCL_LEN 3
#define NT35510_P1_VGH_LEN 3
#define NT35510_P1_VGL_LEN 3
#define NT35510_P1_VGP_LEN 3
#define NT35510_P1_VGN_LEN 3
+#define NT35510_P1_VCMOFF_LEN 2
/* BT1CTR thru BT5CTR setting 3 bytes */
#define NT35510_P1_BT1CTR_LEN 3
#define NT35510_P1_BT2CTR_LEN 3
+#define NT35510_P1_BT3CTR_LEN 3
#define NT35510_P1_BT4CTR_LEN 3
#define NT35510_P1_BT5CTR_LEN 3
/* 52 gamma parameters times two per color: positive and negative */
#define NT35510_P1_GAMMA_LEN 52
+#define NT35510_WRCTRLD_BCTRL BIT(5)
+#define NT35510_WRCTRLD_A BIT(4)
+#define NT35510_WRCTRLD_DD BIT(3)
+#define NT35510_WRCTRLD_BL BIT(2)
+#define NT35510_WRCTRLD_DB BIT(1)
+#define NT35510_WRCTRLD_G BIT(0)
+
+#define NT35510_WRCABC_OFF 0
+#define NT35510_WRCABC_UI_MODE 1
+#define NT35510_WRCABC_STILL_MODE 2
+#define NT35510_WRCABC_MOVING_MODE 3
+
/**
* struct nt35510_config - the display-specific NT35510 configuration
*
@@ -172,6 +190,14 @@ struct nt35510_config {
*/
const struct drm_display_mode mode;
/**
+ * @mode_flags: DSI operation mode related flags
+ */
+ unsigned long mode_flags;
+ /**
+ * @cmds: bitmask of optional DSI command sequences to send
+ * (NT35510_CMD_* flags)
+ */
+ u32 cmds;
+ /**
* @avdd: setting for AVDD ranging from 0x00 = 6.5V to 0x14 = 4.5V
* in 0.1V steps the default is 0x05 which means 6.0V
*/
@@ -221,6 +247,25 @@ struct nt35510_config {
*/
u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
/**
+ * @vcl: setting for VCL ranging from 0x00 = -2.5V to 0x11 = -4.0V
+ * in 1V steps, the default is 0x00 which means -2.5V
+ */
+ u8 vcl[NT35510_P1_VCL_LEN];
+ /**
+ * @bt3ctr: setting for boost power control for the VCL step-up
+ * circuit (3)
+ * bits 0..2 in the lower nibble control CLCK, the booster clock
+ * frequency; the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble control BTCL, the boosting
+ * amplification for the step-up circuit.
+ * 0 = Disable
+ * 1 = -0.5 x VDDB
+ * 2 = -1 x VDDB
+ * 3 = -2 x VDDB
+ * The defaults are 4 and 2 yielding 0x24
+ */
+ u8 bt3ctr[NT35510_P1_BT3CTR_LEN];
+ /**
* @vgh: setting for VGH ranging from 0x00 = 7.0V to 0x0B = 18.0V
* in 1V steps, the default is 0x08 which means 15V
*/
@@ -274,6 +319,113 @@ struct nt35510_config {
*/
u8 vgn[NT35510_P1_VGN_LEN];
/**
+ * @vcmoff: setting the DC VCOM offset voltage
+ * The first byte contains bit 8 of VCM in bit 0 and VCMOFFSEL in bit 4.
+ * The second byte contains bits 0..7 of VCM.
+ * VCMOFFSEL is the common voltage offset mode.
+ * VCMOFFSEL 0x00 = VCOM .. 0x01 Gamma.
+ * The default is 0x00.
+ * VCM is the VCOM output voltage (VCMOFFSEL = 0) or the internal register
+ * offset for gamma voltage (VCMOFFSEL = 1).
+ * VCM 0x00 = 0V/0 .. 0x118 = 3.5V/280 in steps of 12.5mV/1step
+ * The default is 0x00 = 0V/0.
+ */
+ u8 vcmoff[NT35510_P1_VCMOFF_LEN];
+ /**
+ * @dopctr: setting optional control for display
+ * ERR bits 0..1 in the first byte is the ERR pin output signal setting.
+ * 0 = Disable, ERR pin output low
+ * 1 = ERR pin output CRC error only
+ * 2 = ERR pin output ECC error only
+ * 3 = ERR pin output CRC and ECC error
+ * The default is 0.
+ * N565 bit 2 in the first byte is the 16-bit/pixel format selection.
+ * 0 = R[4:0] + G[5:3] & G[2:0] + B[4:0]
+ * 1 = G[2:0] + R[4:0] & B[4:0] + G[5:3]
+ * The default is 0.
+ * DIS_EoTP_HS bit 3 in the first byte controls "DSI protocol violation"
+ * error reporting.
+ * 0 = report the error
+ * 1 = do not report the error
+ * DSIM bit 4 in the first byte is the video mode data type enable
+ * 0 = Video mode data type disable
+ * 1 = Video mode data type enable
+ * The default is 0.
+ * DSIG bit 5 in the first byte is the generic r/w data type enable
+ * 0 = Generic r/w disable
+ * 1 = Generic r/w enable
+ * The default is 0.
+ * DSITE bit 6 in the first byte is TE line enable
+ * 0 = TE line is disabled
+ * 1 = TE line is enabled
+ * The default is 0.
+ * RAMKP bit 7 in the first byte is the frame memory keep/loss in
+ * sleep-in mode
+ * 0 = contents loss in sleep-in
+ * 1 = contents keep in sleep-in
+ * The default is 0.
+ * CRL bit 1 in the second byte is the source driver data shift
+ * direction selection. This bit is XORed with bit RSMX
+ * of 3600h command.
+ * 0 (RSMX = 0) = S1 -> S1440
+ * 0 (RSMX = 1) = S1440 -> S1
+ * 1 (RSMX = 0) = S1440 -> S1
+ * 1 (RSMX = 1) = S1 -> S1440
+ * The default is 0.
+ * CTB bit 2 in the second byte is the vertical scanning direction
+ * selection for gate control signals. This bit is XORed
+ * with bit ML of 3600h command.
+ * 0 (ML = 0) = Forward (top -> bottom)
+ * 0 (ML = 1) = Reverse (bottom -> top)
+ * 1 (ML = 0) = Reverse (bottom -> top)
+ * 1 (ML = 1) = Forward (top -> bottom)
+ * The default is 0.
+ * CRGB bit 3 in the second byte is RGB-BGR order selection. This
+ * bit is XORed with bit RGB of 3600h command.
+ * 0 (RGB = 0) = RGB/Normal
+ * 0 (RGB = 1) = BGR/RB swap
+ * 1 (RGB = 0) = BGR/RB swap
+ * 1 (RGB = 1) = RGB/Normal
+ * The default is 0.
+ * TE_PWR_SEL bit 4 in the second byte is the TE output voltage
+ * level selection (only valid when DSTB_SEL = 0 or DSTB_SEL = 1,
+ * VSEL = High and VDDI = 1.665~3.3V).
+ * 0 = TE output voltage level is VDDI
+ * 1 = TE output voltage level is VDDA
+ * The default is 0.
+ */
+ u8 dopctr[NT35510_P0_DOPCTR_LEN];
+ /**
+ * @madctl: Memory data access control
+ * RSMY bit 0 is flip vertical. Flips the display image top to bottom.
+ * RSMX bit 1 is flip horizontal. Flips the display image left to right.
+ * MH bit 2 is the horizontal refresh order.
+ * RGB bit 3 is the RGB-BGR order.
+ * 0 = RGB color sequence
+ * 1 = BGR color sequence
+ * ML bit 4 is the vertical refresh order.
+ * MV bit 5 is the row/column exchange.
+ * MX bit 6 is the column address order.
+ * MY bit 7 is the row address order.
+ */
+ u8 madctl;
+ /**
+ * @sdhdtctr: source output data hold time
+ * 0x00..0x3F = 0..31.5us in steps of 0.5us
+ * The default is 0x05 = 2.5us.
+ */
+ u8 sdhdtctr;
+ /**
+ * @gseqctr: EQ control for gate signals
+ * GFEQ_XX[3:0]: time setting of EQ step for falling edge in steps
+ * of 0.5us.
+ * The default is 0x07 = 3.5us
+ * GREQ_XX[7:4]: time setting of EQ step for rising edge in steps
+ * of 0.5us.
+ * The default is 0x07 = 3.5us
+ */
+ u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
+ /**
* @sdeqctr: Source driver control settings, first byte is
* 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
* mode 2 uses three steps meaning EQS3 is not used in mode
@@ -343,6 +495,43 @@ struct nt35510_config {
* @gamma_corr_neg_b: Blue gamma correction parameters, negative
*/
u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
+ /**
+ * @wrdisbv: write display brightness
+ * A value of 0x00 means the lowest brightness and 0xff means
+ * the highest brightness.
+ * The default is 0x00.
+ */
+ u8 wrdisbv;
+ /**
+ * @wrctrld: write control display
+ * G bit 0 selects gamma curve: 0 = Manual, 1 = Automatic
+ * DB bit 1 selects display brightness: 0 = Manual, 1 = Automatic
+ * BL bit 2 controls the backlight: 0 = Off, 1 = On
+ * DD bit 3 controls display dimming: 0 = Off, 1 = On
+ * A bit 4 controls LABC block: 0 = Off, 1 = On
+ * BCTRL bit 5 controls brightness block: 0 = Off, 1 = On
+ */
+ u8 wrctrld;
+ /**
+ * @wrcabc: write content adaptive brightness control
+ * It is possible to use 4 different modes for the content adaptive
+ * image functionality:
+ * 0: Off
+ * 1: User Interface Image (UI-Mode)
+ * 2: Still Picture Image (Still-Mode)
+ * 3: Moving Picture Image (Moving-Mode)
+ * The default is 0
+ */
+ u8 wrcabc;
+ /**
+ * @wrcabcmb: write CABC minimum brightness
+ * Set the minimum brightness value of the display for the CABC
+ * function.
+ * A value of 0x00 means the lowest brightness for CABC and 0xff
+ * means the highest brightness for CABC.
+ * The default is 0x00.
+ */
+ u8 wrcabcmb;
};
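The new @cmds bitmask lets each per-display config opt in to command sequences that not every panel tolerates. In sketch form, matching the nt35510_power_on() and nt35510_prepare() hunks later in this patch:

        if (nt->conf->cmds & NT35510_CMD_CORRECT_GAMMA) {
                /* send the six NT35510_P1_SET_GAMMA_* tables */
        }

        if (nt->conf->cmds & NT35510_CMD_CONTROL_DISPLAY) {
                /* send the WRCTRLD, WRCABC and WRCABCMB DCS writes */
        }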
/**
@@ -486,6 +675,16 @@ static int nt35510_setup_power(struct nt35510 *nt)
nt->conf->bt2ctr);
if (ret)
return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCL,
+ NT35510_P1_VCL_LEN,
+ nt->conf->vcl);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT3CTR,
+ NT35510_P1_BT3CTR_LEN,
+ nt->conf->bt3ctr);
+ if (ret)
+ return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
NT35510_P1_VGH_LEN,
nt->conf->vgh);
@@ -522,6 +721,12 @@ static int nt35510_setup_power(struct nt35510 *nt)
if (ret)
return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF,
+ NT35510_P1_VCMOFF_LEN,
+ nt->conf->vcmoff);
+ if (ret)
+ return ret;
+
/* Typically 10 ms */
usleep_range(10000, 20000);
@@ -536,46 +741,28 @@ static int nt35510_setup_display(struct nt35510 *nt)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
const struct nt35510_config *conf = nt->conf;
- u8 dopctr[NT35510_P0_DOPCTR_LEN];
- u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
- /* FIXME: set up any rotation (assume none for now) */
- u8 addr_mode = NT35510_ROTATE_0_SETTING;
- u8 val;
int ret;
- /* Enable TE, EoTP and RGB pixel format */
- dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
- NT35510_DOPCTR_0_N565;
- dopctr[1] = NT35510_DOPCTR_1_CTB;
ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
NT35510_P0_DOPCTR_LEN,
- dopctr);
+ conf->dopctr);
if (ret)
return ret;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
- sizeof(addr_mode));
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &conf->madctl,
+ sizeof(conf->madctl));
if (ret < 0)
return ret;
- /*
- * Source data hold time, default 0x05 = 2.5us
- * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
- * 0x0A = 5us
- */
- val = 0x0A;
- ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
- sizeof(val));
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &conf->sdhdtctr,
+ sizeof(conf->sdhdtctr));
if (ret < 0)
return ret;
- /* EQ control for gate signals, 0x00 = 0 us */
- gseqctr[0] = 0x00;
- gseqctr[1] = 0x00;
ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
NT35510_P0_GSEQCTR_LEN,
- gseqctr);
+ conf->gseqctr);
if (ret)
return ret;
@@ -719,36 +906,38 @@ static int nt35510_power_on(struct nt35510 *nt)
if (ret)
return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_pos_r);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_pos_g);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_pos_b);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_neg_r);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_neg_g);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_neg_b);
- if (ret)
- return ret;
+ if (nt->conf->cmds & NT35510_CMD_CORRECT_GAMMA) {
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_b);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_b);
+ if (ret)
+ return ret;
+ }
/* Set up stuff in manufacturer control, page 0 */
ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
@@ -827,6 +1016,26 @@ static int nt35510_prepare(struct drm_panel *panel)
/* Up to 120 ms */
usleep_range(120000, 150000);
+ if (nt->conf->cmds & NT35510_CMD_CONTROL_DISPLAY) {
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &nt->conf->wrctrld,
+ sizeof(nt->conf->wrctrld));
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_POWER_SAVE,
+ &nt->conf->wrcabc,
+ sizeof(nt->conf->wrcabc));
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS,
+ &nt->conf->wrcabcmb,
+ sizeof(nt->conf->wrcabcmb));
+ if (ret < 0)
+ return ret;
+ }
+
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret) {
dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
@@ -896,7 +1105,6 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
*/
dsi->hs_rate = 349440000;
dsi->lp_rate = 9600000;
- dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
/*
* Every new incarnation of this display must have a unique
@@ -908,6 +1116,8 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return -ENODEV;
}
+ dsi->mode_flags = nt->conf->mode_flags;
+
nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
nt->supplies[1].supply = "vddi"; /* 1.65-3.3V */
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
@@ -923,7 +1133,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(nt->reset_gpio)) {
dev_err(dev, "error getting RESET GPIO\n");
return PTR_ERR(nt->reset_gpio);
@@ -952,7 +1162,10 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(bl);
}
bl->props.max_brightness = 255;
- bl->props.brightness = 255;
+ if (nt->conf->cmds & NT35510_CMD_CONTROL_DISPLAY)
+ bl->props.brightness = nt->conf->wrdisbv;
+ else
+ bl->props.brightness = 255;
bl->props.power = FB_BLANK_POWERDOWN;
nt->panel.backlight = bl;
}
@@ -1030,6 +1243,8 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
.flags = 0,
},
+ .mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .cmds = NT35510_CMD_CORRECT_GAMMA,
/* 0x09: AVDD = 5.6V */
.avdd = { 0x09, 0x09, 0x09 },
/* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
@@ -1038,6 +1253,10 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.avee = { 0x09, 0x09, 0x09 },
/* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
.bt2ctr = { 0x24, 0x24, 0x24 },
+ /* VBCLA: -2.5V, VBCLB: -2.5V, VBCLC: -2.5V */
+ .vcl = { 0x00, 0x00, 0x00 },
+ /* 0x24: CLCK = Hsync/2, BTN = -1 x VDDB */
+ .bt3ctr = { 0x24, 0x24, 0x24 },
/* 0x05 = 12V */
.vgh = { 0x05, 0x05, 0x05 },
/* 0x24: NCKA = Hsync/2, VGH = 2 x AVDD - AVEE */
@@ -1050,6 +1269,16 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.vgp = { 0x00, 0xA3, 0x00 },
/* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
.vgn = { 0x00, 0xA3, 0x00 },
+ /* VCMOFFSEL = VCOM voltage offset mode, VCM = 0V */
+ .vcmoff = { 0x00, 0x00 },
+ /* Enable TE, EoTP and RGB pixel format */
+ .dopctr = { NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
+ NT35510_DOPCTR_0_N565, NT35510_DOPCTR_1_CTB },
+ .madctl = NT35510_ROTATE_0_SETTING,
+ /* 0x0A: SDT = 5 us */
+ .sdhdtctr = 0x0A,
+ /* EQ control for gate signals, 0x00 = 0 us */
+ .gseqctr = { 0x00, 0x00 },
/* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
.sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
/* SDVPCTR: Normal operation off color during v porch */
@@ -1073,8 +1302,89 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
};
+static const struct nt35510_config nt35510_frida_frd400b25025 = {
+ .width_mm = 52,
+ .height_mm = 86,
+ .mode = {
+ .clock = 23000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 34, /* HFP = 34 */
+ .hsync_end = 480 + 34 + 2, /* HSync = 2 */
+ .htotal = 480 + 34 + 2 + 34, /* HBP = 34 */
+ .vdisplay = 800,
+ .vsync_start = 800 + 15, /* VFP = 15 */
+ .vsync_end = 800 + 15 + 12, /* VSync = 12 */
+ .vtotal = 800 + 15 + 12 + 15, /* VBP = 15 */
+ .flags = 0,
+ },
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM,
+ .cmds = NT35510_CMD_CONTROL_DISPLAY,
+ /* 0x03: AVDD = 6.2V */
+ .avdd = { 0x03, 0x03, 0x03 },
+ /* 0x46: PCK = 2 x Hsync, BTP = 2.5 x VDDB */
+ .bt1ctr = { 0x46, 0x46, 0x46 },
+ /* 0x03: AVEE = -6.2V */
+ .avee = { 0x03, 0x03, 0x03 },
+ /* 0x36: PCK = 2 x Hsync, BTP = 2 x VDDB */
+ .bt2ctr = { 0x36, 0x36, 0x36 },
+ /* VBCLA: -2.5V, VBCLB: -2.5V, VBCLC: -3.5V */
+ .vcl = { 0x00, 0x00, 0x02 },
+ /* 0x26: CLCK = 2 x Hsync, BTN = -1 x VDDB */
+ .bt3ctr = { 0x26, 0x26, 0x26 },
+ /* 0x09 = 16V */
+ .vgh = { 0x09, 0x09, 0x09 },
+ /* 0x36: HCK = 2 x Hsync, VGH = 2 x AVDD - AVEE */
+ .bt4ctr = { 0x36, 0x36, 0x36 },
+ /* 0x08 = -10V */
+ .vgl = { 0x08, 0x08, 0x08 },
+ /* 0x26: LCK = 2 x Hsync, VGL = AVDD + VCL - AVDD */
+ .bt5ctr = { 0x26, 0x26, 0x26 },
+ /* VGMP: 0x080 = 4.6V, VGSP = 0V */
+ .vgp = { 0x00, 0x80, 0x00 },
+ /* VGMP: 0x080 = 4.6V, VGSP = 0V */
+ .vgn = { 0x00, 0x80, 0x00 },
+ /* VCMOFFSEL = VCOM voltage offset mode, VCM = -1V */
+ .vcmoff = { 0x00, 0x50 },
+ .dopctr = { NT35510_DOPCTR_0_RAMKP | NT35510_DOPCTR_0_DSITE |
+ NT35510_DOPCTR_0_DSIG | NT35510_DOPCTR_0_DSIM |
+ NT35510_DOPCTR_0_EOTP | NT35510_DOPCTR_0_N565, 0 },
+ .madctl = NT35510_ROTATE_180_SETTING,
+ /* 0x03: SDT = 1.5 us */
+ .sdhdtctr = 0x03,
+ /* EQ control for gate signals, 0x00 = 0 us */
+ .gseqctr = { 0x00, 0x00 },
+ /* SDEQCTR: source driver EQ mode 2, 1 us rise time on each step */
+ .sdeqctr = { 0x01, 0x02, 0x02, 0x02 },
+ /* SDVPCTR: Normal operation off color during v porch */
+ .sdvpctr = 0x01,
+ /* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
+ .t1 = 0x0184,
+ /* VBP: vertical back porch toward the panel */
+ .vbp = 0x1C,
+ /* VFP: vertical front porch toward the panel */
+ .vfp = 0x1C,
+ /* PSEL: divide pixel clock 23MHz with 1 (no clock downscaling) */
+ .psel = 0,
+ /* DPTMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
+ .dpmctr12 = { 0x03, 0x00, 0x00, },
+ /* write display brightness */
+ .wrdisbv = 0x7f,
+ /* write control display */
+ .wrctrld = NT35510_WRCTRLD_BCTRL | NT35510_WRCTRLD_DD |
+ NT35510_WRCTRLD_BL,
+ /* write content adaptive brightness control */
+ .wrcabc = NT35510_WRCABC_STILL_MODE,
+ /* write CABC minimum brightness */
+ .wrcabcmb = 0xff,
+};
+
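Worked example for the .vcmoff encoding documented earlier: the second byte holds VCM[7:0], and the first byte holds VCM bit 8 plus VCMOFFSEL in bit 4.

/*
 * .vcmoff = { 0x00, 0x50 }
 *   VCMOFFSEL = 0    -> VCOM offset mode
 *   VCM = 0x050 = 80 -> 80 * 12.5 mV = 1.0 V, i.e. the -1 V VCOM
 *                       offset named in the config comment above.
 */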
static const struct of_device_id nt35510_of_match[] = {
{
+ .compatible = "frida,frd400b25025",
+ .data = &nt35510_frida_frd400b25025,
+ },
+ {
.compatible = "hydis,hva40wv1",
.data = &nt35510_hydis_hva40wv1,
},
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index a189ce236328..18bd2ee71201 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -933,8 +933,7 @@ static int j606f_boe_init_sequence(struct panel_info *pinfo)
static const struct drm_display_mode elish_boe_modes[] = {
{
- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
- .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 104 / 1000,
+ .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 120 / 1000,
.hdisplay = 1600,
.hsync_start = 1600 + 60,
.hsync_end = 1600 + 60 + 8,
@@ -948,8 +947,7 @@ static const struct drm_display_mode elish_boe_modes[] = {
static const struct drm_display_mode elish_csot_modes[] = {
{
- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
- .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 104 / 1000,
+ .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 120 / 1000,
.hdisplay = 1600,
.hsync_start = 1600 + 200,
.hsync_end = 1600 + 200 + 40,
@@ -1270,6 +1268,8 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ pinfo->panel.prepare_prev_first = true;
+
if (pinfo->desc->has_dcs_backlight) {
pinfo->panel.backlight = nt36523_create_backlight(dsi);
if (IS_ERR(pinfo->panel.backlight))
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
new file mode 100644
index 000000000000..cb7406d74466
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+ "vddi",
+ "avdd",
+ "avee",
+};
+
+static const unsigned long regulator_enable_loads[] = {
+ 62000,
+ 100000,
+ 100000,
+};
+
+static const unsigned long regulator_disable_loads[] = {
+ 80,
+ 100,
+ 100,
+};
+
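These per-supply load hints are presumably applied with regulator_set_load() around the bulk enable/disable calls (the power-sequencing functions fall outside this excerpt), letting the PMIC pick an appropriate regulator mode for active versus idle draw. A minimal sketch under that assumption, with a hypothetical function name:

static int nt36672e_power_on(struct nt36672e_panel *ctx)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
                /* hint the expected active current draw per supply */
                ret = regulator_set_load(ctx->supplies[i].consumer,
                                         regulator_enable_loads[i]);
                if (ret)
                        return ret;
        }

        return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}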
+struct panel_desc {
+ const struct drm_display_mode *display_mode;
+ u32 width_mm;
+ u32 height_mm;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+ const char *panel_name;
+ int (*init_sequence)(struct mipi_dsi_device *dsi);
+};
+
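Each supported panel variant gets one of these descriptors (the actual definitions come later in the file, past the end of this excerpt). The common code presumably dispatches through it along these lines, so adding a variant means adding a mode, an init_sequence and a descriptor, with no changes to the shared paths:

/* Hedged sketch; the helper name is illustrative, not from the patch. */
static int nt36672e_send_init(struct nt36672e_panel *ctx)
{
        if (ctx->desc->init_sequence)
                return ctx->desc->init_sequence(ctx->dsi);

        return 0;
}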
+struct nt36672e_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[3];
+ const struct panel_desc *desc;
+};
+
+static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt36672e_panel, panel);
+}
+
+static int nt36672e_1080x2408_60hz_init(struct mipi_dsi_device *dsi)
+{
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x89, 0x28, 0x00, 0x08, 0x00, 0xaa, 0x02,
+ 0x0e, 0x00, 0x2b, 0x00, 0x07, 0x0d, 0xb7, 0x0c, 0xb7);
+
+ mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x1b, 0xa0);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x01, 0x66);
+ mipi_dsi_dcs_write_seq(dsi, 0x06, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x07, 0x38);
+ mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, 0x69, 0x91);
+ mipi_dsi_dcs_write_seq(dsi, 0x95, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0x96, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf5, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xf6, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf9, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x01, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x03, 0x0c);
+ mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
+ mipi_dsi_dcs_write_seq(dsi, 0x08, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, 0x09, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x2d);
+ mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x2c);
+ mipi_dsi_dcs_write_seq(dsi, 0x11, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, 0x12, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x13, 0x15);
+ mipi_dsi_dcs_write_seq(dsi, 0x15, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x16, 0x16);
+ mipi_dsi_dcs_write_seq(dsi, 0x17, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x1d);
+ mipi_dsi_dcs_write_seq(dsi, 0x20, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, 0x21, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2d);
+ mipi_dsi_dcs_write_seq(dsi, 0x23, 0x2c);
+ mipi_dsi_dcs_write_seq(dsi, 0x29, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x15);
+ mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x30, 0x16);
+ mipi_dsi_dcs_write_seq(dsi, 0x31, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, 0x32, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x34, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0x35, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x36, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x4f, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x53, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x71, 0x30);
+ mipi_dsi_dcs_write_seq(dsi, 0x79, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x7a, 0x82);
+ mipi_dsi_dcs_write_seq(dsi, 0x7b, 0x8f);
+ mipi_dsi_dcs_write_seq(dsi, 0x7d, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x80, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x81, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x82, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0x85, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x86, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x87, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x90, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x92, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0x93, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x94, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x95, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x9c, 0xf4);
+ mipi_dsi_dcs_write_seq(dsi, 0x9d, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xa0, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xa2, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xa3, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xa4, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xa5, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xc0);
+ mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xd9, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xe9, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x18, 0x22);
+ mipi_dsi_dcs_write_seq(dsi, 0x19, 0xe4);
+ mipi_dsi_dcs_write_seq(dsi, 0x21, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x66, 0xd8);
+ mipi_dsi_dcs_write_seq(dsi, 0x68, 0x50);
+ mipi_dsi_dcs_write_seq(dsi, 0x69, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0x6b, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x0d);
+ mipi_dsi_dcs_write_seq(dsi, 0x6e, 0x48);
+ mipi_dsi_dcs_write_seq(dsi, 0x72, 0x41);
+ mipi_dsi_dcs_write_seq(dsi, 0x73, 0x4a);
+ mipi_dsi_dcs_write_seq(dsi, 0x74, 0xd0);
+ mipi_dsi_dcs_write_seq(dsi, 0x77, 0x62);
+ mipi_dsi_dcs_write_seq(dsi, 0x79, 0x7e);
+ mipi_dsi_dcs_write_seq(dsi, 0x7d, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x15);
+ mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x4d);
+ mipi_dsi_dcs_write_seq(dsi, 0xcf, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xef, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x84);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x81, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x83, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x85, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x86, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x87, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x88, 0x05);
+ mipi_dsi_dcs_write_seq(dsi, 0x8a, 0x1a);
+ mipi_dsi_dcs_write_seq(dsi, 0x8b, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x8c, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x42);
+ mipi_dsi_dcs_write_seq(dsi, 0x8f, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x90, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x91, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x9a, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0x9b, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x9c, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x9d, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x9e, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x01, 0x68);
+ mipi_dsi_dcs_write_seq(dsi, 0x20, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, 0x21, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0x25, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, 0x26, 0x94);
+ mipi_dsi_dcs_write_seq(dsi, 0x6e, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x6f, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x70, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x71, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x72, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x75, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x76, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x77, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x7d, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x67);
+ mipi_dsi_dcs_write_seq(dsi, 0x80, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0x82, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x83, 0x67);
+ mipi_dsi_dcs_write_seq(dsi, 0x88, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x89, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xa5, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xa6, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0xa7, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xd3);
+ mipi_dsi_dcs_write_seq(dsi, 0xeb, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0xec, 0x28);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x00, 0x91);
+ mipi_dsi_dcs_write_seq(dsi, 0x03, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0x07, 0x50);
+ mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x70);
+ mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x11, 0xe0);
+ mipi_dsi_dcs_write_seq(dsi, 0x15, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x16, 0xa4);
+ mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x78);
+ mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0x20, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0x28, 0xfd);
+ mipi_dsi_dcs_write_seq(dsi, 0x29, 0x12);
+ mipi_dsi_dcs_write_seq(dsi, 0x2a, 0xe1);
+ mipi_dsi_dcs_write_seq(dsi, 0x2d, 0x0a);
+ mipi_dsi_dcs_write_seq(dsi, 0x30, 0x49);
+ mipi_dsi_dcs_write_seq(dsi, 0x33, 0x96);
+ mipi_dsi_dcs_write_seq(dsi, 0x34, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, 0x35, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x36, 0xde);
+ mipi_dsi_dcs_write_seq(dsi, 0x37, 0xf9);
+ mipi_dsi_dcs_write_seq(dsi, 0x38, 0x45);
+ mipi_dsi_dcs_write_seq(dsi, 0x39, 0xd9);
+ mipi_dsi_dcs_write_seq(dsi, 0x3a, 0x49);
+ mipi_dsi_dcs_write_seq(dsi, 0x4a, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, 0x7a, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x7b, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x7f, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, 0x83, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0xa4);
+ mipi_dsi_dcs_write_seq(dsi, 0x87, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x88, 0x78);
+ mipi_dsi_dcs_write_seq(dsi, 0x89, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0x8b, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x8c, 0x7d);
+ mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x7d);
+ mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x7d);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
+ 0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
+ mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
+ 0x7a, 0x01, 0xa9, 0x01, 0xf2, 0x02, 0x2d, 0x02, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x64, 0x02, 0xa3, 0x02, 0xca, 0x03,
+ 0x00, 0x03, 0x1e, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x71, 0x00, 0x90, 0x00, 0xa7, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xe2, 0x01, 0x1a, 0x01, 0x43, 0x01,
+ 0x83, 0x01, 0xb2, 0x01, 0xfa, 0x02, 0x34, 0x02, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x6b, 0x02, 0xa8, 0x02, 0xd0, 0x03,
+ 0x03, 0x03, 0x21, 0x03, 0x4d, 0x03, 0x5b, 0x03, 0x6b);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x7e, 0x03, 0x94, 0x03, 0xac, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x72, 0x00, 0x92, 0x00, 0xa8, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xe2, 0x01, 0x18, 0x01, 0x42, 0x01,
+ 0x81, 0x01, 0xaf, 0x01, 0xf5, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x68, 0x02, 0xa6, 0x02, 0xcd, 0x03,
+ 0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
+ 0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
+ mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
+ 0x7a, 0x01, 0xa9, 0x01, 0xf2, 0x02, 0x2d, 0x02, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x64, 0x02, 0xa3, 0x02, 0xca, 0x03,
+ 0x00, 0x03, 0x1e, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x71, 0x00, 0x90, 0x00, 0xa7, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xe2, 0x01, 0x1a, 0x01, 0x43, 0x01,
+ 0x83, 0x01, 0xb2, 0x01, 0xfa, 0x02, 0x34, 0x02, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x6b, 0x02, 0xa8, 0x02, 0xd0, 0x03,
+ 0x03, 0x03, 0x21, 0x03, 0x4d, 0x03, 0x5b, 0x03, 0x6b);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x7e, 0x03, 0x94, 0x03, 0xac, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x72, 0x00, 0x92, 0x00, 0xa8, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xe2, 0x01, 0x18, 0x01, 0x42, 0x01,
+ 0x81, 0x01, 0xaf, 0x01, 0xf5, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x68, 0x02, 0xa6, 0x02, 0xcd, 0x03,
+ 0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2c);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x62, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x6b, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x6c, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x53, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x54, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x55, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x56, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x58, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x59, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
+
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x51, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, 0x53, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, 0x55, 0x01);
+
+ return 0;
+}
+
+static int nt36672e_power_on(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_enable_loads[i]);
+ if (ret) {
+ dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
+ ctx->supplies[i].supply, ret);
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "regulator bulk enable failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The nt36672e reset sequence requires the panel to be taken out of reset
+ * for 10ms, held back in reset for 10ms, and then released again.
+ */
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int nt36672e_power_off(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+ int i;
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_disable_loads[i]);
+ if (ret) {
+ dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
+ ctx->supplies[i].supply, ret);
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ dev_err(&dsi->dev, "regulator bulk disable failed: %d\n", ret);
+
+ return ret;
+}
+
+static int nt36672e_on(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ const struct panel_desc *desc = ctx->desc;
+ int ret = 0;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ if (desc->init_sequence) {
+ ret = desc->init_sequence(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "panel init sequence failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(100);
+
+ return 0;
+}
+
+static int nt36672e_off(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(60);
+
+ return 0;
+}
+
+static int nt36672e_panel_prepare(struct drm_panel *panel)
+{
+ struct nt36672e_panel *ctx = to_nt36672e_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+
+ ret = nt36672e_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = nt36672e_on(ctx);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to initialize panel: %d\n", ret);
+ if (nt36672e_power_off(ctx))
+ dev_err(&dsi->dev, "power off failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nt36672e_panel_unprepare(struct drm_panel *panel)
+{
+ struct nt36672e_panel *ctx = to_nt36672e_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+
+ ret = nt36672e_off(ctx);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to un-initialize panel: %d\n", ret);
+
+ ret = nt36672e_power_off(ctx);
+ if (ret < 0)
+ dev_err(&dsi->dev, "power off failed: %d\n", ret);
+
+ return 0;
+}
+
+static const struct drm_display_mode nt36672e_1080x2408_60hz = {
+ .name = "1080x2408",
+ .clock = 181690,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 76,
+ .hsync_end = 1080 + 76 + 12,
+ .htotal = 1080 + 76 + 12 + 56,
+ .vdisplay = 2408,
+ .vsync_start = 2408 + 46,
+ .vsync_end = 2408 + 46 + 10,
+ .vtotal = 2408 + 46 + 10 + 10,
+ .flags = 0,
+};
+
+static const struct panel_desc nt36672e_panel_desc = {
+ .display_mode = &nt36672e_1080x2408_60hz,
+ .width_mm = 74,
+ .height_mm = 131,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .panel_name = "nt36672e fhd plus panel",
+ .init_sequence = nt36672e_1080x2408_60hz_init,
+};
+
+static int nt36672e_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct nt36672e_panel *ctx = to_nt36672e_panel(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->display_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = ctx->desc->width_mm;
+ connector->display_info.height_mm = ctx->desc->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs nt36672e_drm_funcs = {
+ .prepare = nt36672e_panel_prepare,
+ .unprepare = nt36672e_panel_unprepare,
+ .get_modes = nt36672e_panel_get_modes,
+};
+
+static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt36672e_panel *ctx;
+ int i, ret = 0;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->desc = of_device_get_match_data(dev);
+ if (!ctx->desc) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = ctx->desc->lanes;
+ dsi->format = ctx->desc->format;
+ dsi->mode_flags = ctx->desc->mode_flags;
+
+ drm_panel_init(&ctx->panel, dev, &nt36672e_drm_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ ctx->panel.prepare_prev_first = true;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ drm_panel_remove(&ctx->panel);
+ return ret;
+}
+
+static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(ctx->dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id nt36672e_of_match[] = {
+ {
+ .compatible = "novatek,nt36672e",
+ .data = &nt36672e_panel_desc,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nt36672e_of_match);
+
+static struct mipi_dsi_driver nt36672e_panel_driver = {
+ .driver = {
+ .name = "panel-novatek-nt36672e",
+ .of_match_table = nt36672e_of_match,
+ },
+ .probe = nt36672e_panel_probe,
+ .remove = nt36672e_panel_remove,
+};
+module_mipi_dsi_driver(nt36672e_panel_driver);
+
+MODULE_AUTHOR("Ritesh Kumar <quic_riteshk@quicinc.com>");
+MODULE_DESCRIPTION("Novatek NT36672E DSI Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index d493ee735c73..20e3df1c59d4 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1367,6 +1367,23 @@ static const struct drm_display_mode boe_bp101wx1_100_mode = {
.vtotal = 800 + 6 + 8 + 2,
};
+static const struct panel_desc boe_bp082wx1_100 = {
+ .modes = &boe_bp101wx1_100_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 177,
+ .height = 110,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct panel_desc boe_bp101wx1_100 = {
.modes = &boe_bp101wx1_100_mode,
.num_modes = 1,
@@ -1980,6 +1997,33 @@ static const struct panel_desc edt_etml0700y5dha = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing edt_etml1010g3dra_timing = {
+ .pixelclock = { 66300000, 72400000, 78900000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 12, 72, 132 },
+ .hback_porch = { 86, 86, 86 },
+ .hsync_len = { 2, 2, 2 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 15, 49 },
+ .vback_porch = { 21, 21, 21 },
+ .vsync_len = { 2, 2, 2 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc edt_etml1010g3dra = {
+ .timings = &edt_etml1010g3dra_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode edt_etmv570g2dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -2754,21 +2798,21 @@ static const struct panel_desc lemaker_bl035_rgb_002 = {
.bus_flags = DRM_BUS_FLAG_DE_LOW,
};
-static const struct drm_display_mode lg_lb070wv8_mode = {
- .clock = 33246,
- .hdisplay = 800,
- .hsync_start = 800 + 88,
- .hsync_end = 800 + 88 + 80,
- .htotal = 800 + 88 + 80 + 88,
- .vdisplay = 480,
- .vsync_start = 480 + 10,
- .vsync_end = 480 + 10 + 25,
- .vtotal = 480 + 10 + 25 + 10,
+static const struct display_timing lg_lb070wv8_timing = {
+ .pixelclock = { 31950000, 33260000, 34600000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 88, 88, 88 },
+ .hback_porch = { 88, 88, 88 },
+ .hsync_len = { 80, 80, 80 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 10, 10, 10 },
+ .vsync_len = { 25, 25, 25 },
};
static const struct panel_desc lg_lb070wv8 = {
- .modes = &lg_lb070wv8_mode,
- .num_modes = 1,
+ .timings = &lg_lb070wv8_timing,
+ .num_timings = 1,
.bpc = 8,
.size = {
.width = 151,
@@ -3516,14 +3560,15 @@ static const struct display_timing rocktech_rk043fn48h_timing = {
.pixelclock = { 6000000, 9000000, 12000000 },
.hactive = { 480, 480, 480 },
.hback_porch = { 8, 43, 43 },
- .hfront_porch = { 2, 8, 8 },
+ .hfront_porch = { 2, 8, 10 },
.hsync_len = { 1, 1, 1 },
.vactive = { 272, 272, 272 },
- .vback_porch = { 2, 12, 12 },
+ .vback_porch = { 2, 12, 26 },
.vfront_porch = { 1, 4, 4 },
.vsync_len = { 1, 10, 10 },
.flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
};
static const struct panel_desc rocktech_rk043fn48h = {
@@ -4346,6 +4391,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "bananapi,s070wv20-ct16",
.data = &bananapi_s070wv20_ct16,
}, {
+ .compatible = "boe,bp082wx1-100",
+ .data = &boe_bp082wx1_100,
+ }, {
.compatible = "boe,bp101wx1-100",
.data = &boe_bp101wx1_100,
}, {
@@ -4424,6 +4472,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etml0700y5dha",
.data = &edt_etml0700y5dha,
}, {
+ .compatible = "edt,etml1010g3dra",
+ .data = &edt_etml1010g3dra,
+ }, {
.compatible = "edt,etmv570g2dhu",
.data = &edt_etmv570g2dhu,
}, {
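The lg_lb070wv8 conversion above trades a fixed drm_display_mode for a display_timing, whose fields carry { min, typ, max } ranges. panel-simple turns such timings back into modes through the videomode helpers; a sketch of that conversion, assuming the stock videomode_from_timing() and drm_display_mode_from_videomode() helpers:

#include <video/display_timing.h>
#include <video/videomode.h>
#include <drm/drm_modes.h>

/* Sketch: collapse a display_timing to a fixed mode by taking the
 * typical value of each { min, typ, max } range. */
static void timing_to_mode(const struct display_timing *dt,
			   struct drm_display_mode *mode)
{
	struct videomode vm;

	videomode_from_timing(dt, &vm);
	drm_display_mode_from_videomode(&vm, mode);
}

The ranges also let a bridge or encoder negotiate within the panel's tolerances instead of being pinned to one clock, which is why several entries in this file have migrated the same way.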
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index b55bafd1a8be..a3e142f156d5 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -62,6 +62,7 @@ struct st7703 {
struct dentry *debugfs;
const struct st7703_panel_desc *desc;
+ enum drm_panel_orientation orientation;
};
struct st7703_panel_desc {
@@ -521,6 +522,96 @@ static const struct st7703_panel_desc rgb30panel_desc = {
.init_sequence = rgb30panel_init_sequence,
};
+static int rgb10max3_panel_init_sequence(struct st7703 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* Init sequence extracted from Powkiddy RGB10MAX3 BSP kernel. */
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETAPID, 0x00, 0x00, 0x00, 0xda,
+ 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP, 0xc8, 0x02, 0x30);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x10, 0x10, 0x28,
+ 0x28, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP, 0x04, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM, 0x78, 0x78);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT, 0x25, 0x22, 0xf0,
+ 0x63);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI, 0x33, 0x81, 0x05, 0xf9,
+ 0x0e, 0x0e, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x44, 0x25, 0x00, 0x90, 0x0a, 0x00,
+ 0x00, 0x01, 0x4f, 0x01, 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x47);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, 0x73, 0x50, 0x50,
+ 0x00, 0x00, 0x12, 0x70, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER, 0x25, 0x00, 0x32,
+ 0x32, 0x77, 0xe1, 0xff, 0xff, 0xcc, 0xcc, 0x77,
+ 0x77);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETECO, 0x82, 0x00, 0xbf, 0xff,
+ 0x00, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETIO, 0xb8, 0x00, 0x0a, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCABC, 0x10, 0x40, 0x1e,
+ 0x02);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x04, 0x07,
+ 0x2a, 0x39, 0x3f, 0x36, 0x31, 0x06, 0x0b, 0x0e,
+ 0x12, 0x14, 0x12, 0x13, 0x0f, 0x17, 0x00, 0x04,
+ 0x07, 0x2a, 0x39, 0x3f, 0x36, 0x31, 0x06, 0x0b,
+ 0x0e, 0x12, 0x14, 0x12, 0x13, 0x0f, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ, 0x03, 0x03, 0x03, 0x03,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xff, 0x80,
+ 0xc0, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1, 0xc8, 0x10, 0x08, 0x00,
+ 0x00, 0x41, 0xf8, 0x12, 0x31, 0x23, 0x37, 0x86,
+ 0x11, 0xc8, 0x37, 0x2a, 0x00, 0x00, 0x0c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
+ 0x88, 0x20, 0x46, 0x02, 0x88, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0xff, 0x88, 0x31, 0x57, 0x13, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2, 0x00, 0x1a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8f, 0x13, 0x31, 0x75, 0x88, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0xf8, 0x8f, 0x02, 0x20, 0x64, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0xf8, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_EF, 0xff, 0xff, 0x01);
+
+ return 0;
+}
+
+static const struct drm_display_mode rgb10max3_panel_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 40,
+ .hsync_end = 720 + 40 + 10,
+ .htotal = 720 + 40 + 10 + 40,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 4,
+ .vtotal = 1280 + 16 + 4 + 14,
+ .clock = 63800,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 62,
+ .height_mm = 109,
+};
+
+static const struct st7703_panel_desc rgb10max3_panel_desc = {
+ .mode = &rgb10max3_panel_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = rgb10max3_panel_init_sequence,
+};
+
static int st7703_enable(struct drm_panel *panel)
{
struct st7703 *ctx = panel_to_st7703(panel);
@@ -653,12 +744,20 @@ static int st7703_get_modes(struct drm_panel *panel,
return 1;
}
+static enum drm_panel_orientation st7703_get_orientation(struct drm_panel *panel)
+{
+ struct st7703 *st7703 = panel_to_st7703(panel);
+
+ return st7703->orientation;
+}
+
static const struct drm_panel_funcs st7703_drm_funcs = {
.disable = st7703_disable,
.unprepare = st7703_unprepare,
.prepare = st7703_prepare,
.enable = st7703_enable,
.get_modes = st7703_get_modes,
+ .get_orientation = st7703_get_orientation,
};
static int allpixelson_set(void *data, u64 val)
@@ -727,6 +826,10 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
"Failed to request iovcc regulator\n");
+ ret = of_drm_get_panel_orientation(dsi->dev.of_node, &ctx->orientation);
+ if (ret < 0)
+ return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
+
drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -784,6 +887,7 @@ static void st7703_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id st7703_of_match[] = {
{ .compatible = "anbernic,rg353v-panel-v2", .data = &rg353v2_desc },
+ { .compatible = "powkiddy,rgb10max3-panel", .data = &rgb10max3_panel_desc },
{ .compatible = "powkiddy,rgb30-panel", .data = &rgb30panel_desc },
{ .compatible = "rocktech,jh057n00900", .data = &jh057n00900_panel_desc },
{ .compatible = "xingbangda,xbd599", .data = &xbd599_desc },
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index fbb73464de33..493f2a6076f8 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -322,6 +322,7 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ctx->panel.prepare_prev_first = true;
drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index a23407b9f6fb..540099253e1b 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -287,6 +287,7 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ctx->panel.prepare_prev_first = true;
drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index ad24cdf1d992..20fe1d2c0aaf 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -9,7 +9,6 @@ config DRM_PL111
select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the PL111 CLCD controller.
If M is selected the module will be called pl111_drm.
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 06a58dad5f5c..1e46b0a6e478 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -66,7 +66,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
pflag |= TTM_PL_FLAG_TOPDOWN;
qbo->placement.placement = qbo->placements;
- qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM) {
qbo->placements[c].mem_type = TTM_PL_VRAM;
qbo->placements[c++].flags = pflag;
@@ -86,7 +85,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
qbo->placements[c++].flags = 0;
}
qbo->placement.num_placement = c;
- qbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
qbo->placements[i].fpfn = 0;
qbo->placements[i].lpfn = 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 1a82629bce3f..765a144cea14 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -60,9 +60,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
placement->placement = &placements;
- placement->busy_placement = &placements;
placement->num_placement = 1;
- placement->num_busy_placement = 1;
return;
}
qbo = to_qxl_bo(bo);
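Both qxl hunks track a TTM core change that folded the separate busy_placement list into the main placement array; instead of a second list, fallback behaviour is now expressed per entry. A hedged sketch of the new-style setup (assuming the TTM_PL_FLAG_FALLBACK flag from that rework):

#include <drm/ttm/ttm_placement.h>

/* Sketch: one array covers both cases; entries flagged as fallback
 * are only considered when the preferred placements are contended. */
static void setup_placement(struct ttm_placement *placement,
			    struct ttm_place *places)
{
	places[0].mem_type = TTM_PL_VRAM;
	places[0].flags = 0;			/* preferred */
	places[1].mem_type = TTM_PL_SYSTEM;
	places[1].flags = TTM_PL_FLAG_FALLBACK;	/* under memory pressure */

	placement->placement = places;
	placement->num_placement = 2;
}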
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
index e8fae5c77514..2bfd6d0ff050 100644
--- a/drivers/gpu/drm/radeon/atom-bits.h
+++ b/drivers/gpu/drm/radeon/atom-bits.h
@@ -33,7 +33,7 @@ static inline uint8_t get_u8(void *bios, int ptr)
#define CU8(ptr) get_u8(ctx->bios, (ptr))
static inline uint16_t get_u16(void *bios, int ptr)
{
- return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+ return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
}
#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
#define CU16(ptr) get_u16(ctx->bios, (ptr))
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ceb6d772ef94..5bc3e6b41c34 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -60,6 +60,7 @@
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
+ int ps_size, ws_size;
int ps_shift;
uint16_t start;
unsigned last_jump;
@@ -68,8 +69,8 @@ typedef struct {
} atom_exec_context;
int atom_debug = 0;
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
static uint32_t atom_arg_mask[8] = {
0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
@@ -221,7 +222,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
- val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ if (idx < ctx->ps_size)
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ else
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -259,7 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
val = gctx->reg_block;
break;
default:
- val = ctx->ws[idx];
+ if (idx < ctx->ws_size)
+ val = ctx->ws[idx];
+ else
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
}
break;
case ATOM_ARG_ID:
@@ -494,6 +501,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
idx = U8(*ptr);
(*ptr)++;
DEBUG("PS[0x%02X]", idx);
+ if (idx >= ctx->ps_size) {
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
+ return;
+ }
ctx->ps[idx] = cpu_to_le32(val);
break;
case ATOM_ARG_WS:
@@ -526,6 +537,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
gctx->reg_block = val;
break;
default:
+ if (idx >= ctx->ws_size) {
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
+ return;
+ }
ctx->ws[idx] = val;
}
break;
@@ -623,7 +638,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
if (r) {
ctx->abort = true;
}
@@ -1152,7 +1167,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1174,12 +1189,16 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
+ ectx.ps_size = params_size;
ectx.abort = false;
ectx.last_jump = 0;
- if (ws)
+ if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
- else
+ ectx.ws_size = ws;
+ } else {
ectx.ws = NULL;
+ ectx.ws_size = 0;
+ }
debug_depth++;
while (1) {
@@ -1212,7 +1231,7 @@ free:
return ret;
}
-int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int r;
@@ -1228,16 +1247,16 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin
/* reset divmul */
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
- r = atom_execute_table_locked(ctx, index, params);
+ r = atom_execute_table_locked(ctx, index, params, params_size);
mutex_unlock(&ctx->mutex);
return r;
}
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int r;
mutex_lock(&ctx->scratch_mutex);
- r = atom_execute_table_scratch_unlocked(ctx, index, params);
+ r = atom_execute_table_scratch_unlocked(ctx, index, params, params_size);
mutex_unlock(&ctx->scratch_mutex);
return r;
}
@@ -1335,7 +1354,7 @@ int atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
if (ret)
return ret;
@@ -1343,7 +1362,7 @@ int atom_asic_init(struct atom_context *ctx)
if (rdev->family < CHIP_R600) {
if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
- atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+ atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps, 16);
}
return ret;
}
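The atom.c changes above thread the caller's parameter-space size into the interpreter context so every PS/WS access can be range-checked instead of trusting indices taken from BIOS bytecode. A minimal userspace model of the guarded-accessor pattern (illustrative only, not the kernel code):

#include <stdint.h>
#include <stdio.h>

struct exec_ctx {
	uint32_t *ps;		/* parameter space */
	int ps_size;		/* in dwords, carried alongside the array */
};

static uint32_t get_ps(const struct exec_ctx *ctx, int idx)
{
	if (idx >= ctx->ps_size) {
		fprintf(stderr, "PS index out of range: %i >= %i\n",
			idx, ctx->ps_size);
		return 0;	/* fail soft, as the interpreter does */
	}
	return ctx->ps[idx];
}

int main(void)
{
	uint32_t ps[4] = { 1, 2, 3, 4 };
	struct exec_ctx ctx = { .ps = ps, .ps_size = 4 };

	printf("%u\n", (unsigned)get_ps(&ctx, 2));	/* in range: prints 3 */
	get_ps(&ctx, 9);				/* out of range: logged */
	return 0;
}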
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 5de0563b63d2..5bf06c0bd6ff 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -145,8 +145,8 @@ struct atom_context {
extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
-int atom_execute_table(struct atom_context *, int, uint32_t *);
-int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
+int atom_execute_table(struct atom_context *, int, uint32_t *, int);
+int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *, int);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ade13173921b..9b3a3a9d60e2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -77,7 +77,7 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_scaler_setup(struct drm_crtc *crtc)
@@ -157,7 +157,7 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
break;
}
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if ((is_tv || is_cv)
&& rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
atom_rv515_force_tv_scaler(rdev, radeon_crtc);
@@ -178,7 +178,7 @@ static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = lock;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
@@ -194,7 +194,7 @@ static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
@@ -210,7 +210,7 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static const u32 vga_control_regs[6] =
@@ -242,7 +242,7 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if (ASIC_IS_DCE8(rdev))
WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
@@ -261,7 +261,7 @@ static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
args.ucDispPipeId = radeon_crtc->crtc_id;
args.ucEnable = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -343,7 +343,7 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_crtc_set_timing(struct drm_crtc *crtc,
@@ -389,7 +389,7 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
@@ -546,7 +546,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
args.lvds_ss.ucEnable = enable;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union adjust_pixel_clock {
@@ -692,7 +692,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
atom_execute_table(rdev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
@@ -725,7 +725,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucExtTransmitterID = 0;
atom_execute_table(rdev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -809,7 +809,7 @@ static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
@@ -949,7 +949,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 009333645438..fca8b08535a5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -112,7 +112,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd;
- atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*ack = args.v1.ucReplyStatus;
@@ -354,7 +354,7 @@ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
args.ucLaneNum = lane_num;
args.ucStatus = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return args.ucStatus;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 6e537c5bd295..2bff0d9e20f5 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -119,12 +119,12 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
if (dig->backlight_level == 0) {
args.ucAction = ATOM_LCD_BLOFF;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
} else {
args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -389,7 +389,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
}
args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -445,7 +445,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -546,7 +546,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union lvds_encoder_control {
@@ -664,7 +664,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int
@@ -979,7 +979,7 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -1361,7 +1361,7 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void
@@ -1397,7 +1397,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
args.v1.ucAction = action;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
@@ -1519,7 +1519,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void
@@ -1554,7 +1554,7 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
args.ucEnable = ATOM_ENABLE;
args.ucCRTC = radeon_crtc->crtc_id;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
WREG32(reg, temp);
}
@@ -1618,10 +1618,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
WREG32(RADEON_BIOS_3_SCRATCH, reg);
} else
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (rdev->mode_info.bl_encoder) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -1629,7 +1629,7 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
} else {
args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
}
break;
@@ -1637,10 +1637,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLOFF;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
break;
}
@@ -1983,7 +1983,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* update scratch regs with new routing */
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
@@ -2311,7 +2311,7 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return true;
} else
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index ab4d21072191..730f0b25312b 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -78,7 +78,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
- atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 4e64ed38c439..70931b04bbac 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -53,8 +53,7 @@
extern int ni_mc_load_microcode(struct radeon_device *rdev);
//********* BARTS **************//
-static const u32 barts_cgcg_cgls_default[] =
-{
+static const u32 barts_cgcg_cgls_default[] = {
/* Register, Value, Mask bits */
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -107,8 +106,7 @@ static const u32 barts_cgcg_cgls_default[] =
};
#define BARTS_CGCG_CGLS_DEFAULT_LENGTH sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32))
-static const u32 barts_cgcg_cgls_disable[] =
-{
+static const u32 barts_cgcg_cgls_disable[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -162,8 +160,7 @@ static const u32 barts_cgcg_cgls_disable[] =
};
#define BARTS_CGCG_CGLS_DISABLE_LENGTH sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32))
-static const u32 barts_cgcg_cgls_enable[] =
-{
+static const u32 barts_cgcg_cgls_enable[] = {
/* 0x0000c124, 0x84180000, 0x00180000, */
0x00000644, 0x000f7892, 0x001f4080,
0x000008f8, 0x00000010, 0xffffffff,
@@ -217,8 +214,7 @@ static const u32 barts_cgcg_cgls_enable[] =
};
#define BARTS_CGCG_CGLS_ENABLE_LENGTH sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32))
-static const u32 barts_mgcg_default[] =
-{
+static const u32 barts_mgcg_default[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x00005448, 0x00000100, 0xffffffff,
0x000055e4, 0x00600100, 0xffffffff,
@@ -366,8 +362,7 @@ static const u32 barts_mgcg_default[] =
};
#define BARTS_MGCG_DEFAULT_LENGTH sizeof(barts_mgcg_default) / (3 * sizeof(u32))
-static const u32 barts_mgcg_disable[] =
-{
+static const u32 barts_mgcg_disable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
@@ -381,8 +376,7 @@ static const u32 barts_mgcg_disable[] =
};
#define BARTS_MGCG_DISABLE_LENGTH sizeof(barts_mgcg_disable) / (3 * sizeof(u32))
-static const u32 barts_mgcg_enable[] =
-{
+static const u32 barts_mgcg_enable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -397,8 +391,7 @@ static const u32 barts_mgcg_enable[] =
#define BARTS_MGCG_ENABLE_LENGTH sizeof(barts_mgcg_enable) / (3 * sizeof(u32))
//********* CAICOS **************//
-static const u32 caicos_cgcg_cgls_default[] =
-{
+static const u32 caicos_cgcg_cgls_default[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -450,8 +443,7 @@ static const u32 caicos_cgcg_cgls_default[] =
};
#define CAICOS_CGCG_CGLS_DEFAULT_LENGTH sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32))
-static const u32 caicos_cgcg_cgls_disable[] =
-{
+static const u32 caicos_cgcg_cgls_disable[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -505,8 +497,7 @@ static const u32 caicos_cgcg_cgls_disable[] =
};
#define CAICOS_CGCG_CGLS_DISABLE_LENGTH sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32))
-static const u32 caicos_cgcg_cgls_enable[] =
-{
+static const u32 caicos_cgcg_cgls_enable[] = {
/* 0x0000c124, 0x84180000, 0x00180000, */
0x00000644, 0x000f7892, 0x001f4080,
0x000008f8, 0x00000010, 0xffffffff,
@@ -560,8 +551,7 @@ static const u32 caicos_cgcg_cgls_enable[] =
};
#define CAICOS_CGCG_CGLS_ENABLE_LENGTH sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32))
-static const u32 caicos_mgcg_default[] =
-{
+static const u32 caicos_mgcg_default[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x00005448, 0x00000100, 0xffffffff,
0x000055e4, 0x00600100, 0xffffffff,
@@ -640,8 +630,7 @@ static const u32 caicos_mgcg_default[] =
};
#define CAICOS_MGCG_DEFAULT_LENGTH sizeof(caicos_mgcg_default) / (3 * sizeof(u32))
-static const u32 caicos_mgcg_disable[] =
-{
+static const u32 caicos_mgcg_disable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
@@ -655,8 +644,7 @@ static const u32 caicos_mgcg_disable[] =
};
#define CAICOS_MGCG_DISABLE_LENGTH sizeof(caicos_mgcg_disable) / (3 * sizeof(u32))
-static const u32 caicos_mgcg_enable[] =
-{
+static const u32 caicos_mgcg_enable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -671,8 +659,7 @@ static const u32 caicos_mgcg_enable[] =
#define CAICOS_MGCG_ENABLE_LENGTH sizeof(caicos_mgcg_enable) / (3 * sizeof(u32))
//********* TURKS **************//
-static const u32 turks_cgcg_cgls_default[] =
-{
+static const u32 turks_cgcg_cgls_default[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -724,8 +711,7 @@ static const u32 turks_cgcg_cgls_default[] =
};
#define TURKS_CGCG_CGLS_DEFAULT_LENGTH sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32))
-static const u32 turks_cgcg_cgls_disable[] =
-{
+static const u32 turks_cgcg_cgls_disable[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -779,8 +765,7 @@ static const u32 turks_cgcg_cgls_disable[] =
};
#define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32))
-static const u32 turks_cgcg_cgls_enable[] =
-{
+static const u32 turks_cgcg_cgls_enable[] = {
/* 0x0000c124, 0x84180000, 0x00180000, */
0x00000644, 0x000f7892, 0x001f4080,
0x000008f8, 0x00000010, 0xffffffff,
@@ -835,8 +820,7 @@ static const u32 turks_cgcg_cgls_enable[] =
#define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32))
// These are the sequences for turks_mgcg_shls
-static const u32 turks_mgcg_default[] =
-{
+static const u32 turks_mgcg_default[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x00005448, 0x00000100, 0xffffffff,
0x000055e4, 0x00600100, 0xffffffff,
@@ -935,8 +919,7 @@ static const u32 turks_mgcg_default[] =
};
#define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32))
-static const u32 turks_mgcg_disable[] =
-{
+static const u32 turks_mgcg_disable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
@@ -950,8 +933,7 @@ static const u32 turks_mgcg_disable[] =
};
#define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32))
-static const u32 turks_mgcg_enable[] =
-{
+static const u32 turks_mgcg_enable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -972,8 +954,7 @@ static const u32 turks_mgcg_enable[] =
//********* BARTS **************//
-static const u32 barts_sysls_default[] =
-{
+static const u32 barts_sysls_default[] = {
/* Register, Value, Mask bits */
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
@@ -993,8 +974,7 @@ static const u32 barts_sysls_default[] =
};
#define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32))
-static const u32 barts_sysls_disable[] =
-{
+static const u32 barts_sysls_disable[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x00041401, 0xffffffff,
@@ -1013,8 +993,7 @@ static const u32 barts_sysls_disable[] =
};
#define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32))
-static const u32 barts_sysls_enable[] =
-{
+static const u32 barts_sysls_enable[] = {
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1034,8 +1013,7 @@ static const u32 barts_sysls_enable[] =
#define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32))
//********* CAICOS **************//
-static const u32 caicos_sysls_default[] =
-{
+static const u32 caicos_sysls_default[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1053,8 +1031,7 @@ static const u32 caicos_sysls_default[] =
};
#define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32))
-static const u32 caicos_sysls_disable[] =
-{
+static const u32 caicos_sysls_disable[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x00041401, 0xffffffff,
@@ -1072,8 +1049,7 @@ static const u32 caicos_sysls_disable[] =
};
#define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32))
-static const u32 caicos_sysls_enable[] =
-{
+static const u32 caicos_sysls_enable[] = {
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1092,8 +1068,7 @@ static const u32 caicos_sysls_enable[] =
#define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32))
//********* TURKS **************//
-static const u32 turks_sysls_default[] =
-{
+static const u32 turks_sysls_default[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1112,8 +1087,7 @@ static const u32 turks_sysls_default[] =
};
#define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32))
-static const u32 turks_sysls_disable[] =
-{
+static const u32 turks_sysls_disable[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x00041401, 0xffffffff,
@@ -1132,8 +1106,7 @@ static const u32 turks_sysls_disable[] =
};
#define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32))
-static const u32 turks_sysls_enable[] =
-{
+static const u32 turks_sysls_enable[] = {
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1154,8 +1127,7 @@ static const u32 turks_sysls_enable[] =
#endif
-u32 btc_valid_sclk[40] =
-{
+u32 btc_valid_sclk[40] = {
5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000,
55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000,
105000, 110000, 11500, 120000, 125000, 130000, 135000, 140000, 145000, 150000,
@@ -1194,7 +1166,7 @@ void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_t
if ((table == NULL) || (table->count == 0))
return;
- for (i= 0; i < table->count; i++) {
+ for (i = 0; i < table->count; i++) {
if (clock <= table->entries[i].clk) {
if (*voltage < table->entries[i].v)
*voltage = (u16)((table->entries[i].v < max_voltage) ?
@@ -1441,7 +1413,7 @@ void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
u32 i, length = count * 3;
u32 tmp;
- for (i = 0; i < length; i+=3) {
+ for (i = 0; i < length; i += 3) {
tmp = RREG32(sequence[i]);
tmp &= ~sequence[i+2];
tmp |= sequence[i+1] & sequence[i+2];
@@ -2003,7 +1975,7 @@ static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
for (i = 0; i < table->num_entries; i++) {
eg_table->mc_reg_table_entry[i].mclk_max =
table->mc_reg_table_entry[i].mclk_max;
- for(j = 0; j < table->last; j++)
+ for (j = 0; j < table->last; j++)
eg_table->mc_reg_table_entry[i].mc_data[j] =
table->mc_reg_table_entry[i].mc_data[j];
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index b8f4dac68d85..abe9d65cc460 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -46,36 +46,31 @@
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
-static const struct ci_pt_defaults defaults_hawaii_xt =
-{
+static const struct ci_pt_defaults defaults_hawaii_xt = {
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_defaults defaults_hawaii_pro =
-{
+static const struct ci_pt_defaults defaults_hawaii_pro = {
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_defaults defaults_bonaire_xt =
-{
+static const struct ci_pt_defaults defaults_bonaire_xt = {
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-static const struct ci_pt_defaults defaults_saturn_xt =
-{
+static const struct ci_pt_defaults defaults_saturn_xt = {
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_config_reg didt_config_ci[] =
-{
+static const struct ci_pt_config_reg didt_config_ci[] = {
{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
@@ -1216,7 +1211,7 @@ static void ci_thermal_initialize(struct radeon_device *rdev)
if (rdev->pm.fan_pulses_per_revolution) {
tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
- tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
+ tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
WREG32_SMC(CG_TACH_CTRL, tmp);
}
@@ -3333,7 +3328,7 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
}
static void ci_reset_single_dpm_table(struct radeon_device *rdev,
- struct ci_single_dpm_table* dpm_table,
+ struct ci_single_dpm_table *dpm_table,
u32 count)
{
u32 i;
@@ -3343,7 +3338,7 @@ static void ci_reset_single_dpm_table(struct radeon_device *rdev,
dpm_table->dpm_levels[i].enabled = false;
}
-static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
u32 index, u32 pcie_gen, u32 pcie_lanes)
{
dpm_table->dpm_levels[index].value = pcie_gen;
@@ -3503,7 +3498,7 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table,
u32 i;
int ret = -EINVAL;
- for(i = 0; i < table->count; i++) {
+ for (i = 0; i < table->count; i++) {
if (value == table->dpm_levels[i].value) {
*boot_level = i;
ret = 0;
@@ -4304,7 +4299,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
for (i = 0, j = table->last; i < table->last; i++) {
if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
- switch(table->mc_reg_address[i].s1 << 2) {
+ switch (table->mc_reg_address[i].s1 << 2) {
case MC_SEQ_MISC1:
temp_reg = RREG32(MC_PMG_CMD_EMRS);
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
@@ -4369,7 +4364,7 @@ static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
bool result = true;
- switch(in_reg) {
+ switch (in_reg) {
case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
@@ -4508,7 +4503,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
for (i = 0; i < table->last; i++) {
if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
- switch(table->mc_reg_address[i].s1 >> 2) {
+ switch (table->mc_reg_address[i].s1 >> 2) {
case MC_SEQ_MISC1:
for (k = 0; k < table->num_entries; k++) {
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
@@ -4683,7 +4678,7 @@ static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
struct ci_power_info *pi = ci_get_pi(rdev);
u32 i = 0;
- for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
+ for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
break;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index ac12db5f2cf7..74b95c200222 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -87,8 +87,7 @@ struct ci_mc_reg_table {
SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
-struct ci_ulv_parm
-{
+struct ci_ulv_parm {
bool supported;
u32 cg_ulv_parameter;
u32 volt_change_delay;
@@ -113,8 +112,7 @@ struct ci_dpm_level_enable_mask {
u32 pcie_dpm_enable_mask;
};
-struct ci_vbios_boot_state
-{
+struct ci_vbios_boot_state {
u16 mvdd_bootup_value;
u16 vddc_bootup_value;
u16 vddci_bootup_value;
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index 4774e04c4da6..7693fb6624a3 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -23,8 +23,7 @@
#include "clearstate_defs.h"
-static const u32 SECT_CONTEXT_def_1[] =
-{
+static const u32 SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -514,8 +513,7 @@ static const u32 SECT_CONTEXT_def_1[] =
0x00000000, // CB_BLEND6_CONTROL
0x00000000, // CB_BLEND7_CONTROL
};
-static const u32 SECT_CONTEXT_def_2[] =
-{
+static const u32 SECT_CONTEXT_def_2[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
@@ -523,8 +521,7 @@ static const u32 SECT_CONTEXT_def_2[] =
0x00000000, // VGT_DMA_BASE_HI
0x00000000, // VGT_DMA_BASE
};
-static const u32 SECT_CONTEXT_def_3[] =
-{
+static const u32 SECT_CONTEXT_def_3[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
index c1b6c22dbed7..0045d42aa27c 100644
--- a/drivers/gpu/drm/radeon/clearstate_ci.h
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -23,8 +23,7 @@
#include "clearstate_defs.h"
-static const unsigned int ci_SECT_CONTEXT_def_1[] =
-{
+static const unsigned int ci_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a424b86008b8..c634dc28e6c3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2514,8 +2514,7 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
-static const unsigned ni_dig_offsets[] =
-{
+static const unsigned ni_dig_offsets[] = {
NI_DIG0_REGISTER_OFFSET,
NI_DIG1_REGISTER_OFFSET,
NI_DIG2_REGISTER_OFFSET,
@@ -2524,8 +2523,7 @@ static const unsigned ni_dig_offsets[] =
NI_DIG5_REGISTER_OFFSET
};
-static const unsigned ni_tx_offsets[] =
-{
+static const unsigned ni_tx_offsets[] = {
NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
@@ -2534,8 +2532,7 @@ static const unsigned ni_tx_offsets[] =
NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
};
-static const unsigned evergreen_dp_offsets[] =
-{
+static const unsigned evergreen_dp_offsets[] = {
EVERGREEN_DP0_REGISTER_OFFSET,
EVERGREEN_DP1_REGISTER_OFFSET,
EVERGREEN_DP2_REGISTER_OFFSET,
@@ -2544,8 +2541,7 @@ static const unsigned evergreen_dp_offsets[] =
EVERGREEN_DP5_REGISTER_OFFSET
};
-static const unsigned evergreen_disp_int_status[] =
-{
+static const unsigned evergreen_disp_int_status[] = {
DISP_INTERRUPT_STATUS,
DISP_INTERRUPT_STATUS_CONTINUE,
DISP_INTERRUPT_STATUS_CONTINUE2,
@@ -2643,7 +2639,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
return;
}
- stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
evergreen_dp_offsets[dig_fe], stream_ctrl);
@@ -2655,7 +2651,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
evergreen_dp_offsets[dig_fe]);
}
- if (counter >= 32 )
+ if (counter >= 32)
DRM_ERROR("counter exceeds %d\n", counter);
fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
@@ -2716,7 +2712,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
/*for now we do it this manually*/
/**/
if (ASIC_IS_DCE5(rdev) &&
- evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+ evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
evergreen_blank_dp_output(rdev, dig_fe);
/*we could remove 6 lines below*/
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
@@ -3597,7 +3593,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
- sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 0de79f3a7e3f..1fe6e0d883c7 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -33,8 +33,8 @@
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
-#define MAX(a,b) (((a)>(b))?(a):(b))
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index b436badf9efa..3ff9fda54aa3 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -265,8 +265,8 @@
#define NI_DIG_BE_CNTL 0x7140
-# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F)
-# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 )
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
#define NI_DIG_BE_EN_CNTL 0x7144
# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
@@ -284,7 +284,7 @@
#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
-# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16)
+# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
#define EVERGREEN_DP_STEER_FIFO 0x7310
# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
@@ -302,8 +302,8 @@
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
/*DCIO_UNIPHY block*/
-#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600)
-#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600)
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 3a03ba37d043..b34d54b567b7 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -29,8 +29,7 @@
#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
-struct SMC_Evergreen_MCRegisterAddress
-{
+struct SMC_Evergreen_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
@@ -38,15 +37,13 @@ struct SMC_Evergreen_MCRegisterAddress
typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
-struct SMC_Evergreen_MCRegisterSet
-{
+struct SMC_Evergreen_MCRegisterSet {
uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
-struct SMC_Evergreen_MCRegisters
-{
+struct SMC_Evergreen_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index f7735da07feb..55dbf450bd9c 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -64,8 +64,7 @@ extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
u32 block, bool enable);
-static const struct kv_pt_config_reg didt_config_kv[] =
-{
+static const struct kv_pt_config_reg didt_config_kv[] = {
{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
@@ -931,9 +930,9 @@ static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
pi->graphics_level[i].ClkBypassCntl = 2;
else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
pi->graphics_level[i].ClkBypassCntl = 7;
- else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
+ else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
pi->graphics_level[i].ClkBypassCntl = 6;
- else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
+ else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
pi->graphics_level[i].ClkBypassCntl = 8;
else
pi->graphics_level[i].ClkBypassCntl = 0;
@@ -1577,7 +1576,7 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
if ((new_ps->levels[0].sclk -
table->entries[pi->highest_valid].sclk_frequency) >
(table->entries[pi->lowest_valid].sclk_frequency -
- new_ps->levels[new_ps->num_levels -1].sclk))
+ new_ps->levels[new_ps->num_levels - 1].sclk))
pi->highest_valid = pi->lowest_valid;
else
pi->lowest_valid = pi->highest_valid;
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index c0a59527e7b8..65831cca6730 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -189,7 +189,7 @@ int kv_copy_bytes_to_smc(struct radeon_device *rdev,
if (ret)
return ret;
- original_data= RREG32(SMC_IND_DATA_0);
+ original_data = RREG32(SMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 927e5f42e97d..77aee99e473a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -66,8 +66,7 @@ void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
-static const u32 tn_rlc_save_restore_register_list[] =
-{
+static const u32 tn_rlc_save_restore_register_list[] = {
0x98fc,
0x98f0,
0x9834,
@@ -216,8 +215,7 @@ MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
-static const u32 cayman_golden_registers2[] =
-{
+static const u32 cayman_golden_registers2[] = {
0x3e5c, 0xffffffff, 0x00000000,
0x3e48, 0xffffffff, 0x00000000,
0x3e4c, 0xffffffff, 0x00000000,
@@ -226,8 +224,7 @@ static const u32 cayman_golden_registers2[] =
0x3e60, 0xffffffff, 0x00000000
};
-static const u32 cayman_golden_registers[] =
-{
+static const u32 cayman_golden_registers[] = {
0x5eb4, 0xffffffff, 0x00000002,
0x5e78, 0x8f311ff1, 0x001000f0,
0x3f90, 0xffff0000, 0xff000000,
@@ -267,16 +264,14 @@ static const u32 cayman_golden_registers[] =
0x8974, 0xffffffff, 0x00000000
};
-static const u32 dvst_golden_registers2[] =
-{
+static const u32 dvst_golden_registers2[] = {
0x8f8, 0xffffffff, 0,
0x8fc, 0x00380000, 0,
0x8f8, 0xffffffff, 1,
0x8fc, 0x0e000000, 0
};
-static const u32 dvst_golden_registers[] =
-{
+static const u32 dvst_golden_registers[] = {
0x690, 0x3fff3fff, 0x20c00033,
0x918c, 0x0fff0fff, 0x00010006,
0x91a8, 0x0fff0fff, 0x00010006,
@@ -333,8 +328,7 @@ static const u32 dvst_golden_registers[] =
0x8974, 0xffffffff, 0x00000000
};
-static const u32 scrapper_golden_registers[] =
-{
+static const u32 scrapper_golden_registers[] = {
0x690, 0x3fff3fff, 0x20c00033,
0x918c, 0x0fff0fff, 0x00010006,
0x918c, 0x0fff0fff, 0x00010006,
@@ -624,7 +618,7 @@ static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
int ni_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
- u32 mem_type, running, blackout = 0;
+ u32 mem_type, running;
u32 *io_mc_regs;
int i, ucode_size, regs_size;
@@ -659,11 +653,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
- if (running) {
- blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
- WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
- }
-
/* reset the engine and set to writable */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -689,9 +678,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
break;
udelay(1);
}
-
- if (running)
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -754,7 +740,8 @@ int ni_init_microcode(struct radeon_device *rdev)
rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
mc_req_size = 0;
break;
- default: BUG();
+ default:
+ BUG();
}
DRM_INFO("Loading %s Microcode\n", chip_name);
@@ -813,7 +800,7 @@ int ni_init_microcode(struct radeon_device *rdev)
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
+ rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
}
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 3e1c1a392fb7..e08559c44a5c 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3103,9 +3103,6 @@ static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
u32 smc_leakage, max_leakage = 0;
u32 scaling_factor;
- if (!leakage_table)
- return -EINVAL;
-
table_size = leakage_table->count;
if (eg_pi->vddc_voltage_table.count != table_size)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.h b/drivers/gpu/drm/radeon/ni_dpm.h
index 74e301936906..4e3e7303e035 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.h
+++ b/drivers/gpu/drm/radeon/ni_dpm.h
@@ -59,8 +59,7 @@ struct ni_mc_reg_table {
#define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2
-enum ni_dc_cac_level
-{
+enum ni_dc_cac_level {
NISLANDS_DCCAC_LEVEL_0 = 0,
NISLANDS_DCCAC_LEVEL_1,
NISLANDS_DCCAC_LEVEL_2,
@@ -72,8 +71,7 @@ enum ni_dc_cac_level
NISLANDS_DCCAC_MAX_LEVELS
};
-struct ni_leakage_coeffients
-{
+struct ni_leakage_coeffients {
u32 at;
u32 bt;
u32 av;
@@ -83,8 +81,7 @@ struct ni_leakage_coeffients
u32 t_ref;
};
-struct ni_cac_data
-{
+struct ni_cac_data {
struct ni_leakage_coeffients leakage_coefficients;
u32 i_leakage;
s32 leakage_minimum_temperature;
@@ -100,8 +97,7 @@ struct ni_cac_data
u8 lts_truncate_n;
};
-struct ni_cac_weights
-{
+struct ni_cac_weights {
u32 weight_tcp_sig0;
u32 weight_tcp_sig1;
u32 weight_ta_sig;
diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h
index 42f3bab0f9ee..097893c38915 100644
--- a/drivers/gpu/drm/radeon/nislands_smc.h
+++ b/drivers/gpu/drm/radeon/nislands_smc.h
@@ -27,8 +27,7 @@
#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-struct PP_NIslands_Dpm2PerfLevel
-{
+struct PP_NIslands_Dpm2PerfLevel {
uint8_t MaxPS;
uint8_t TgtAct;
uint8_t MaxPS_StepInc;
@@ -44,8 +43,7 @@ struct PP_NIslands_Dpm2PerfLevel
typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
-struct PP_NIslands_DPM2Parameters
-{
+struct PP_NIslands_DPM2Parameters {
uint32_t TDPLimit;
uint32_t NearTDPLimit;
uint32_t SafePowerLimit;
@@ -53,8 +51,7 @@ struct PP_NIslands_DPM2Parameters
};
typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
-struct NISLANDS_SMC_SCLK_VALUE
-{
+struct NISLANDS_SMC_SCLK_VALUE {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -66,8 +63,7 @@ struct NISLANDS_SMC_SCLK_VALUE
typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
-struct NISLANDS_SMC_MCLK_VALUE
-{
+struct NISLANDS_SMC_MCLK_VALUE {
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
@@ -84,8 +80,7 @@ struct NISLANDS_SMC_MCLK_VALUE
typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
+struct NISLANDS_SMC_VOLTAGE_VALUE {
uint16_t value;
uint8_t index;
uint8_t padding;
@@ -93,8 +88,7 @@ struct NISLANDS_SMC_VOLTAGE_VALUE
typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL {
uint8_t arbValue;
uint8_t ACIndex;
uint8_t displayWatermark;
@@ -132,8 +126,7 @@ struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-struct NISLANDS_SMC_SWSTATE
-{
+struct NISLANDS_SMC_SWSTATE {
uint8_t flags;
uint8_t levelCount;
uint8_t padding2;
@@ -156,8 +149,7 @@ struct NISLANDS_SMC_SWSTATE_SINGLE {
#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
+struct NISLANDS_SMC_VOLTAGEMASKTABLE {
uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
};
@@ -166,8 +158,7 @@ typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
#define NISLANDS_MAX_NO_VREG_STEPS 32
-struct NISLANDS_SMC_STATETABLE
-{
+struct NISLANDS_SMC_STATETABLE {
uint8_t thermalProtectType;
uint8_t systemFlags;
uint8_t maxVDDCIndexInPPTable;
@@ -203,8 +194,7 @@ typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
#define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
#define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4
-struct SMC_NISLANDS_MC_TPP_CAC_TABLE
-{
+struct SMC_NISLANDS_MC_TPP_CAC_TABLE {
uint32_t tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
uint32_t cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
};
@@ -212,8 +202,7 @@ struct SMC_NISLANDS_MC_TPP_CAC_TABLE
typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE;
-struct PP_NIslands_CACTABLES
-{
+struct PP_NIslands_CACTABLES {
uint32_t cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
uint32_t cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
@@ -257,8 +246,7 @@ typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES;
#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-struct SMC_NIslands_MCRegisterAddress
-{
+struct SMC_NIslands_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
@@ -266,15 +254,13 @@ struct SMC_NIslands_MCRegisterAddress
typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
-struct SMC_NIslands_MCRegisterSet
-{
+struct SMC_NIslands_MCRegisterSet {
uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
-struct SMC_NIslands_MCRegisters
-{
+struct SMC_NIslands_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
@@ -283,8 +269,7 @@ struct SMC_NIslands_MCRegisters
typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
-struct SMC_NIslands_MCArbDramTimingRegisterSet
-{
+struct SMC_NIslands_MCArbDramTimingRegisterSet {
uint32_t mc_arb_dram_timing;
uint32_t mc_arb_dram_timing2;
uint8_t mc_arb_rfsh_rate;
@@ -293,8 +278,7 @@ struct SMC_NIslands_MCArbDramTimingRegisterSet
typedef struct SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet;
-struct SMC_NIslands_MCArbDramTimingRegisters
-{
+struct SMC_NIslands_MCArbDramTimingRegisters {
uint8_t arb_current;
uint8_t reserved[3];
SMC_NIslands_MCArbDramTimingRegisterSet data[20];
@@ -302,8 +286,7 @@ struct SMC_NIslands_MCArbDramTimingRegisters
typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters;
-struct SMC_NISLANDS_SPLL_DIV_TABLE
-{
+struct SMC_NISLANDS_SPLL_DIV_TABLE {
uint32_t freq[256];
uint32_t ss[256];
};
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cfeca2694d5f..86b8b770af19 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1327,7 +1327,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
return -EINVAL;
}
track->num_arrays = c;
- for (i = 0; i < (c - 1); i+=2, idx+=3) {
+ for (i = 0; i < (c - 1); i += 2, idx += 3) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 9d341cff63ee..d776f929d5c3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -825,7 +825,7 @@
# define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13)
-# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
+# define R300_TX_MIN_FILTER_MASK ((15 << 11) | (3 << 13))
# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a17b95eec65f..b5e97d95a19f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -99,8 +99,7 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 9d2bcb9551e6..64980a61d38a 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -28,8 +28,7 @@
#include "r600_dpm.h"
#include "atom.h"
-const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
-{
+const u32 r600_utc[R600_PM_NUMBER_OF_TC] = {
R600_UTC_DFLT_00,
R600_UTC_DFLT_01,
R600_UTC_DFLT_02,
@@ -47,8 +46,7 @@ const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
R600_UTC_DFLT_14,
};
-const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
-{
+const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = {
R600_DTC_DFLT_00,
R600_DTC_DFLT_01,
R600_DTC_DFLT_02,
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 6e4d22ed2a00..5c2513c84c48 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -119,8 +119,7 @@ enum r600_display_watermark {
R600_DISPLAY_WATERMARK_HIGH = 1,
};
-enum r600_display_gap
-{
+enum r600_display_gap {
R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
R600_PM_DISPLAY_GAP_VBLANK = 1,
R600_PM_DISPLAY_GAP_WATERMARK = 2,
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3d3d2109dfeb..3e5ff17e3caf 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1355,14 +1355,12 @@ struct radeon_dpm_thermal {
bool high_to_low;
};
-enum radeon_clk_action
-{
+enum radeon_clk_action {
RADEON_SCLK_UP = 1,
RADEON_SCLK_DOWN
};
-struct radeon_blacklist_clocks
-{
+struct radeon_blacklist_clocks {
u32 sclk;
u32 mclk;
enum radeon_clk_action action;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 802b5af19261..b5a0109b2e2c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2400,10 +2400,10 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RS880:
rdev->asic = &rs780_asic;
/* 760G/780V/880V don't have UVD */
- if ((rdev->pdev->device == 0x9616)||
- (rdev->pdev->device == 0x9611)||
- (rdev->pdev->device == 0x9613)||
- (rdev->pdev->device == 0x9711)||
+ if ((rdev->pdev->device == 0x9616) ||
+ (rdev->pdev->device == 0x9611) ||
+ (rdev->pdev->device == 0x9613) ||
+ (rdev->pdev->device == 0x9711) ||
(rdev->pdev->device == 0x9713))
rdev->has_uvd = false;
else
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 3596ea4a8b60..bb1f0a3371ab 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2852,7 +2852,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
args.v1.ucAction = clock_type;
args.v1.ulClock = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v1.ucPostDiv;
dividers->fb_div = args.v1.ucFbDiv;
@@ -2866,7 +2866,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
args.v2.ucAction = clock_type;
args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v2.ucPostDiv;
dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
@@ -2881,7 +2881,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -2901,7 +2901,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -2920,7 +2920,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -2931,7 +2931,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -2972,7 +2972,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -3005,7 +3005,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
@@ -3013,7 +3013,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
GET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return le32_to_cpu(args.ulReturnEngineClock);
}
@@ -3022,7 +3022,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
GET_MEMORY_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return le32_to_cpu(args.ulReturnMemoryClock);
}
@@ -3034,7 +3034,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_set_memory_clock(struct radeon_device *rdev,
@@ -3048,7 +3048,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
@@ -3067,7 +3067,7 @@ void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
@@ -3078,7 +3078,7 @@ void radeon_atom_update_memory_dll(struct radeon_device *rdev,
args = cpu_to_le32(mem_clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -3090,7 +3090,7 @@ void radeon_atom_set_ac_timing(struct radeon_device *rdev,
args.ulTargetMemoryClock = cpu_to_le32(tmp); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union set_voltage {
@@ -3134,7 +3134,7 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
@@ -3155,7 +3155,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -3164,7 +3164,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
@@ -3200,7 +3200,7 @@ int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
args.v3.usVoltageLevel = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
break;
@@ -3327,7 +3327,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
args.in.ulSCLKFreq =
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
@@ -3353,7 +3353,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
@@ -3361,7 +3361,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*gpio_value = le32_to_cpu(*(u32 *)&args.v2);
break;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 595354e3ce0b..f557535c1d7b 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -61,19 +61,23 @@ struct atpx_mux {
u16 mux;
} __packed;
-bool radeon_has_atpx(void) {
+bool radeon_has_atpx(void)
+{
return radeon_atpx_priv.atpx_detected;
}
-bool radeon_has_atpx_dgpu_power_cntl(void) {
+bool radeon_has_atpx_dgpu_power_cntl(void)
+{
return radeon_atpx_priv.atpx.functions.power_cntl;
}
-bool radeon_is_atpx_hybrid(void) {
+bool radeon_is_atpx_hybrid(void)
+{
return radeon_atpx_priv.atpx.is_hybrid;
}
-bool radeon_atpx_dgpu_req_power_for_displays(void) {
+bool radeon_atpx_dgpu_req_power_for_displays(void)
+{
return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 91b58fbc2be7..74753bb26d33 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -37,15 +37,14 @@
void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
u8 enable_mask);
-struct r600_audio_pin* r600_audio_get_pin(struct radeon_device *rdev);
-struct r600_audio_pin* dce6_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
-static const u32 pin_offsets[7] =
-{
+static const u32 pin_offsets[7] = {
(0x5e00 - 0x5e00),
(0x5e18 - 0x5e00),
(0x5e30 - 0x5e00),
@@ -361,7 +360,7 @@ static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
}
-struct r600_audio_pin* radeon_audio_get_pin(struct drm_encoder *encoder)
+struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -528,7 +527,7 @@ static void radeon_audio_calc_cts(unsigned int clock, int *CTS, int *N, int freq
*N, *CTS, freq);
}
-static const struct radeon_hdmi_acr* radeon_audio_acr(unsigned int clock)
+static const struct radeon_hdmi_acr *radeon_audio_acr(unsigned int clock)
{
static struct radeon_hdmi_acr res;
u8 i;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index dacaaa007051..a073dadd0638 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -34,8 +34,7 @@ struct cea_sad;
#define WREG32_ENDPOINT(block, reg, v) \
radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
-struct radeon_audio_basic_funcs
-{
+struct radeon_audio_basic_funcs {
u32 (*endpoint_rreg)(struct radeon_device *rdev, u32 offset, u32 reg);
void (*endpoint_wreg)(struct radeon_device *rdev,
u32 offset, u32 reg, u32 v);
@@ -43,8 +42,7 @@ struct radeon_audio_basic_funcs
struct r600_audio_pin *pin, u8 enable_mask);
};
-struct radeon_audio_funcs
-{
+struct radeon_audio_funcs {
void (*select_pin)(struct drm_encoder *encoder);
struct r600_audio_pin* (*get_pin)(struct radeon_device *rdev);
void (*write_latency_fields)(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 59c4db13d90a..546381a5c918 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -603,8 +603,7 @@ struct atom_memory_info {
#define MAX_AC_TIMING_ENTRIES 16
-struct atom_memory_clock_range_table
-{
+struct atom_memory_clock_range_table {
u8 num_entries;
u8 rsv[3];
u32 mclk[MAX_AC_TIMING_ENTRIES];
@@ -632,14 +631,12 @@ struct atom_mc_reg_table {
#define MAX_VOLTAGE_ENTRIES 32
-struct atom_voltage_table_entry
-{
+struct atom_voltage_table_entry {
u16 value;
u32 smio_low;
};
-struct atom_voltage_table
-{
+struct atom_voltage_table {
u32 count;
u32 mask_low;
u32 phase_delay;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 10c0fbd9d2b4..a955f8a2f7fe 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -78,7 +78,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
u32 c = 0, i;
rbo->placement.placement = rbo->placements;
- rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM) {
/* Try placing BOs which don't need CPU access outside of the
* CPU accessible part of VRAM
@@ -114,7 +113,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
}
rbo->placement.num_placement = c;
- rbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b73fd9ab0252..4482c8c5f5ce 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -587,7 +587,7 @@ static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
int err;
int value;
- if(!rdev->asic->dpm.fan_ctrl_set_mode)
+ if (!rdev->asic->dpm.fan_ctrl_set_mode)
return -EINVAL;
err = kstrtoint(buf, 10, &value);
@@ -789,7 +789,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* Skip vddc attribute if get_current_vddc is not implemented */
- if(attr == &sensor_dev_attr_in0_input.dev_attr.attr &&
+ if (attr == &sensor_dev_attr_in0_input.dev_attr.attr &&
!rdev->asic->dpm.get_current_vddc)
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index de4e6d78f1e1..2078b0000e22 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -92,9 +92,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
if (!radeon_ttm_bo_is_radeon_bo(bo)) {
placement->placement = &placements;
- placement->busy_placement = &placements;
placement->num_placement = 1;
- placement->num_busy_placement = 1;
return;
}
rbo = container_of(bo, struct radeon_bo, tbo);
@@ -114,15 +112,11 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
*/
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT);
- rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
if (rbo->placements[i].fpfn < fpfn)
rbo->placements[i].fpfn = fpfn;
- } else {
- rbo->placement.busy_placement =
- &rbo->placements[i];
- rbo->placement.num_busy_placement = 1;
+ rbo->placements[i].flags |= TTM_PL_FLAG_DESIRED;
}
}
} else
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index a2cda184b2b2..058a1c8451b2 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -324,7 +324,6 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
rbo->placement.num_placement++;
- rbo->placement.num_busy_placement++;
}
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 922a29e58880..d7f552d441ab 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -86,7 +86,7 @@ int rs400_gart_init(struct radeon_device *rdev)
return 0;
}
/* Check gart size */
- switch(rdev->mc.gtt_size / (1024 * 1024)) {
+ switch (rdev->mc.gtt_size / (1024 * 1024)) {
case 32:
case 64:
case 128:
@@ -116,7 +116,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
/* Check gart size */
- switch(rdev->mc.gtt_size / (1024 * 1024)) {
+ switch (rdev->mc.gtt_size / (1024 * 1024)) {
case 32:
size_reg = RS480_VA_SIZE_32MB;
break;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 8cf87a0a2b2a..5c162778899b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -54,8 +54,7 @@
static void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 76260fdfbaa7..79709d26d983 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -42,8 +42,7 @@
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.h b/drivers/gpu/drm/radeon/rv6xx_dpm.h
index 8035d53ebea6..020c0dc8361d 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.h
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.h
@@ -28,8 +28,7 @@
#include "r600_dpm.h"
/* Represents a single SCLK step. */
-struct rv6xx_sclk_stepping
-{
+struct rv6xx_sclk_stepping {
u32 vco_frequency;
u32 post_divider;
};
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index ef2f1a048cfe..e3e1f6833f12 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -1010,7 +1010,7 @@ int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) ==
- (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) {
+ (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low)) {
voltage->index = MVDD_LOW_INDEX;
voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
} else {
@@ -1260,7 +1260,7 @@ static int rv770_construct_vddc_table(struct radeon_device *rdev)
pi->vddc_mask_low = gpio_mask;
if (i > 0) {
if ((pi->vddc_table[i].low_smio !=
- pi->vddc_table[i - 1].low_smio ) ||
+ pi->vddc_table[i - 1].low_smio) ||
(pi->vddc_table[i].high_smio !=
pi->vddc_table[i - 1].high_smio))
vddc_index++;
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index 3b2c963c4880..d8e8f70135f2 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -31,8 +31,7 @@
#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
-struct RV770_SMC_SCLK_VALUE
-{
+struct RV770_SMC_SCLK_VALUE {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -43,8 +42,7 @@ struct RV770_SMC_SCLK_VALUE
typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
-struct RV770_SMC_MCLK_VALUE
-{
+struct RV770_SMC_MCLK_VALUE {
uint32_t vMPLL_AD_FUNC_CNTL;
uint32_t vMPLL_AD_FUNC_CNTL_2;
uint32_t vMPLL_DQ_FUNC_CNTL;
@@ -59,8 +57,7 @@ struct RV770_SMC_MCLK_VALUE
typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
-struct RV730_SMC_MCLK_VALUE
-{
+struct RV730_SMC_MCLK_VALUE {
uint32_t vMCLK_PWRMGT_CNTL;
uint32_t vDLL_CNTL;
uint32_t vMPLL_FUNC_CNTL;
@@ -73,8 +70,7 @@ struct RV730_SMC_MCLK_VALUE
typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
-struct RV770_SMC_VOLTAGE_VALUE
-{
+struct RV770_SMC_VOLTAGE_VALUE {
uint16_t value;
uint8_t index;
uint8_t padding;
@@ -82,16 +78,14 @@ struct RV770_SMC_VOLTAGE_VALUE
typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
-union RV7XX_SMC_MCLK_VALUE
-{
+union RV7XX_SMC_MCLK_VALUE {
RV770_SMC_MCLK_VALUE mclk770;
RV730_SMC_MCLK_VALUE mclk730;
};
typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
+struct RV770_SMC_HW_PERFORMANCE_LEVEL {
uint8_t arbValue;
union{
uint8_t seqValue;
@@ -126,8 +120,7 @@ struct RV770_SMC_HW_PERFORMANCE_LEVEL
typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
-struct RV770_SMC_SWSTATE
-{
+struct RV770_SMC_SWSTATE {
uint8_t flags;
uint8_t padding1;
uint8_t padding2;
@@ -142,8 +135,7 @@ typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
#define RV770_SMC_VOLTAGEMASK_VDDCI 2
#define RV770_SMC_VOLTAGEMASK_MAX 4
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
+struct RV770_SMC_VOLTAGEMASKTABLE {
uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
};
@@ -152,8 +144,7 @@ typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
#define MAX_NO_VREG_STEPS 32
-struct RV770_SMC_STATETABLE
-{
+struct RV770_SMC_STATETABLE {
uint8_t thermalProtectType;
uint8_t systemFlags;
uint8_t maxVDDCIndexInPPTable;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 85e9cba49cec..93f197d96d8f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -138,8 +138,7 @@ static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
-static const u32 crtc_offsets[] =
-{
+static const u32 crtc_offsets[] = {
EVERGREEN_CRTC0_REGISTER_OFFSET,
EVERGREEN_CRTC1_REGISTER_OFFSET,
EVERGREEN_CRTC2_REGISTER_OFFSET,
@@ -148,8 +147,7 @@ static const u32 crtc_offsets[] =
EVERGREEN_CRTC5_REGISTER_OFFSET
};
-static const u32 si_disp_int_status[] =
-{
+static const u32 si_disp_int_status[] = {
DISP_INTERRUPT_STATUS,
DISP_INTERRUPT_STATUS_CONTINUE,
DISP_INTERRUPT_STATUS_CONTINUE2,
@@ -162,8 +160,7 @@ static const u32 si_disp_int_status[] =
#define DC_HPDx_INT_CONTROL(x) (DC_HPD1_INT_CONTROL + (x * 0xc))
#define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS + (x * 0xc))
-static const u32 verde_rlc_save_restore_register_list[] =
-{
+static const u32 verde_rlc_save_restore_register_list[] = {
(0x8000 << 16) | (0x98f4 >> 2),
0x00000000,
(0x8040 << 16) | (0x98f4 >> 2),
@@ -384,8 +381,7 @@ static const u32 verde_rlc_save_restore_register_list[] =
0x00000000
};
-static const u32 tahiti_golden_rlc_registers[] =
-{
+static const u32 tahiti_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x00601005,
0xc47c, 0xffffffff, 0x10104040,
0xc488, 0xffffffff, 0x0100000a,
@@ -394,8 +390,7 @@ static const u32 tahiti_golden_rlc_registers[] =
0xf4a8, 0xffffffff, 0x00000000
};
-static const u32 tahiti_golden_registers[] =
-{
+static const u32 tahiti_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -429,13 +424,11 @@ static const u32 tahiti_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 tahiti_golden_registers2[] =
-{
+static const u32 tahiti_golden_registers2[] = {
0xc64, 0x00000001, 0x00000001
};
-static const u32 pitcairn_golden_rlc_registers[] =
-{
+static const u32 pitcairn_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x00601004,
0xc47c, 0xffffffff, 0x10102020,
0xc488, 0xffffffff, 0x01000020,
@@ -443,8 +436,7 @@ static const u32 pitcairn_golden_rlc_registers[] =
0xc30c, 0xffffffff, 0x800000a4
};
-static const u32 pitcairn_golden_registers[] =
-{
+static const u32 pitcairn_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -474,8 +466,7 @@ static const u32 pitcairn_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 verde_golden_rlc_registers[] =
-{
+static const u32 verde_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x033f1005,
0xc47c, 0xffffffff, 0x10808020,
0xc488, 0xffffffff, 0x00800008,
@@ -483,8 +474,7 @@ static const u32 verde_golden_rlc_registers[] =
0xc30c, 0xffffffff, 0x80010014
};
-static const u32 verde_golden_registers[] =
-{
+static const u32 verde_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -539,8 +529,7 @@ static const u32 verde_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 oland_golden_rlc_registers[] =
-{
+static const u32 oland_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x00601005,
0xc47c, 0xffffffff, 0x10104040,
0xc488, 0xffffffff, 0x0100000a,
@@ -548,8 +537,7 @@ static const u32 oland_golden_rlc_registers[] =
0xc30c, 0xffffffff, 0x800000f4
};
-static const u32 oland_golden_registers[] =
-{
+static const u32 oland_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -579,8 +567,7 @@ static const u32 oland_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 hainan_golden_registers[] =
-{
+static const u32 hainan_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -608,13 +595,11 @@ static const u32 hainan_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 hainan_golden_registers2[] =
-{
+static const u32 hainan_golden_registers2[] = {
0x98f8, 0xffffffff, 0x02010001
};
-static const u32 tahiti_mgcg_cgcg_init[] =
-{
+static const u32 tahiti_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -743,8 +728,7 @@ static const u32 tahiti_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 pitcairn_mgcg_cgcg_init[] =
-{
+static const u32 pitcairn_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -841,8 +825,7 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 verde_mgcg_cgcg_init[] =
-{
+static const u32 verde_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -941,8 +924,7 @@ static const u32 verde_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 oland_mgcg_cgcg_init[] =
-{
+static const u32 oland_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -1021,8 +1003,7 @@ static const u32 oland_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 hainan_mgcg_cgcg_init[] =
-{
+static const u32 hainan_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -1098,8 +1079,7 @@ static const u32 hainan_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static u32 verde_pg_init[] =
-{
+static u32 verde_pg_init[] = {
0x353c, 0xffffffff, 0x40000,
0x3538, 0xffffffff, 0x200010ff,
0x353c, 0xffffffff, 0x0,
@@ -1768,7 +1748,8 @@ static int si_init_microcode(struct radeon_device *rdev)
mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
break;
- default: BUG();
+ default:
+ BUG();
}
/* this memory configuration requires special firmware */
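
The same series of cleanups splits one-line "default: BUG();" labels so the statement sits on its own line at case indentation. A hedged sketch of the resulting shape (the case label and size are illustrative, not from the patch):

	switch (chip_id) {
	case CHIP_EXAMPLE:
		fw_size = 4096;
		break;
	default:
		BUG();	/* unknown ASIC: treated as unrecoverable */
	}
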
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index fbf968e3f6d7..9deb91970d4d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -46,8 +46,7 @@
#define SCLK_MIN_DEEPSLEEP_FREQ 1350
-static const struct si_cac_config_reg cac_weights_tahiti[] =
-{
+static const struct si_cac_config_reg cac_weights_tahiti[] = {
{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
@@ -111,8 +110,7 @@ static const struct si_cac_config_reg cac_weights_tahiti[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_tahiti[] =
-{
+static const struct si_cac_config_reg lcac_tahiti[] = {
{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
@@ -203,13 +201,11 @@ static const struct si_cac_config_reg lcac_tahiti[] =
};
-static const struct si_cac_config_reg cac_override_tahiti[] =
-{
+static const struct si_cac_config_reg cac_override_tahiti[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_tahiti =
-{
+static const struct si_powertune_data powertune_data_tahiti = {
((1 << 16) | 27027),
6,
0,
@@ -239,8 +235,7 @@ static const struct si_powertune_data powertune_data_tahiti =
true
};
-static const struct si_dte_data dte_data_tahiti =
-{
+static const struct si_dte_data dte_data_tahiti = {
{ 1159409, 0, 0, 0, 0 },
{ 777, 0, 0, 0, 0 },
2,
@@ -257,8 +252,7 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-static const struct si_dte_data dte_data_tahiti_pro =
-{
+static const struct si_dte_data dte_data_tahiti_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -275,8 +269,7 @@ static const struct si_dte_data dte_data_tahiti_pro =
true
};
-static const struct si_dte_data dte_data_new_zealand =
-{
+static const struct si_dte_data dte_data_new_zealand = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
0x5,
@@ -293,8 +286,7 @@ static const struct si_dte_data dte_data_new_zealand =
true
};
-static const struct si_dte_data dte_data_aruba_pro =
-{
+static const struct si_dte_data dte_data_aruba_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -311,8 +303,7 @@ static const struct si_dte_data dte_data_aruba_pro =
true
};
-static const struct si_dte_data dte_data_malta =
-{
+static const struct si_dte_data dte_data_malta = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -329,8 +320,7 @@ static const struct si_dte_data dte_data_malta =
true
};
-static struct si_cac_config_reg cac_weights_pitcairn[] =
-{
+static struct si_cac_config_reg cac_weights_pitcairn[] = {
{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
@@ -394,8 +384,7 @@ static struct si_cac_config_reg cac_weights_pitcairn[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_pitcairn[] =
-{
+static const struct si_cac_config_reg lcac_pitcairn[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -485,13 +474,11 @@ static const struct si_cac_config_reg lcac_pitcairn[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_override_pitcairn[] =
-{
+static const struct si_cac_config_reg cac_override_pitcairn[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_pitcairn =
-{
+static const struct si_powertune_data powertune_data_pitcairn = {
((1 << 16) | 27027),
5,
0,
@@ -521,8 +508,7 @@ static const struct si_powertune_data powertune_data_pitcairn =
true
};
-static const struct si_dte_data dte_data_pitcairn =
-{
+static const struct si_dte_data dte_data_pitcairn = {
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
@@ -539,8 +525,7 @@ static const struct si_dte_data dte_data_pitcairn =
false
};
-static const struct si_dte_data dte_data_curacao_xt =
-{
+static const struct si_dte_data dte_data_curacao_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -557,8 +542,7 @@ static const struct si_dte_data dte_data_curacao_xt =
true
};
-static const struct si_dte_data dte_data_curacao_pro =
-{
+static const struct si_dte_data dte_data_curacao_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -575,8 +559,7 @@ static const struct si_dte_data dte_data_curacao_pro =
true
};
-static const struct si_dte_data dte_data_neptune_xt =
-{
+static const struct si_dte_data dte_data_neptune_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -593,8 +576,7 @@ static const struct si_dte_data dte_data_neptune_xt =
true
};
-static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -658,8 +640,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -723,8 +704,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_heathrow[] =
-{
+static const struct si_cac_config_reg cac_weights_heathrow[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -788,8 +768,7 @@ static const struct si_cac_config_reg cac_weights_heathrow[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -853,8 +832,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_cape_verde[] =
-{
+static const struct si_cac_config_reg cac_weights_cape_verde[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -918,8 +896,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_cape_verde[] =
-{
+static const struct si_cac_config_reg lcac_cape_verde[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -977,13 +954,11 @@ static const struct si_cac_config_reg lcac_cape_verde[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_override_cape_verde[] =
-{
+static const struct si_cac_config_reg cac_override_cape_verde[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_cape_verde =
-{
+static const struct si_powertune_data powertune_data_cape_verde = {
((1 << 16) | 0x6993),
5,
0,
@@ -1013,8 +988,7 @@ static const struct si_powertune_data powertune_data_cape_verde =
true
};
-static const struct si_dte_data dte_data_cape_verde =
-{
+static const struct si_dte_data dte_data_cape_verde = {
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
@@ -1031,8 +1005,7 @@ static const struct si_dte_data dte_data_cape_verde =
false
};
-static const struct si_dte_data dte_data_venus_xtx =
-{
+static const struct si_dte_data dte_data_venus_xtx = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
5,
@@ -1049,8 +1022,7 @@ static const struct si_dte_data dte_data_venus_xtx =
true
};
-static const struct si_dte_data dte_data_venus_xt =
-{
+static const struct si_dte_data dte_data_venus_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
5,
@@ -1067,8 +1039,7 @@ static const struct si_dte_data dte_data_venus_xt =
true
};
-static const struct si_dte_data dte_data_venus_pro =
-{
+static const struct si_dte_data dte_data_venus_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
5,
@@ -1085,8 +1056,7 @@ static const struct si_dte_data dte_data_venus_pro =
true
};
-static struct si_cac_config_reg cac_weights_oland[] =
-{
+static struct si_cac_config_reg cac_weights_oland[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -1150,8 +1120,7 @@ static struct si_cac_config_reg cac_weights_oland[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_mars_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_mars_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1215,8 +1184,7 @@ static const struct si_cac_config_reg cac_weights_mars_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_mars_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_mars_xt[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1280,8 +1248,7 @@ static const struct si_cac_config_reg cac_weights_mars_xt[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_oland_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_oland_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1345,8 +1312,7 @@ static const struct si_cac_config_reg cac_weights_oland_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_oland_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_oland_xt[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1410,8 +1376,7 @@ static const struct si_cac_config_reg cac_weights_oland_xt[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_oland[] =
-{
+static const struct si_cac_config_reg lcac_oland[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -1457,8 +1422,7 @@ static const struct si_cac_config_reg lcac_oland[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_mars_pro[] =
-{
+static const struct si_cac_config_reg lcac_mars_pro[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -1504,13 +1468,11 @@ static const struct si_cac_config_reg lcac_mars_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_override_oland[] =
-{
+static const struct si_cac_config_reg cac_override_oland[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_oland =
-{
+static const struct si_powertune_data powertune_data_oland = {
((1 << 16) | 0x6993),
5,
0,
@@ -1540,8 +1502,7 @@ static const struct si_powertune_data powertune_data_oland =
true
};
-static const struct si_powertune_data powertune_data_mars_pro =
-{
+static const struct si_powertune_data powertune_data_mars_pro = {
((1 << 16) | 0x6993),
5,
0,
@@ -1571,8 +1532,7 @@ static const struct si_powertune_data powertune_data_mars_pro =
true
};
-static const struct si_dte_data dte_data_oland =
-{
+static const struct si_dte_data dte_data_oland = {
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
@@ -1589,8 +1549,7 @@ static const struct si_dte_data dte_data_oland =
false
};
-static const struct si_dte_data dte_data_mars_pro =
-{
+static const struct si_dte_data dte_data_mars_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -1607,8 +1566,7 @@ static const struct si_dte_data dte_data_mars_pro =
true
};
-static const struct si_dte_data dte_data_sun_xt =
-{
+static const struct si_dte_data dte_data_sun_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -1626,8 +1584,7 @@ static const struct si_dte_data dte_data_sun_xt =
};
-static const struct si_cac_config_reg cac_weights_hainan[] =
-{
+static const struct si_cac_config_reg cac_weights_hainan[] = {
{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
@@ -1691,8 +1648,7 @@ static const struct si_cac_config_reg cac_weights_hainan[] =
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_hainan =
-{
+static const struct si_powertune_data powertune_data_hainan = {
((1 << 16) | 0x6993),
5,
0,
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index aa857906ef93..4887edebd348 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -26,15 +26,13 @@
#include "ni_dpm.h"
#include "sislands_smc.h"
-enum si_cac_config_reg_type
-{
+enum si_cac_config_reg_type {
SISLANDS_CACCONFIG_MMR = 0,
SISLANDS_CACCONFIG_CGIND,
SISLANDS_CACCONFIG_MAX
};
-struct si_cac_config_reg
-{
+struct si_cac_config_reg {
u32 offset;
u32 mask;
u32 shift;
@@ -42,8 +40,7 @@ struct si_cac_config_reg
enum si_cac_config_reg_type type;
};
-struct si_powertune_data
-{
+struct si_powertune_data {
u32 cac_window;
u32 l2_lta_window_size_default;
u8 lts_truncate_default;
@@ -56,8 +53,7 @@ struct si_powertune_data
bool enable_powertune_by_default;
};
-struct si_dyn_powertune_data
-{
+struct si_dyn_powertune_data {
u32 cac_leakage;
s32 leakage_minimum_temperature;
u32 wintime;
@@ -68,8 +64,7 @@ struct si_dyn_powertune_data
bool disable_uvd_powertune;
};
-struct si_dte_data
-{
+struct si_dte_data {
u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
u32 k;
@@ -122,8 +117,7 @@ struct si_mc_reg_table {
#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
-struct si_leakage_voltage_entry
-{
+struct si_leakage_voltage_entry {
u16 voltage;
u16 leakage_index;
};
@@ -131,8 +125,7 @@ struct si_leakage_voltage_entry
#define SISLANDS_LEAKAGE_INDEX0 0xff01
#define SISLANDS_MAX_LEAKAGE_COUNT 4
-struct si_leakage_voltage
-{
+struct si_leakage_voltage {
u16 count;
struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
};
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
index 75a380a15292..985d720dbc0d 100644
--- a/drivers/gpu/drm/radeon/smu7.h
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -82,8 +82,7 @@
#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
-struct SMU7_PIDController
-{
+struct SMU7_PIDController {
uint32_t Ki;
int32_t LFWindupUL;
int32_t LFWindupLL;
@@ -117,8 +116,7 @@ typedef struct SMU7_PIDController SMU7_PIDController;
#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
-struct SMU7_Firmware_Header
-{
+struct SMU7_Firmware_Header {
uint32_t Digest[5];
uint32_t Version;
uint32_t HeaderSize;
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
index 0b0b404ff091..1f63cbbd6515 100644
--- a/drivers/gpu/drm/radeon/smu7_discrete.h
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -35,8 +35,7 @@
#define SMU7_NUM_GPU_TES 1
#define SMU7_NUM_NON_TES 2
-struct SMU7_SoftRegisters
-{
+struct SMU7_SoftRegisters {
uint32_t RefClockFrequency;
uint32_t PmTimerP;
uint32_t FeatureEnables;
@@ -89,8 +88,7 @@ struct SMU7_SoftRegisters
typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
-struct SMU7_Discrete_VoltageLevel
-{
+struct SMU7_Discrete_VoltageLevel {
uint16_t Voltage;
uint16_t StdVoltageHiSidd;
uint16_t StdVoltageLoSidd;
@@ -100,8 +98,7 @@ struct SMU7_Discrete_VoltageLevel
typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
-struct SMU7_Discrete_GraphicsLevel
-{
+struct SMU7_Discrete_GraphicsLevel {
uint32_t Flags;
uint32_t MinVddc;
uint32_t MinVddcPhases;
@@ -131,8 +128,7 @@ struct SMU7_Discrete_GraphicsLevel
typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
-struct SMU7_Discrete_ACPILevel
-{
+struct SMU7_Discrete_ACPILevel {
uint32_t Flags;
uint32_t MinVddc;
uint32_t MinVddcPhases;
@@ -153,8 +149,7 @@ struct SMU7_Discrete_ACPILevel
typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
-struct SMU7_Discrete_Ulv
-{
+struct SMU7_Discrete_Ulv {
uint32_t CcPwrDynRm;
uint32_t CcPwrDynRm1;
uint16_t VddcOffset;
@@ -165,8 +160,7 @@ struct SMU7_Discrete_Ulv
typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
-struct SMU7_Discrete_MemoryLevel
-{
+struct SMU7_Discrete_MemoryLevel {
uint32_t MinVddc;
uint32_t MinVddcPhases;
uint32_t MinVddci;
@@ -206,8 +200,7 @@ struct SMU7_Discrete_MemoryLevel
typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
-struct SMU7_Discrete_LinkLevel
-{
+struct SMU7_Discrete_LinkLevel {
uint8_t PcieGenSpeed;
uint8_t PcieLaneCount;
uint8_t EnabledForActivity;
@@ -220,8 +213,7 @@ struct SMU7_Discrete_LinkLevel
typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
-struct SMU7_Discrete_MCArbDramTimingTableEntry
-{
+struct SMU7_Discrete_MCArbDramTimingTableEntry {
uint32_t McArbDramTiming;
uint32_t McArbDramTiming2;
uint8_t McArbBurstTime;
@@ -230,15 +222,13 @@ struct SMU7_Discrete_MCArbDramTimingTableEntry
typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
-struct SMU7_Discrete_MCArbDramTimingTable
-{
+struct SMU7_Discrete_MCArbDramTimingTable {
SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
};
typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
-struct SMU7_Discrete_UvdLevel
-{
+struct SMU7_Discrete_UvdLevel {
uint32_t VclkFrequency;
uint32_t DclkFrequency;
uint16_t MinVddc;
@@ -250,8 +240,7 @@ struct SMU7_Discrete_UvdLevel
typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
-struct SMU7_Discrete_ExtClkLevel
-{
+struct SMU7_Discrete_ExtClkLevel {
uint32_t Frequency;
uint16_t MinVoltage;
uint8_t MinPhases;
@@ -260,8 +249,7 @@ struct SMU7_Discrete_ExtClkLevel
typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
-struct SMU7_Discrete_StateInfo
-{
+struct SMU7_Discrete_StateInfo {
uint32_t SclkFrequency;
uint32_t MclkFrequency;
uint32_t VclkFrequency;
@@ -285,8 +273,7 @@ struct SMU7_Discrete_StateInfo
typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
-struct SMU7_Discrete_DpmTable
-{
+struct SMU7_Discrete_DpmTable {
SMU7_PIDController GraphicsPIDController;
SMU7_PIDController MemoryPIDController;
SMU7_PIDController LinkPIDController;
@@ -406,23 +393,20 @@ typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
-struct SMU7_Discrete_MCRegisterAddress
-{
+struct SMU7_Discrete_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
-struct SMU7_Discrete_MCRegisterSet
-{
+struct SMU7_Discrete_MCRegisterSet {
uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
-struct SMU7_Discrete_MCRegisters
-{
+struct SMU7_Discrete_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
@@ -431,8 +415,7 @@ struct SMU7_Discrete_MCRegisters
typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
-struct SMU7_Discrete_FanTable
-{
+struct SMU7_Discrete_FanTable {
uint16_t FdoMode;
int16_t TempMin;
int16_t TempMed;
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
index 78ada9ffd508..e130f52fe8d6 100644
--- a/drivers/gpu/drm/radeon/smu7_fusion.h
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -36,8 +36,7 @@
#define SMU7_NUM_NON_TES 2
// All 'soft registers' should be uint32_t.
-struct SMU7_SoftRegisters
-{
+struct SMU7_SoftRegisters {
uint32_t RefClockFrequency;
uint32_t PmTimerP;
uint32_t FeatureEnables;
@@ -80,8 +79,7 @@ struct SMU7_SoftRegisters
typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
-struct SMU7_Fusion_GraphicsLevel
-{
+struct SMU7_Fusion_GraphicsLevel {
uint32_t MinVddNb;
uint32_t SclkFrequency;
@@ -111,8 +109,7 @@ struct SMU7_Fusion_GraphicsLevel
typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
-struct SMU7_Fusion_GIOLevel
-{
+struct SMU7_Fusion_GIOLevel {
uint8_t EnabledForActivity;
uint8_t LclkDid;
uint8_t Vid;
@@ -137,8 +134,7 @@ struct SMU7_Fusion_GIOLevel
typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
// UVD VCLK/DCLK state (level) definition.
-struct SMU7_Fusion_UvdLevel
-{
+struct SMU7_Fusion_UvdLevel {
uint32_t VclkFrequency;
uint32_t DclkFrequency;
uint16_t MinVddNb;
@@ -155,8 +151,7 @@ struct SMU7_Fusion_UvdLevel
typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
// Clocks for other external blocks (VCE, ACP, SAMU).
-struct SMU7_Fusion_ExtClkLevel
-{
+struct SMU7_Fusion_ExtClkLevel {
uint32_t Frequency;
uint16_t MinVoltage;
uint8_t Divider;
@@ -166,8 +161,7 @@ struct SMU7_Fusion_ExtClkLevel
};
typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
-struct SMU7_Fusion_ACPILevel
-{
+struct SMU7_Fusion_ACPILevel {
uint32_t Flags;
uint32_t MinVddNb;
uint32_t SclkFrequency;
@@ -181,8 +175,7 @@ struct SMU7_Fusion_ACPILevel
typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
-struct SMU7_Fusion_NbDpm
-{
+struct SMU7_Fusion_NbDpm {
uint8_t DpmXNbPsHi;
uint8_t DpmXNbPsLo;
uint8_t Dpm0PgNbPsHi;
@@ -197,8 +190,7 @@ struct SMU7_Fusion_NbDpm
typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
-struct SMU7_Fusion_StateInfo
-{
+struct SMU7_Fusion_StateInfo {
uint32_t SclkFrequency;
uint32_t LclkFrequency;
uint32_t VclkFrequency;
@@ -214,8 +206,7 @@ struct SMU7_Fusion_StateInfo
typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
-struct SMU7_Fusion_DpmTable
-{
+struct SMU7_Fusion_DpmTable {
uint32_t SystemFlags;
SMU7_PIDController GraphicsPIDController;
@@ -230,12 +221,12 @@ struct SMU7_Fusion_DpmTable
uint8_t SamuLevelCount;
uint16_t FpsHighT;
- SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_GraphicsLevel GraphicsLevel[SMU__NUM_SCLK_DPM_STATE];
SMU7_Fusion_ACPILevel ACPILevel;
- SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
- SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
- SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
- SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+ SMU7_Fusion_UvdLevel UvdLevel[SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel VceLevel[SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel AcpLevel[SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel SamuLevel[SMU7_MAX_LEVELS_SAMU];
uint8_t UvdBootLevel;
uint8_t VceBootLevel;
@@ -266,10 +257,9 @@ struct SMU7_Fusion_DpmTable
};
-struct SMU7_Fusion_GIODpmTable
-{
+struct SMU7_Fusion_GIODpmTable {
- SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
+ SMU7_Fusion_GIOLevel GIOLevel[SMU7_MAX_LEVELS_GIO];
SMU7_PIDController GioPIDController;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index d49c145db437..21d27e6235f3 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -33,8 +33,7 @@
#define SUMO_MINIMUM_ENGINE_CLOCK 800
#define BOOST_DPM_LEVEL 7
-static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
-{
+static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = {
SUMO_UTC_DFLT_00,
SUMO_UTC_DFLT_01,
SUMO_UTC_DFLT_02,
@@ -52,8 +51,7 @@ static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
SUMO_UTC_DFLT_14,
};
-static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
-{
+static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = {
SUMO_DTC_DFLT_00,
SUMO_DTC_DFLT_01,
SUMO_DTC_DFLT_02,
@@ -109,11 +107,11 @@ static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
local1 = RREG32(CG_CGTT_LOCAL_1);
if (enable) {
- WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
- WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
+ WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
} else {
- WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
- WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
+ WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
}
}
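
The whitespace-only hunks in sumo_mg_clockgating_enable() touch a recurring idiom: update only the bits selected by a mask while preserving the rest of a cached register value. A minimal sketch of the read-modify-write helper this expression amounts to (rmw_field() is a hypothetical name, not part of the driver):

	/* Replace the bits covered by @mask with @val; keep all other bits. */
	static inline u32 rmw_field(u32 old, u32 mask, u32 val)
	{
		return (val & mask) | (old & ~mask);
	}

	/*
	 * Usage mirroring the hunk above:
	 *   WREG32(CG_CGTT_LOCAL_0, rmw_field(local0, CGCG_CGTT_LOCAL0_MASK, 0));
	 */
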
@@ -702,9 +700,9 @@ static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
u32 nbps1_new = 0;
if (old_ps != NULL)
- nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
+ nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
- nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
+ nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
if (nbps1_old == 0 && nbps1_new == 1)
sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index ef1cc7bad20a..b9a2c7ccc881 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -39,8 +39,7 @@
#ifndef TRINITY_MGCG_SEQUENCE
#define TRINITY_MGCG_SEQUENCE 100
-static const u32 trinity_mgcg_shls_default[] =
-{
+static const u32 trinity_mgcg_shls_default[] = {
/* Register, Value, Mask */
0x0000802c, 0xc0000000, 0xffffffff,
0x00003fc4, 0xc0000000, 0xffffffff,
@@ -122,8 +121,7 @@ static const u32 trinity_mgcg_shls_default[] =
#ifndef TRINITY_SYSLS_SEQUENCE
#define TRINITY_SYSLS_SEQUENCE 100
-static const u32 trinity_sysls_disable[] =
-{
+static const u32 trinity_sysls_disable[] = {
/* Register, Value, Mask */
0x0000d0c0, 0x00000000, 0xffffffff,
0x0000d8c0, 0x00000000, 0xffffffff,
@@ -146,8 +144,7 @@ static const u32 trinity_sysls_disable[] =
0x00006dfc, 0x0000007f, 0xffffffff
};
-static const u32 trinity_sysls_enable[] =
-{
+static const u32 trinity_sysls_enable[] = {
/* Register, Value, Mask */
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
@@ -169,8 +166,7 @@ static const u32 trinity_sysls_enable[] =
};
#endif
-static const u32 trinity_override_mgpg_sequences[] =
-{
+static const u32 trinity_override_mgpg_sequences[] = {
/* Register, Value */
0x00000200, 0xE030032C,
0x00000204, 0x00000FFF,
@@ -366,9 +362,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
local1 = RREG32_CG(CG_CGTT_LOCAL_1);
WREG32_CG(CG_CGTT_LOCAL_0,
- (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+ (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
WREG32_CG(CG_CGTT_LOCAL_1,
- (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_ENABLE);
} else {
@@ -378,9 +374,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
local1 = RREG32_CG(CG_CGTT_LOCAL_1);
WREG32_CG(CG_CGTT_LOCAL_0,
- CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+ CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
WREG32_CG(CG_CGTT_LOCAL_1,
- CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
}
}
@@ -1434,7 +1430,7 @@ static void trinity_adjust_uvd_state(struct radeon_device *rdev,
if (pi->uvd_dpm && r600_is_uvd_state(rps->class, rps->class2)) {
high_index = trinity_get_uvd_clock_index(rdev, rps);
- switch(high_index) {
+ switch (high_index) {
case 3:
case 2:
low_index = 1;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index c261657750ca..431e2b68d21e 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -64,8 +64,7 @@ struct trinity_ps {
#define TRINITY_NUM_NBPSTATES 4
-struct trinity_uvd_clock_table_entry
-{
+struct trinity_uvd_clock_table_entry {
u32 vclk;
u32 dclk;
u8 vclk_did;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 58557c2263a7..5684639d20a6 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -142,7 +142,7 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
- WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));
+ WREG32(UVD_FW_START, *((uint32_t *)rdev->uvd.cpu_addr));
return 0;
}
diff --git a/drivers/gpu/drm/renesas/Kconfig b/drivers/gpu/drm/renesas/Kconfig
index 3777dad17f81..21862a8ef710 100644
--- a/drivers/gpu/drm/renesas/Kconfig
+++ b/drivers/gpu/drm/renesas/Kconfig
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
source "drivers/gpu/drm/renesas/rcar-du/Kconfig"
+source "drivers/gpu/drm/renesas/rz-du/Kconfig"
source "drivers/gpu/drm/renesas/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/renesas/Makefile b/drivers/gpu/drm/renesas/Makefile
index ec0e89e7a592..b8d8bc53967f 100644
--- a/drivers/gpu/drm/renesas/Makefile
+++ b/drivers/gpu/drm/renesas/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += rcar-du/
+obj-y += rz-du/
obj-$(CONFIG_DRM_SHMOBILE) += shmobile/
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
new file mode 100644
index 000000000000..5f0db2c5fee6
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config DRM_RZG2L_DU
+ tristate "DRM Support for RZ/G2L Display Unit"
+ depends on ARCH_RZG2L || COMPILE_TEST
+ depends on DRM && OF
+ depends on VIDEO_RENESAS_VSP1
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ help
+	  Choose this option if you have an RZ/G2L-based chipset.
+ If M is selected the module will be called rzg2l-du-drm.
diff --git a/drivers/gpu/drm/renesas/rz-du/Makefile b/drivers/gpu/drm/renesas/rz-du/Makefile
new file mode 100644
index 000000000000..663b82a2577f
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+rzg2l-du-drm-y := rzg2l_du_crtc.o \
+ rzg2l_du_drv.o \
+ rzg2l_du_encoder.o \
+		   rzg2l_du_kms.o
+
+rzg2l-du-drm-$(CONFIG_VIDEO_RENESAS_VSP1) += rzg2l_du_vsp.o
+obj-$(CONFIG_DRM_RZG2L_DU) += rzg2l-du-drm.o
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
new file mode 100644
index 000000000000..6e7aac6219be
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit CRTCs
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_crtc.c
+ */
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "rzg2l_du_crtc.h"
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_encoder.h"
+#include "rzg2l_du_kms.h"
+#include "rzg2l_du_vsp.h"
+
+#define DU_MCR0 0x00
+#define DU_MCR0_DI_EN BIT(8)
+
+#define DU_DITR0 0x10
+#define DU_DITR0_DEMD_HIGH (BIT(8) | BIT(9))
+#define DU_DITR0_VSPOL BIT(16)
+#define DU_DITR0_HSPOL BIT(17)
+
+#define DU_DITR1 0x14
+#define DU_DITR1_VSA(x) ((x) << 0)
+#define DU_DITR1_VACTIVE(x) ((x) << 16)
+
+#define DU_DITR2 0x18
+#define DU_DITR2_VBP(x) ((x) << 0)
+#define DU_DITR2_VFP(x) ((x) << 16)
+
+#define DU_DITR3 0x1c
+#define DU_DITR3_HSA(x) ((x) << 0)
+#define DU_DITR3_HACTIVE(x) ((x) << 16)
+
+#define DU_DITR4 0x20
+#define DU_DITR4_HBP(x) ((x) << 0)
+#define DU_DITR4_HFP(x) ((x) << 16)
+
+#define DU_MCR1 0x40
+#define DU_MCR1_PB_AUTOCLR BIT(16)
+
+#define DU_PBCR0 0x4c
+#define DU_PBCR0_PB_DEP(x) ((x) << 0)
+
+/* -----------------------------------------------------------------------------
+ * Hardware Setup
+ */
+
+static void rzg2l_du_crtc_set_display_timing(struct rzg2l_du_crtc *rcrtc)
+{
+ const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
+ unsigned long mode_clock = mode->clock * 1000;
+ u32 ditr0, ditr1, ditr2, ditr3, ditr4, pbcr0;
+ struct rzg2l_du_device *rcdu = rcrtc->dev;
+
+ clk_prepare_enable(rcrtc->rzg2l_clocks.dclk);
+ clk_set_rate(rcrtc->rzg2l_clocks.dclk, mode_clock);
+
+ ditr0 = (DU_DITR0_DEMD_HIGH
+ | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DU_DITR0_VSPOL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DU_DITR0_HSPOL : 0));
+
+ ditr1 = DU_DITR1_VSA(mode->vsync_end - mode->vsync_start)
+ | DU_DITR1_VACTIVE(mode->vdisplay);
+
+ ditr2 = DU_DITR2_VBP(mode->vtotal - mode->vsync_end)
+ | DU_DITR2_VFP(mode->vsync_start - mode->vdisplay);
+
+ ditr3 = DU_DITR3_HSA(mode->hsync_end - mode->hsync_start)
+ | DU_DITR3_HACTIVE(mode->hdisplay);
+
+ ditr4 = DU_DITR4_HBP(mode->htotal - mode->hsync_end)
+ | DU_DITR4_HFP(mode->hsync_start - mode->hdisplay);
+
+ pbcr0 = DU_PBCR0_PB_DEP(0x1f);
+
+ writel(ditr0, rcdu->mmio + DU_DITR0);
+ writel(ditr1, rcdu->mmio + DU_DITR1);
+ writel(ditr2, rcdu->mmio + DU_DITR2);
+ writel(ditr3, rcdu->mmio + DU_DITR3);
+ writel(ditr4, rcdu->mmio + DU_DITR4);
+ writel(pbcr0, rcdu->mmio + DU_PBCR0);
+
+ /* Enable auto clear */
+ writel(DU_MCR1_PB_AUTOCLR, rcdu->mmio + DU_MCR1);
+}
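
rzg2l_du_crtc_set_display_timing() derives every sync and porch field from the standard drm_display_mode geometry, where hdisplay <= hsync_start <= hsync_end <= htotal (and likewise vertically). A worked example, assuming a hypothetical CEA 1920x1080@60 mode rather than anything in this patch:

	u32 hfp = mode->hsync_start - mode->hdisplay;	/* front porch: 2008 - 1920 = 88  */
	u32 hsa = mode->hsync_end - mode->hsync_start;	/* sync width:  2052 - 2008 = 44  */
	u32 hbp = mode->htotal - mode->hsync_end;	/* back porch:  2200 - 2052 = 148 */
	/* the VFP/VSA/VBP fields in DU_DITR1/DU_DITR2 follow the same arithmetic vertically */
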
+
+/* -----------------------------------------------------------------------------
+ * Page Flip
+ */
+
+void rzg2l_du_crtc_finish_page_flip(struct rzg2l_du_crtc *rcrtc)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = rcrtc->event;
+ rcrtc->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (!event)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_crtc_send_vblank_event(&rcrtc->crtc, event);
+ wake_up(&rcrtc->flip_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ drm_crtc_vblank_put(&rcrtc->crtc);
+}
+
+static bool rzg2l_du_crtc_page_flip_pending(struct rzg2l_du_crtc *rcrtc)
+{
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+ bool pending;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = rcrtc->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
+}
+
+static void rzg2l_du_crtc_wait_page_flip(struct rzg2l_du_crtc *rcrtc)
+{
+ struct rzg2l_du_device *rcdu = rcrtc->dev;
+
+ if (wait_event_timeout(rcrtc->flip_wait,
+ !rzg2l_du_crtc_page_flip_pending(rcrtc),
+ msecs_to_jiffies(50)))
+ return;
+
+ dev_warn(rcdu->dev, "page flip timeout\n");
+
+ rzg2l_du_crtc_finish_page_flip(rcrtc);
+}
+
+/* -----------------------------------------------------------------------------
+ * Start/Stop and Suspend/Resume
+ */
+
+static void rzg2l_du_crtc_setup(struct rzg2l_du_crtc *rcrtc)
+{
+	/* Configure display timings and output routing. */
+ rzg2l_du_crtc_set_display_timing(rcrtc);
+
+ /* Enable the VSP compositor. */
+ rzg2l_du_vsp_enable(rcrtc);
+
+ /* Turn vertical blanking interrupt reporting on. */
+ drm_crtc_vblank_on(&rcrtc->crtc);
+}
+
+static int rzg2l_du_crtc_get(struct rzg2l_du_crtc *rcrtc)
+{
+ int ret;
+
+ /*
+ * Guard against double-get, as the function is called from both the
+ * .atomic_enable() and .atomic_flush() handlers.
+ */
+ if (rcrtc->initialized)
+ return 0;
+
+ ret = clk_prepare_enable(rcrtc->rzg2l_clocks.aclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(rcrtc->rzg2l_clocks.pclk);
+ if (ret < 0)
+ goto error_bus_clock;
+
+ ret = reset_control_deassert(rcrtc->rstc);
+ if (ret < 0)
+ goto error_peri_clock;
+
+ rzg2l_du_crtc_setup(rcrtc);
+ rcrtc->initialized = true;
+
+ return 0;
+
+error_peri_clock:
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.pclk);
+error_bus_clock:
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.aclk);
+ return ret;
+}
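
rzg2l_du_crtc_get() follows the kernel's goto-unwind convention: each acquisition after the first failure point jumps to a label that releases everything already taken, in reverse order. A minimal sketch of the idiom with two generic resources (acquire_a()/release_a() are illustrative stand-ins, not driver functions):

	ret = acquire_a(dev);
	if (ret < 0)
		return ret;

	ret = acquire_b(dev);
	if (ret < 0)
		goto err_release_a;	/* unwind in reverse acquisition order */

	return 0;

err_release_a:
	release_a(dev);
	return ret;
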
+
+static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc)
+{
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.dclk);
+ reset_control_assert(rcrtc->rstc);
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.pclk);
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.aclk);
+
+ rcrtc->initialized = false;
+}
+
+static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start)
+{
+ struct rzg2l_du_device *rcdu = rcrtc->dev;
+
+ writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0);
+}
+
+static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc)
+{
+ rzg2l_du_start_stop(rcrtc, true);
+}
+
+static void rzg2l_du_crtc_stop(struct rzg2l_du_crtc *rcrtc)
+{
+ struct drm_crtc *crtc = &rcrtc->crtc;
+
+ /*
+ * Disable vertical blanking interrupt reporting. We first need to wait
+ * for page flip completion before stopping the CRTC as userspace
+ * expects page flips to eventually complete.
+ */
+ rzg2l_du_crtc_wait_page_flip(rcrtc);
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable the VSP compositor. */
+ rzg2l_du_vsp_disable(rcrtc);
+
+ rzg2l_du_start_stop(rcrtc, false);
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC Functions
+ */
+
+static void rzg2l_du_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rzg2l_du_crtc_get(rcrtc);
+
+ rzg2l_du_crtc_start(rcrtc);
+}
+
+static void rzg2l_du_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rzg2l_du_crtc_stop(rcrtc);
+ rzg2l_du_crtc_put(rcrtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void rzg2l_du_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ WARN_ON(!crtc->state->enable);
+
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ rcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ rzg2l_du_vsp_atomic_flush(rcrtc);
+}
+
+static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+ .atomic_flush = rzg2l_du_crtc_atomic_flush,
+ .atomic_enable = rzg2l_du_crtc_atomic_enable,
+ .atomic_disable = rzg2l_du_crtc_atomic_disable,
+};
+
+static struct drm_crtc_state *
+rzg2l_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc_state *state;
+ struct rzg2l_du_crtc_state *copy;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = to_rzg2l_crtc_state(crtc->state);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);
+
+ return &copy->state;
+}
+
+static void rzg2l_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_rzg2l_crtc_state(state));
+}
+
+static void rzg2l_du_crtc_reset(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc_state *state;
+
+ if (crtc->state) {
+ rzg2l_du_crtc_atomic_destroy_state(crtc, crtc->state);
+ crtc->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &state->state);
+}
+
+static int rzg2l_du_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rcrtc->vblank_enable = true;
+
+ return 0;
+}
+
+static void rzg2l_du_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rcrtc->vblank_enable = false;
+}
+
+static const struct drm_crtc_funcs crtc_funcs_rz = {
+ .reset = rzg2l_du_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = rzg2l_du_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = rzg2l_du_crtc_atomic_destroy_state,
+ .enable_vblank = rzg2l_du_crtc_enable_vblank,
+ .disable_vblank = rzg2l_du_crtc_disable_vblank,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+int rzg2l_du_crtc_create(struct rzg2l_du_device *rcdu)
+{
+ struct rzg2l_du_crtc *rcrtc = &rcdu->crtcs[0];
+ struct drm_crtc *crtc = &rcrtc->crtc;
+ struct drm_plane *primary;
+ int ret;
+
+ rcrtc->rstc = devm_reset_control_get_shared(rcdu->dev, NULL);
+ if (IS_ERR(rcrtc->rstc)) {
+ dev_err(rcdu->dev, "can't get cpg reset\n");
+ return PTR_ERR(rcrtc->rstc);
+ }
+
+ rcrtc->rzg2l_clocks.aclk = devm_clk_get(rcdu->dev, "aclk");
+ if (IS_ERR(rcrtc->rzg2l_clocks.aclk)) {
+ dev_err(rcdu->dev, "no axi clock for DU\n");
+ return PTR_ERR(rcrtc->rzg2l_clocks.aclk);
+ }
+
+ rcrtc->rzg2l_clocks.pclk = devm_clk_get(rcdu->dev, "pclk");
+ if (IS_ERR(rcrtc->rzg2l_clocks.pclk)) {
+ dev_err(rcdu->dev, "no peripheral clock for DU\n");
+ return PTR_ERR(rcrtc->rzg2l_clocks.pclk);
+ }
+
+ rcrtc->rzg2l_clocks.dclk = devm_clk_get(rcdu->dev, "vclk");
+ if (IS_ERR(rcrtc->rzg2l_clocks.dclk)) {
+ dev_err(rcdu->dev, "no video clock for DU\n");
+ return PTR_ERR(rcrtc->rzg2l_clocks.dclk);
+ }
+
+ init_waitqueue_head(&rcrtc->flip_wait);
+ rcrtc->dev = rcdu;
+
+ primary = rzg2l_du_vsp_get_drm_plane(rcrtc, rcrtc->vsp_pipe);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drmm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
+ &crtc_funcs_rz, NULL);
+ if (ret < 0)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+
+ return 0;
+}
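
The three named clocks are fetched one by one with devm_clk_get(); the clk_bulk API would be a plausible alternative when no per-clock diagnostic is needed. A hedged sketch, shown only to contrast the design choice (this is not what the driver does):

	struct clk_bulk_data clks[] = {
		{ .id = "aclk" }, { .id = "pclk" }, { .id = "vclk" },
	};
	int ret;

	ret = devm_clk_bulk_get(rcdu->dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

The explicit devm_clk_get() calls keep the distinct "no axi/peripheral/video clock" error messages, which the bulk API cannot report per clock.
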
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h
new file mode 100644
index 000000000000..cbba38acc377
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit CRTCs
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_crtc.h
+ */
+
+#ifndef __RZG2L_DU_CRTC_H__
+#define __RZG2L_DU_CRTC_H__
+
+#include <linux/container_of.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_writeback.h>
+
+#include <media/vsp1.h>
+
+struct clk;
+struct reset_control;
+struct rzg2l_du_vsp;
+struct rzg2l_du_format_info;
+
+/**
+ * struct rzg2l_du_crtc - the CRTC, representing a DU superposition processor
+ * @crtc: base DRM CRTC
+ * @dev: the DU device
+ * @initialized: whether the CRTC has been initialized and clocks enabled
+ * @vblank_enable: whether vblank events are enabled on this CRTC
+ * @event: event to post when the pending page flip completes
+ * @flip_wait: wait queue used to signal page flip completion
+ * @vsp: VSP feeding video to this CRTC
+ * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
+ * @rstc: reset controller
+ * @rzg2l_clocks: the bus, main and video clock
+ */
+struct rzg2l_du_crtc {
+ struct drm_crtc crtc;
+
+ struct rzg2l_du_device *dev;
+ bool initialized;
+
+ bool vblank_enable;
+ struct drm_pending_vblank_event *event;
+ wait_queue_head_t flip_wait;
+
+ struct rzg2l_du_vsp *vsp;
+ unsigned int vsp_pipe;
+
+ const char *const *sources;
+ unsigned int sources_count;
+
+ struct reset_control *rstc;
+ struct {
+ struct clk *aclk;
+ struct clk *pclk;
+ struct clk *dclk;
+ } rzg2l_clocks;
+};
+
+static inline struct rzg2l_du_crtc *to_rzg2l_crtc(struct drm_crtc *c)
+{
+ return container_of(c, struct rzg2l_du_crtc, crtc);
+}
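
to_rzg2l_crtc() is the usual DRM upcast: the driver embeds the core struct drm_crtc and recovers the enclosing object with container_of(), which is pointer arithmetic on the member offset. Conceptually (an expansion sketch, not code from the patch):

	/* container_of(c, struct rzg2l_du_crtc, crtc) is equivalent to: */
	struct rzg2l_du_crtc *rcrtc =
		(struct rzg2l_du_crtc *)((char *)c -
					 offsetof(struct rzg2l_du_crtc, crtc));
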
+
+/**
+ * struct rzg2l_du_crtc_state - Driver-specific CRTC state
+ * @state: base DRM CRTC state
+ * @outputs: bitmask of the outputs (enum rzg2l_du_output) driven by this CRTC
+ */
+struct rzg2l_du_crtc_state {
+ struct drm_crtc_state state;
+ unsigned int outputs;
+};
+
+static inline struct rzg2l_du_crtc_state *to_rzg2l_crtc_state(struct drm_crtc_state *s)
+{
+ return container_of(s, struct rzg2l_du_crtc_state, state);
+}
+
+int rzg2l_du_crtc_create(struct rzg2l_du_device *rcdu);
+
+void rzg2l_du_crtc_finish_page_flip(struct rzg2l_du_crtc *rcrtc);
+
+#endif /* __RZG2L_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
new file mode 100644
index 000000000000..470d34da1d6c
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit DRM driver
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_drv.c
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_kms.h"
+
+/* -----------------------------------------------------------------------------
+ * Device Information
+ */
+
+static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DSI0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ [RZG2L_DU_OUTPUT_DPAD0] = {
+ .possible_outputs = BIT(0),
+ .port = 1,
+ }
+ }
+};
+
+static const struct of_device_id rzg2l_du_of_table[] = {
+ { .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rzg2l_du_of_table);
+
+const char *rzg2l_du_output_name(enum rzg2l_du_output output)
+{
+ static const char * const names[] = {
+ [RZG2L_DU_OUTPUT_DSI0] = "DSI0",
+ [RZG2L_DU_OUTPUT_DPAD0] = "DPAD0"
+ };
+
+ if (output >= ARRAY_SIZE(names))
+ return "UNKNOWN";
+
+ return names[output];
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM operations
+ */
+
+DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
+
+static const struct drm_driver rzg2l_du_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .dumb_create = rzg2l_du_dumb_create,
+ .fops = &rzg2l_du_fops,
+ .name = "rzg2l-du",
+ .desc = "Renesas RZ/G2L Display Unit",
+ .date = "20230410",
+ .major = 1,
+ .minor = 0,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform driver
+ */
+
+static void rzg2l_du_remove(struct platform_device *pdev)
+{
+ struct rzg2l_du_device *rcdu = platform_get_drvdata(pdev);
+ struct drm_device *ddev = &rcdu->ddev;
+
+ drm_dev_unregister(ddev);
+ drm_atomic_helper_shutdown(ddev);
+
+ drm_kms_helper_poll_fini(ddev);
+}
+
+static void rzg2l_du_shutdown(struct platform_device *pdev)
+{
+ struct rzg2l_du_device *rcdu = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&rcdu->ddev);
+}
+
+static int rzg2l_du_probe(struct platform_device *pdev)
+{
+ struct rzg2l_du_device *rcdu;
+ int ret;
+
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ /* Allocate and initialize the RZ/G2L device structure. */
+ rcdu = devm_drm_dev_alloc(&pdev->dev, &rzg2l_du_driver,
+ struct rzg2l_du_device, ddev);
+ if (IS_ERR(rcdu))
+ return PTR_ERR(rcdu);
+
+ rcdu->dev = &pdev->dev;
+ rcdu->info = of_device_get_match_data(rcdu->dev);
+
+ platform_set_drvdata(pdev, rcdu);
+
+ /* I/O resources */
+ rcdu->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
+
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ /* DRM/KMS objects */
+ ret = rzg2l_du_modeset_init(rcdu);
+ if (ret < 0) {
+ /*
+ * Don't use dev_err_probe(), as it would overwrite the probe
+ * deferral reason recorded in rzg2l_du_modeset_init().
+ */
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to initialize DRM/KMS (%d)\n", ret);
+ goto error;
+ }
+
+ /*
+ * Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ ret = drm_dev_register(&rcdu->ddev, 0);
+ if (ret)
+ goto error;
+
+ drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev));
+
+ drm_fbdev_generic_setup(&rcdu->ddev, 32);
+
+ return 0;
+
+error:
+ drm_kms_helper_poll_fini(&rcdu->ddev);
+ return ret;
+}
+
+static struct platform_driver rzg2l_du_platform_driver = {
+ .probe = rzg2l_du_probe,
+ .remove_new = rzg2l_du_remove,
+ .shutdown = rzg2l_du_shutdown,
+ .driver = {
+ .name = "rzg2l-du",
+ .of_match_table = rzg2l_du_of_table,
+ },
+};
+
+module_platform_driver(rzg2l_du_platform_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L Display Unit DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h
new file mode 100644
index 000000000000..58806c2a8f2b
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit DRM driver
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_drv.h
+ */
+
+#ifndef __RZG2L_DU_DRV_H__
+#define __RZG2L_DU_DRV_H__
+
+#include <linux/kernel.h>
+
+#include <drm/drm_device.h>
+
+#include "rzg2l_du_crtc.h"
+#include "rzg2l_du_vsp.h"
+
+struct device;
+struct drm_property;
+
+enum rzg2l_du_output {
+ RZG2L_DU_OUTPUT_DSI0,
+ RZG2L_DU_OUTPUT_DPAD0,
+ RZG2L_DU_OUTPUT_MAX,
+};
+
+/*
+ * struct rzg2l_du_output_routing - Output routing specification
+ * @possible_outputs: bitmask of possible outputs
+ * @port: device tree port number corresponding to this output route
+ *
+ * The DU has two possible outputs (DPAD0, DSI0). The output routing data
+ * specify the valid SoC outputs, which CRTCs can drive each output, and the
+ * type of in-SoC encoder for the output.
+ */
+struct rzg2l_du_output_routing {
+ unsigned int possible_outputs;
+ unsigned int port;
+};
+
+/*
+ * struct rzg2l_du_device_info - DU model-specific information
+ * @channels_mask: bit mask of available DU channels
+ * @routes: array of CRTC to output routes, indexed by output (RZG2L_DU_OUTPUT_*)
+ */
+struct rzg2l_du_device_info {
+ unsigned int channels_mask;
+ struct rzg2l_du_output_routing routes[RZG2L_DU_OUTPUT_MAX];
+};
+
+#define RZG2L_DU_MAX_CRTCS 1
+#define RZG2L_DU_MAX_VSPS 1
+#define RZG2L_DU_MAX_DSI 1
+
+struct rzg2l_du_device {
+ struct device *dev;
+ const struct rzg2l_du_device_info *info;
+
+ void __iomem *mmio;
+
+ struct drm_device ddev;
+
+ struct rzg2l_du_crtc crtcs[RZG2L_DU_MAX_CRTCS];
+ unsigned int num_crtcs;
+
+ struct rzg2l_du_vsp vsps[RZG2L_DU_MAX_VSPS];
+};
+
+static inline struct rzg2l_du_device *to_rzg2l_du_device(struct drm_device *dev)
+{
+ return container_of(dev, struct rzg2l_du_device, ddev);
+}
+
+const char *rzg2l_du_output_name(enum rzg2l_du_output output);
+
+#endif /* __RZG2L_DU_DRV_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
new file mode 100644
index 000000000000..339cbaaea0b5
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit Encoder
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_encoder.c
+ */
+
+#include <linux/export.h>
+#include <linux/of.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_panel.h>
+
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_encoder.h"
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
+};
+
+int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
+ enum rzg2l_du_output output,
+ struct device_node *enc_node)
+{
+ struct rzg2l_du_encoder *renc;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+ int ret;
+
+ /* Locate the DRM bridge from the DT node. */
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
+ enc_node, rzg2l_du_output_name(output));
+
+ renc = drmm_encoder_alloc(&rcdu->ddev, struct rzg2l_du_encoder, base,
+ &rzg2l_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+ NULL);
+ if (IS_ERR(renc))
+ return PTR_ERR(renc);
+
+ renc->output = output;
+
+ /* Attach the bridge to the encoder. */
+ ret = drm_bridge_attach(&renc->base, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(rcdu->dev,
+ "failed to attach bridge %pOF for output %s (%d)\n",
+ bridge->of_node, rzg2l_du_output_name(output), ret);
+ return ret;
+ }
+
+ /* Create the connector for the chain of bridges. */
+ connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base);
+ if (IS_ERR(connector)) {
+ dev_err(rcdu->dev,
+ "failed to created connector for output %s (%ld)\n",
+ rzg2l_du_output_name(output), PTR_ERR(connector));
+ return PTR_ERR(connector);
+ }
+
+ return drm_connector_attach_encoder(connector, &renc->base);
+}
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h
new file mode 100644
index 000000000000..3e430c1f6132
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit Encoder
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_encoder.h
+ */
+
+#ifndef __RZG2L_DU_ENCODER_H__
+#define __RZG2L_DU_ENCODER_H__
+
+#include <drm/drm_encoder.h>
+#include <linux/container_of.h>
+
+struct rzg2l_du_device;
+
+struct rzg2l_du_encoder {
+ struct drm_encoder base;
+ enum rzg2l_du_output output;
+};
+
+static inline struct rzg2l_du_encoder *to_rzg2l_encoder(struct drm_encoder *e)
+{
+ return container_of(e, struct rzg2l_du_encoder, base);
+}
+
+int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
+ enum rzg2l_du_output output,
+ struct device_node *enc_node);
+
+#endif /* __RZG2L_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
new file mode 100644
index 000000000000..07b312b6f81e
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit Mode Setting
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_kms.c
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "rzg2l_du_crtc.h"
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_encoder.h"
+#include "rzg2l_du_kms.h"
+#include "rzg2l_du_vsp.h"
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
+ {
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB888,
+ .v4l2 = V4L2_PIX_FMT_BGR24,
+ .bpp = 24,
+ .planes = 1,
+ .hsub = 1,
+ }
+};
+
+const struct rzg2l_du_format_info *rzg2l_du_format_info(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rzg2l_du_format_infos); ++i) {
+ if (rzg2l_du_format_infos[i].fourcc == fourcc)
+ return &rzg2l_du_format_infos[i];
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer
+ */
+
+int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int align = 16 * args->bpp / 8;
+
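+	/* Align the pitch to a multiple of 16 pixels, expressed in bytes. */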
+ args->pitch = roundup(min_pitch, align);
+
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
+}
+
+static struct drm_framebuffer *
+rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct rzg2l_du_format_info *format;
+ unsigned int max_pitch;
+
+ format = rzg2l_du_format_info(mode_cmd->pixel_format);
+ if (!format) {
+ dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
+ &mode_cmd->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+	 * On RZ/G2L the memory interface is handled by the VSP, which limits the
+	 * pitch to 65535 bytes.
+ */
+ max_pitch = 65535;
+ if (mode_cmd->pitches[0] > max_pitch) {
+ dev_dbg(dev->dev, "invalid pitch value %u\n",
+ mode_cmd->pitches[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static const struct drm_mode_config_helper_funcs rzg2l_du_mode_config_helper = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static const struct drm_mode_config_funcs rzg2l_du_mode_config_funcs = {
+ .fb_create = rzg2l_du_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int rzg2l_du_encoders_init_one(struct rzg2l_du_device *rcdu,
+ enum rzg2l_du_output output,
+ struct of_endpoint *ep)
+{
+ struct device_node *entity;
+ int ret;
+
+ /* Locate the connected entity and initialize the encoder. */
+ entity = of_graph_get_remote_port_parent(ep->local_node);
+ if (!entity) {
+ dev_dbg(rcdu->dev, "unconnected endpoint %pOF, skipping\n",
+ ep->local_node);
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(entity)) {
+ dev_dbg(rcdu->dev,
+ "connected entity %pOF is disabled, skipping\n",
+ entity);
+ of_node_put(entity);
+ return -ENODEV;
+ }
+
+ ret = rzg2l_du_encoder_init(rcdu, output, entity);
+ if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK)
+ dev_warn(rcdu->dev,
+ "failed to initialize encoder %pOF on output %s (%d), skipping\n",
+ entity, rzg2l_du_output_name(output), ret);
+
+ of_node_put(entity);
+
+ return ret;
+}
+
+static int rzg2l_du_encoders_init(struct rzg2l_du_device *rcdu)
+{
+ struct device_node *np = rcdu->dev->of_node;
+ struct device_node *ep_node;
+ unsigned int num_encoders = 0;
+
+ /*
+ * Iterate over the endpoints and create one encoder for each output
+ * pipeline.
+ */
+ for_each_endpoint_of_node(np, ep_node) {
+ enum rzg2l_du_output output;
+ struct of_endpoint ep;
+ unsigned int i;
+ int ret;
+
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ if (ret < 0) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ /* Find the output route corresponding to the port number. */
+ for (i = 0; i < RZG2L_DU_OUTPUT_MAX; ++i) {
+ if (rcdu->info->routes[i].port == ep.port) {
+ output = i;
+ break;
+ }
+ }
+
+ if (i == RZG2L_DU_OUTPUT_MAX) {
+ dev_warn(rcdu->dev,
+ "port %u references unexisting output, skipping\n",
+ ep.port);
+ continue;
+ }
+
+ /* Process the output pipeline. */
+ ret = rzg2l_du_encoders_init_one(rcdu, output, &ep);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ continue;
+ }
+
+ num_encoders++;
+ }
+
+ return num_encoders;
+}
+
+static int rzg2l_du_vsps_init(struct rzg2l_du_device *rcdu)
+{
+ const struct device_node *np = rcdu->dev->of_node;
+ const char *vsps_prop_name = "renesas,vsps";
+ struct of_phandle_args args;
+ struct {
+ struct device_node *np;
+ unsigned int crtcs_mask;
+ } vsps[RZG2L_DU_MAX_VSPS] = { { NULL, }, };
+ unsigned int vsps_count = 0;
+ unsigned int cells;
+ unsigned int i;
+ int ret;
+
+ /*
+ * First parse the DT vsps property to populate the list of VSPs. Each
+ * entry contains a pointer to the VSP DT node and a bitmask of the
+ * connected DU CRTCs.
+ */
+ ret = of_property_count_u32_elems(np, vsps_prop_name);
+ cells = ret / rcdu->num_crtcs - 1;
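+	/* Each 'renesas,vsps' entry must be a phandle plus exactly one cell. */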
+ if (cells != 1)
+ return -EINVAL;
+
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ unsigned int j;
+
+ ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
+ cells, i, &args);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * Add the VSP to the list or update the corresponding existing
+ * entry if the VSP has already been added.
+ */
+ for (j = 0; j < vsps_count; ++j) {
+ if (vsps[j].np == args.np)
+ break;
+ }
+
+ if (j < vsps_count)
+ of_node_put(args.np);
+ else
+ vsps[vsps_count++].np = args.np;
+
+ vsps[j].crtcs_mask |= BIT(i);
+
+ /*
+ * Store the VSP pointer and pipe index in the CRTC. If the
+ * second cell of the 'renesas,vsps' specifier isn't present,
+ * default to 0.
+ */
+ rcdu->crtcs[i].vsp = &rcdu->vsps[j];
+ rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
+ }
+
+ /*
+ * Then initialize all the VSPs from the node pointers and CRTCs bitmask
+ * computed previously.
+ */
+ for (i = 0; i < vsps_count; ++i) {
+ struct rzg2l_du_vsp *vsp = &rcdu->vsps[i];
+
+ vsp->index = i;
+ vsp->dev = rcdu;
+
+ ret = rzg2l_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
+ if (ret)
+ goto done;
+ }
+
+done:
+ for (i = 0; i < ARRAY_SIZE(vsps); ++i)
+ of_node_put(vsps[i].np);
+
+ return ret;
+}
+
+int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
+{
+ struct drm_device *dev = &rcdu->ddev;
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.normalize_zpos = true;
+ dev->mode_config.funcs = &rzg2l_du_mode_config_funcs;
+ dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
+
+ /*
+	 * The RZ DU uses the VSP1 for memory access and is limited to a
+	 * maximum frame size of 1920x1080.
+ */
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 1080;
+
+ rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
+
+ /*
+ * Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, rcdu->num_crtcs);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the compositors. */
+ ret = rzg2l_du_vsps_init(rcdu);
+ if (ret < 0)
+ return ret;
+
+ /* Create the CRTCs. */
+ ret = rzg2l_du_crtc_create(rcdu);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the encoders. */
+ ret = rzg2l_du_encoders_init(rcdu);
+ if (ret < 0)
+ return dev_err_probe(rcdu->dev, ret,
+ "failed to initialize encoders\n");
+
+ if (ret == 0) {
+ dev_err(rcdu->dev, "error: no encoder could be initialized\n");
+ return -EINVAL;
+ }
+
+ num_encoders = ret;
+
+ /*
+	 * Set the possible CRTCs and possible clones. There's always at least
+	 * one way for all encoders to clone each other, so set all bits in the
+	 * possible clones field.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct rzg2l_du_encoder *renc = to_rzg2l_encoder(encoder);
+ const struct rzg2l_du_output_routing *route =
+ &rcdu->info->routes[renc->output];
+
+ encoder->possible_crtcs = route->possible_outputs;
+ encoder->possible_clones = (1 << num_encoders) - 1;
+ }
+
+ drm_mode_config_reset(dev);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
new file mode 100644
index 000000000000..876e97cfbf45
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit Mode Setting
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_kms.h
+ */
+
+#ifndef __RZG2L_DU_KMS_H__
+#define __RZG2L_DU_KMS_H__
+
+#include <linux/types.h>
+
+struct dma_buf_attachment;
+struct drm_file;
+struct drm_device;
+struct drm_gem_object;
+struct drm_mode_create_dumb;
+struct rzg2l_du_device;
+struct sg_table;
+
+struct rzg2l_du_format_info {
+ u32 fourcc;
+ u32 v4l2;
+ unsigned int bpp;
+ unsigned int planes;
+ unsigned int hsub;
+};
+
+const struct rzg2l_du_format_info *rzg2l_du_format_info(u32 fourcc);
+
+int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu);
+
+int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+struct drm_gem_object *
+rzg2l_du_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+#endif /* __RZG2L_DU_KMS_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
new file mode 100644
index 000000000000..0ae6331d6430
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_vsp.c
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <media/vsp1.h>
+
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_kms.h"
+#include "rzg2l_du_vsp.h"
+
+static void rzg2l_du_vsp_complete(void *private, unsigned int status, u32 crc)
+{
+ struct rzg2l_du_crtc *crtc = private;
+
+ if (crtc->vblank_enable)
+ drm_crtc_handle_vblank(&crtc->crtc);
+
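+	/* A completed frame from the VSP1 ends the pending page flip. */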
+ if (status & VSP1_DU_STATUS_COMPLETE)
+ rzg2l_du_crtc_finish_page_flip(crtc);
+
+ drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
+}
+
+void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc)
+{
+ const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
+ struct vsp1_du_lif_config cfg = {
+ .width = mode->hdisplay,
+ .height = mode->vdisplay,
+ .interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE,
+ .callback = rzg2l_du_vsp_complete,
+ .callback_data = crtc,
+ };
+
+ vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
+}
+
+void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc)
+{
+ vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL);
+}
+
+void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc)
+{
+ struct vsp1_du_atomic_pipe_config cfg = { { 0, } };
+ struct rzg2l_du_crtc_state *state;
+
+ state = to_rzg2l_crtc_state(crtc->crtc.state);
+
+ vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
+}
+
+struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc,
+ unsigned int pipe_index)
+{
+ struct rzg2l_du_device *rcdu = crtc->vsp->dev;
+ struct drm_plane *plane = NULL;
+
+ drm_for_each_plane(plane, &rcdu->ddev) {
+ struct rzg2l_du_vsp_plane *vsp_plane = to_rzg2l_vsp_plane(plane);
+
+ if (vsp_plane->index == pipe_index)
+ break;
+ }
+
+ return plane ? plane : ERR_PTR(-EINVAL);
+}
+
+static const u32 rzg2l_du_vsp_formats[] = {
+ DRM_FORMAT_RGB332,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+};
+
+static void rzg2l_du_vsp_plane_setup(struct rzg2l_du_vsp_plane *plane)
+{
+ struct rzg2l_du_vsp_plane_state *state =
+ to_rzg2l_vsp_plane_state(plane->plane.state);
+ struct rzg2l_du_crtc *crtc = to_rzg2l_crtc(state->state.crtc);
+ struct drm_framebuffer *fb = plane->plane.state->fb;
+ const struct rzg2l_du_format_info *format;
+ struct vsp1_du_atomic_config cfg = {
+ .pixelformat = 0,
+ .pitch = fb->pitches[0],
+ .alpha = state->state.alpha >> 8,
+ .zpos = state->state.zpos,
+ };
+ u32 fourcc = state->format->fourcc;
+ unsigned int i;
+
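+	/* The source rectangle uses 16.16 fixed-point coordinates. */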
+ cfg.src.left = state->state.src.x1 >> 16;
+ cfg.src.top = state->state.src.y1 >> 16;
+ cfg.src.width = drm_rect_width(&state->state.src) >> 16;
+ cfg.src.height = drm_rect_height(&state->state.src) >> 16;
+
+ cfg.dst.left = state->state.dst.x1;
+ cfg.dst.top = state->state.dst.y1;
+ cfg.dst.width = drm_rect_width(&state->state.dst);
+ cfg.dst.height = drm_rect_height(&state->state.dst);
+
+ for (i = 0; i < state->format->planes; ++i) {
+ struct drm_gem_dma_object *gem;
+
+ gem = drm_fb_dma_get_gem_obj(fb, i);
+ cfg.mem[i] = gem->dma_addr + fb->offsets[i];
+ }
+
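+	/*
+	 * With per-pixel blending disabled, map the alpha formats to their
+	 * opaque XRGB equivalents so the VSP ignores the alpha channel.
+	 */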
+ if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ fourcc = DRM_FORMAT_XRGB1555;
+ break;
+
+ case DRM_FORMAT_ARGB4444:
+ fourcc = DRM_FORMAT_XRGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ fourcc = DRM_FORMAT_XRGB8888;
+ break;
+ }
+ }
+
+ format = rzg2l_du_format_info(fourcc);
+ cfg.pixelformat = format->v4l2;
+
+ cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI;
+
+ vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
+ plane->index, &cfg);
+}
+
+static int __rzg2l_du_vsp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ const struct rzg2l_du_format_info **format)
+{
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->crtc) {
+ /*
+		 * The visible field is not reset by the DRM core but only
+		 * updated by drm_atomic_helper_check_plane_state(), so reset it
+		 * manually here.
+ */
+ state->visible = false;
+ *format = NULL;
+ return 0;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ if (ret < 0)
+ return ret;
+
+ if (!state->visible) {
+ *format = NULL;
+ return 0;
+ }
+
+ *format = rzg2l_du_format_info(state->fb->format->format);
+
+ return 0;
+}
+
+static int rzg2l_du_vsp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct rzg2l_du_vsp_plane_state *rstate = to_rzg2l_vsp_plane_state(new_plane_state);
+
+ return __rzg2l_du_vsp_plane_atomic_check(plane, new_plane_state, &rstate->format);
+}
+
+static void rzg2l_du_vsp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct rzg2l_du_vsp_plane *rplane = to_rzg2l_vsp_plane(plane);
+ struct rzg2l_du_crtc *crtc = to_rzg2l_crtc(old_state->crtc);
+
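+	/* Passing a NULL configuration disables the plane in the VSP. */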
+ if (new_state->visible)
+ rzg2l_du_vsp_plane_setup(rplane);
+ else if (old_state->crtc)
+ vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
+ rplane->index, NULL);
+}
+
+static const struct drm_plane_helper_funcs rzg2l_du_vsp_plane_helper_funcs = {
+ .atomic_check = rzg2l_du_vsp_plane_atomic_check,
+ .atomic_update = rzg2l_du_vsp_plane_atomic_update,
+};
+
+static struct drm_plane_state *
+rzg2l_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct rzg2l_du_vsp_plane_state *copy;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
+
+ return &copy->state;
+}
+
+static void rzg2l_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_rzg2l_vsp_plane_state(state));
+}
+
+static void rzg2l_du_vsp_plane_reset(struct drm_plane *plane)
+{
+ struct rzg2l_du_vsp_plane_state *state;
+
+ if (plane->state) {
+ rzg2l_du_vsp_plane_atomic_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ __drm_atomic_helper_plane_reset(plane, &state->state);
+}
+
+static const struct drm_plane_funcs rzg2l_du_vsp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = rzg2l_du_vsp_plane_reset,
+ .atomic_duplicate_state = rzg2l_du_vsp_plane_atomic_duplicate_state,
+ .atomic_destroy_state = rzg2l_du_vsp_plane_atomic_destroy_state,
+};
+
+static void rzg2l_du_vsp_cleanup(struct drm_device *dev, void *res)
+{
+ struct rzg2l_du_vsp *vsp = res;
+
+ put_device(vsp->vsp);
+}
+
+int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs)
+{
+ struct rzg2l_du_device *rcdu = vsp->dev;
+ struct platform_device *pdev;
+ unsigned int num_crtcs = hweight32(crtcs);
+ unsigned int num_planes = 2;
+ unsigned int i;
+ int ret;
+
+ /* Find the VSP device and initialize it. */
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENXIO;
+
+ vsp->vsp = &pdev->dev;
+
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rzg2l_du_vsp_cleanup, vsp);
+ if (ret < 0)
+ return ret;
+
+ ret = vsp1_du_init(vsp->vsp);
+ if (ret < 0)
+ return ret;
+
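+	/* Create one primary plane per CRTC; the remaining planes are overlays. */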
+ for (i = 0; i < num_planes; ++i) {
+ enum drm_plane_type type = i < num_crtcs
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
+ struct rzg2l_du_vsp_plane *plane;
+
+ plane = drmm_universal_plane_alloc(&rcdu->ddev, struct rzg2l_du_vsp_plane,
+ plane, crtcs, &rzg2l_du_vsp_plane_funcs,
+ rzg2l_du_vsp_formats,
+ ARRAY_SIZE(rzg2l_du_vsp_formats),
+ NULL, type, NULL);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+
+ plane->vsp = vsp;
+ plane->index = i;
+
+ drm_plane_helper_add(&plane->plane,
+ &rzg2l_du_vsp_plane_helper_funcs);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h
new file mode 100644
index 000000000000..322eb80dcbaf
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_vsp.h
+ */
+
+#ifndef __RZG2L_DU_VSP_H__
+#define __RZG2L_DU_VSP_H__
+
+#include <drm/drm_plane.h>
+#include <linux/container_of.h>
+#include <linux/scatterlist.h>
+
+struct device;
+struct drm_framebuffer;
+struct rzg2l_du_device;
+struct rzg2l_du_format_info;
+struct rzg2l_du_vsp;
+
+struct rzg2l_du_vsp_plane {
+ struct drm_plane plane;
+ struct rzg2l_du_vsp *vsp;
+ unsigned int index;
+};
+
+struct rzg2l_du_vsp {
+ unsigned int index;
+ struct device *vsp;
+ struct rzg2l_du_device *dev;
+};
+
+static inline struct rzg2l_du_vsp_plane *to_rzg2l_vsp_plane(struct drm_plane *p)
+{
+ return container_of(p, struct rzg2l_du_vsp_plane, plane);
+}
+
+/**
+ * struct rzg2l_du_vsp_plane_state - Driver-specific plane state
+ * @state: base DRM plane state
+ * @format: information about the pixel format used by the plane
+ */
+struct rzg2l_du_vsp_plane_state {
+ struct drm_plane_state state;
+
+ const struct rzg2l_du_format_info *format;
+};
+
+static inline struct rzg2l_du_vsp_plane_state *
+to_rzg2l_vsp_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct rzg2l_du_vsp_plane_state, state);
+}
+
+#if IS_ENABLED(CONFIG_VIDEO_RENESAS_VSP1)
+int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs);
+void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc);
+void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc);
+void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc);
+struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc,
+ unsigned int pipe_index);
+#else
+static inline int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs)
+{
+ return -ENXIO;
+}
+
+static inline void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc) { }
+static inline void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc) { }
+static inline void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc) { }
+static inline struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc,
+ unsigned int pipe_index)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+#endif /* __RZG2L_DU_VSP_H__ */
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index bd08d57486fe..7069a3d4d581 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -343,6 +343,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
return ret;
}
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dp->encoder,
+ dev->of_node, 0, 0);
+
dp->plat_data.encoder = &dp->encoder.encoder;
ret = analogix_dp_bind(dp->adp, drm_dev);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index e6fbe040ccf6..1d2261643743 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,12 +10,12 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
-#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
@@ -26,12 +26,17 @@
#include "inno_hdmi.h"
-struct hdmi_data_info {
- int vic;
- bool sink_has_audio;
- unsigned int enc_in_format;
- unsigned int enc_out_format;
- unsigned int colorimetry;
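+/* The HDMI specification requires a TMDS clock of at least 25 MHz. */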
+#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+
+struct inno_hdmi_phy_config {
+ unsigned long pixelclock;
+ u8 pre_emphasis;
+ u8 voltage_level_control;
+};
+
+struct inno_hdmi_variant {
+ struct inno_hdmi_phy_config *phy_configs;
+ struct inno_hdmi_phy_config *default_phy_config;
};
struct inno_hdmi_i2c {
@@ -46,10 +51,9 @@ struct inno_hdmi_i2c {
struct inno_hdmi {
struct device *dev;
- struct drm_device *drm_dev;
- int irq;
struct clk *pclk;
+ struct clk *refclk;
void __iomem *regs;
struct drm_connector connector;
@@ -58,10 +62,14 @@ struct inno_hdmi {
struct inno_hdmi_i2c *i2c;
struct i2c_adapter *ddc;
- unsigned int tmds_rate;
+ const struct inno_hdmi_variant *variant;
+};
- struct hdmi_data_info hdmi_data;
- struct drm_display_mode previous_mode;
+struct inno_hdmi_connector_state {
+ struct drm_connector_state base;
+ unsigned int enc_out_format;
+ unsigned int colorimetry;
+ bool rgb_limited_range;
};
static struct inno_hdmi *encoder_to_inno_hdmi(struct drm_encoder *encoder)
@@ -76,10 +84,10 @@ static struct inno_hdmi *connector_to_inno_hdmi(struct drm_connector *connector)
return container_of(connector, struct inno_hdmi, connector);
}
+#define to_inno_hdmi_conn_state(conn_state) \
+ container_of_const(conn_state, struct inno_hdmi_connector_state, base)
+
enum {
- CSC_ITU601_16_235_TO_RGB_0_255_8BIT,
- CSC_ITU601_0_255_TO_RGB_0_255_8BIT,
- CSC_ITU709_16_235_TO_RGB_0_255_8BIT,
CSC_RGB_0_255_TO_ITU601_16_235_8BIT,
CSC_RGB_0_255_TO_ITU709_16_235_8BIT,
CSC_RGB_0_255_TO_RGB_16_235_8BIT,
@@ -87,40 +95,6 @@ enum {
static const char coeff_csc[][24] = {
/*
- * YUV2RGB:601 SD mode(Y[16:235], UV[16:240], RGB[0:255]):
- * R = 1.164*Y + 1.596*V - 204
- * G = 1.164*Y - 0.391*U - 0.813*V + 154
- * B = 1.164*Y + 2.018*U - 258
- */
- {
- 0x04, 0xa7, 0x00, 0x00, 0x06, 0x62, 0x02, 0xcc,
- 0x04, 0xa7, 0x11, 0x90, 0x13, 0x40, 0x00, 0x9a,
- 0x04, 0xa7, 0x08, 0x12, 0x00, 0x00, 0x03, 0x02
- },
- /*
- * YUV2RGB:601 SD mode(YUV[0:255],RGB[0:255]):
- * R = Y + 1.402*V - 248
- * G = Y - 0.344*U - 0.714*V + 135
- * B = Y + 1.772*U - 227
- */
- {
- 0x04, 0x00, 0x00, 0x00, 0x05, 0x9b, 0x02, 0xf8,
- 0x04, 0x00, 0x11, 0x60, 0x12, 0xdb, 0x00, 0x87,
- 0x04, 0x00, 0x07, 0x16, 0x00, 0x00, 0x02, 0xe3
- },
- /*
- * YUV2RGB:709 HD mode(Y[16:235],UV[16:240],RGB[0:255]):
- * R = 1.164*Y + 1.793*V - 248
- * G = 1.164*Y - 0.213*U - 0.534*V + 77
- * B = 1.164*Y + 2.115*U - 289
- */
- {
- 0x04, 0xa7, 0x00, 0x00, 0x07, 0x2c, 0x02, 0xf8,
- 0x04, 0xa7, 0x10, 0xda, 0x12, 0x22, 0x00, 0x4d,
- 0x04, 0xa7, 0x08, 0x74, 0x00, 0x00, 0x03, 0x21
- },
-
- /*
* RGB2YUV:601 SD mode:
* Cb = -0.291G - 0.148R + 0.439B + 128
* Y = 0.504G + 0.257R + 0.098B + 16
@@ -155,6 +129,36 @@ static const char coeff_csc[][24] = {
},
};
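+/*
+ * PHY pre-emphasis and voltage level settings, indexed by maximum pixel
+ * clock; the ~0UL entry terminates the table.
+ */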
+static struct inno_hdmi_phy_config rk3036_hdmi_phy_configs[] = {
+ { 74250000, 0x3f, 0xbb },
+ { 165000000, 0x6f, 0xbb },
+ { ~0UL, 0x00, 0x00 }
+};
+
+static struct inno_hdmi_phy_config rk3128_hdmi_phy_configs[] = {
+ { 74250000, 0x3f, 0xaa },
+ { 165000000, 0x5f, 0xaa },
+ { ~0UL, 0x00, 0x00 }
+};
+
+static int inno_hdmi_find_phy_config(struct inno_hdmi *hdmi,
+ unsigned long pixelclk)
+{
+ const struct inno_hdmi_phy_config *phy_configs =
+ hdmi->variant->phy_configs;
+ int i;
+
+ for (i = 0; phy_configs[i].pixelclock != ~0UL; i++) {
+ if (pixelclk <= phy_configs[i].pixelclock)
+ return i;
+ }
+
+ DRM_DEV_DEBUG(hdmi->dev, "No phy configuration for pixelclock %lu\n",
+ pixelclk);
+
+ return -EINVAL;
+}
+
static inline u8 hdmi_readb(struct inno_hdmi *hdmi, u16 offset)
{
return readl_relaxed(hdmi->regs + (offset) * 0x04);
@@ -174,11 +178,11 @@ static inline void hdmi_modb(struct inno_hdmi *hdmi, u16 offset,
hdmi_writeb(hdmi, offset, temp);
}
-static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi)
+static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi, unsigned long long rate)
{
- int ddc_bus_freq;
+ unsigned long long ddc_bus_freq = rate >> 2;
- ddc_bus_freq = (hdmi->tmds_rate >> 2) / HDMI_SCL_RATE;
+ do_div(ddc_bus_freq, HDMI_SCL_RATE);
hdmi_writeb(hdmi, DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
hdmi_writeb(hdmi, DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
@@ -196,38 +200,44 @@ static void inno_hdmi_sys_power(struct inno_hdmi *hdmi, bool enable)
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_OFF);
}
-static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode)
+static void inno_hdmi_standby(struct inno_hdmi *hdmi)
{
- switch (mode) {
- case NORMAL:
- inno_hdmi_sys_power(hdmi, false);
+ inno_hdmi_sys_power(hdmi, false);
- hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x6f);
- hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0xbb);
+ hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+}
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14);
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10);
- hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f);
- hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01);
+static void inno_hdmi_power_up(struct inno_hdmi *hdmi,
+ unsigned long mpixelclock)
+{
+ struct inno_hdmi_phy_config *phy_config;
+ int ret = inno_hdmi_find_phy_config(hdmi, mpixelclock);
- inno_hdmi_sys_power(hdmi, true);
- break;
+ if (ret < 0) {
+ phy_config = hdmi->variant->default_phy_config;
+ DRM_DEV_ERROR(hdmi->dev,
+ "Using default phy configuration for TMDS rate %lu",
+ mpixelclock);
+ } else {
+ phy_config = &hdmi->variant->phy_configs[ret];
+ }
- case LOWER_PWR:
- inno_hdmi_sys_power(hdmi, false);
- hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+ inno_hdmi_sys_power(hdmi, false);
- break;
+ hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, phy_config->pre_emphasis);
+ hdmi_writeb(hdmi, HDMI_PHY_DRIVER, phy_config->voltage_level_control);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10);
+ hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f);
+ hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01);
- default:
- DRM_DEV_ERROR(hdmi->dev, "Unknown power mode %d\n", mode);
- }
-}
+ inno_hdmi_sys_power(hdmi, true);
+}
static void inno_hdmi_reset(struct inno_hdmi *hdmi)
{
@@ -244,75 +254,96 @@ static void inno_hdmi_reset(struct inno_hdmi *hdmi)
val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
- inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+ inno_hdmi_standby(hdmi);
}
-static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi, int setup_rc,
- union hdmi_infoframe *frame, u32 frame_index,
- u32 mask, u32 disable, u32 enable)
+static void inno_hdmi_disable_frame(struct inno_hdmi *hdmi,
+ enum hdmi_infoframe_type type)
{
- if (mask)
- hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, disable);
+ struct drm_connector *connector = &hdmi->connector;
- hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, frame_index);
-
- if (setup_rc >= 0) {
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
- ssize_t rc, i;
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(connector->dev,
+ "Unsupported infoframe type: %u\n", type);
+ return;
+ }
- rc = hdmi_infoframe_pack(frame, packed_frame,
- sizeof(packed_frame));
- if (rc < 0)
- return rc;
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, INFOFRAME_AVI);
+}
- for (i = 0; i < rc; i++)
- hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
- packed_frame[i]);
+static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi,
+ union hdmi_infoframe *frame, enum hdmi_infoframe_type type)
+{
+ struct drm_connector *connector = &hdmi->connector;
+ u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
+ ssize_t rc, i;
- if (mask)
- hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, enable);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(connector->dev,
+ "Unsupported infoframe type: %u\n", type);
+ return 0;
}
- return setup_rc;
-}
+ inno_hdmi_disable_frame(hdmi, type);
-static int inno_hdmi_config_video_vsi(struct inno_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int rc;
+ rc = hdmi_infoframe_pack(frame, packed_frame,
+ sizeof(packed_frame));
+ if (rc < 0)
+ return rc;
- rc = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
- &hdmi->connector,
- mode);
+ for (i = 0; i < rc; i++)
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
+ packed_frame[i]);
- return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_VSI,
- m_PACKET_VSI_EN, v_PACKET_VSI_EN(0), v_PACKET_VSI_EN(1));
+ return 0;
}
static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
+ struct drm_connector *connector = &hdmi->connector;
+ struct drm_connector_state *conn_state = connector->state;
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(conn_state);
union hdmi_infoframe frame;
int rc;
rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
&hdmi->connector,
mode);
+ if (rc) {
+ inno_hdmi_disable_frame(hdmi, HDMI_INFOFRAME_TYPE_AVI);
+ return rc;
+ }
- if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV444)
frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
- else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
+ else if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV422)
frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_AVI, 0, 0, 0);
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ connector, mode,
+ inno_conn_state->rgb_limited_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.avi.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ return inno_hdmi_upload_frame(hdmi, &frame, HDMI_INFOFRAME_TYPE_AVI);
}
static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
{
- struct hdmi_data_info *data = &hdmi->hdmi_data;
+ struct drm_connector *connector = &hdmi->connector;
+ struct drm_connector_state *conn_state = connector->state;
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(conn_state);
int c0_c2_change = 0;
int csc_enable = 0;
int csc_mode = 0;
@@ -330,9 +361,14 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
v_VIDEO_INPUT_CSP(0);
hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL2, value);
- if (data->enc_in_format == data->enc_out_format) {
- if ((data->enc_in_format == HDMI_COLORSPACE_RGB) ||
- (data->enc_in_format >= HDMI_COLORSPACE_YUV444)) {
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_RGB) {
+ if (inno_conn_state->rgb_limited_range) {
+ csc_mode = CSC_RGB_0_255_TO_RGB_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+ } else {
value = v_SOF_DISABLE | v_COLOR_DEPTH_NOT_INDICATED(1);
hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
@@ -342,35 +378,21 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
v_VIDEO_C0_C2_SWAP(C0_C2_CHANGE_DISABLE));
return 0;
}
- }
-
- if (data->colorimetry == HDMI_COLORIMETRY_ITU_601) {
- if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
- (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
- csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT;
- auto_csc = AUTO_CSC_DISABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_ENABLE;
- } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
- (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
- csc_mode = CSC_ITU601_16_235_TO_RGB_0_255_8BIT;
- auto_csc = AUTO_CSC_ENABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_DISABLE;
- }
} else {
- if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
- (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
- csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT;
- auto_csc = AUTO_CSC_DISABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_ENABLE;
- } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
- (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
- csc_mode = CSC_ITU709_16_235_TO_RGB_0_255_8BIT;
- auto_csc = AUTO_CSC_ENABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_DISABLE;
+ if (inno_conn_state->colorimetry == HDMI_COLORIMETRY_ITU_601) {
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV444) {
+ csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+ }
+ } else {
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV444) {
+ csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+ }
}
}
@@ -411,7 +433,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
- value = mode->hsync_start - mode->hdisplay;
+ value = mode->htotal - mode->hsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
@@ -426,7 +448,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
value = mode->vtotal - mode->vdisplay;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
- value = mode->vsync_start - mode->vdisplay;
+ value = mode->vtotal - mode->vsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
value = mode->vsync_end - mode->vsync_start;
@@ -443,19 +465,7 @@ static int inno_hdmi_setup(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct drm_display_info *display = &hdmi->connector.display_info;
-
- hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
-
- hdmi->hdmi_data.enc_in_format = HDMI_COLORSPACE_RGB;
- hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
-
- if ((hdmi->hdmi_data.vic == 6) || (hdmi->hdmi_data.vic == 7) ||
- (hdmi->hdmi_data.vic == 21) || (hdmi->hdmi_data.vic == 22) ||
- (hdmi->hdmi_data.vic == 2) || (hdmi->hdmi_data.vic == 3) ||
- (hdmi->hdmi_data.vic == 17) || (hdmi->hdmi_data.vic == 18))
- hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
- else
- hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+ unsigned long mpixelclock = mode->clock * 1000;
/* Mute video and audio output */
hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
@@ -469,10 +479,8 @@ static int inno_hdmi_setup(struct inno_hdmi *hdmi,
inno_hdmi_config_video_csc(hdmi);
- if (display->is_hdmi) {
+ if (display->is_hdmi)
inno_hdmi_config_video_avi(hdmi, mode);
- inno_hdmi_config_video_vsi(hdmi, mode);
- }
/*
 * When the IP controller has been configured with an accurate video
 * timing, the TMDS clock source is switched to DCLK_LCDC, so we need
 * to init the TMDS rate to the mode pixel clock rate and reconfigure
 * the DDC clock.
*/
- hdmi->tmds_rate = mode->clock * 1000;
- inno_hdmi_i2c_init(hdmi);
+ inno_hdmi_i2c_init(hdmi, mpixelclock);
/* Unmute video and audio output */
hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
v_AUDIO_MUTE(0) | v_VIDEO_MUTE(0));
+ inno_hdmi_power_up(hdmi, mpixelclock);
+
return 0;
}
-static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static enum drm_mode_status inno_hdmi_display_mode_valid(struct inno_hdmi *hdmi,
+ struct drm_display_mode *mode)
{
- struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ unsigned long mpixelclk, max_tolerance;
+ long rounded_refclk;
- inno_hdmi_setup(hdmi, adj_mode);
+ /* No support for double-clock modes */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_BAD;
- /* Store the display mode for plugin/DPMS poweron events */
- drm_mode_copy(&hdmi->previous_mode, adj_mode);
-}
+ mpixelclk = mode->clock * 1000;
-static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ if (mpixelclk < INNO_HDMI_MIN_TMDS_CLOCK)
+ return MODE_CLOCK_LOW;
+
+ if (inno_hdmi_find_phy_config(hdmi, mpixelclk) < 0)
+ return MODE_CLOCK_HIGH;
- inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+ if (hdmi->refclk) {
+ rounded_refclk = clk_round_rate(hdmi->refclk, mpixelclk);
+ if (rounded_refclk < 0)
+ return MODE_BAD;
+
+		/* The VESA DMT standard specifies a +/- 0.5% max tolerance */
+ max_tolerance = mpixelclk / 200;
+ if (abs_diff((unsigned long)rounded_refclk, mpixelclk) > max_tolerance)
+ return MODE_NOCLOCK;
+ }
+
+ return MODE_OK;
}
-static void inno_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void inno_hdmi_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
- inno_hdmi_set_pwr_mode(hdmi, LOWER_PWR);
+ conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ inno_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
}
-static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void inno_hdmi_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- return true;
+ struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+
+ inno_hdmi_standby(hdmi);
}
static int
@@ -529,19 +563,35 @@ inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ u8 vic = drm_match_cea_mode(mode);
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(conn_state);
s->output_mode = ROCKCHIP_OUT_MODE_P888;
s->output_type = DRM_MODE_CONNECTOR_HDMIA;
- return 0;
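+	/* SD CEA modes (480i/480p and 576i/576p) use ITU-R BT.601 colorimetry. */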
+ if (vic == 6 || vic == 7 ||
+ vic == 21 || vic == 22 ||
+ vic == 2 || vic == 3 ||
+ vic == 17 || vic == 18)
+ inno_conn_state->colorimetry = HDMI_COLORIMETRY_ITU_601;
+ else
+ inno_conn_state->colorimetry = HDMI_COLORIMETRY_ITU_709;
+
+ inno_conn_state->enc_out_format = HDMI_COLORSPACE_RGB;
+ inno_conn_state->rgb_limited_range =
+ drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED;
+
+ return inno_hdmi_display_mode_valid(hdmi,
+ &crtc_state->adjusted_mode) == MODE_OK ? 0 : -EINVAL;
}
static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
- .enable = inno_hdmi_encoder_enable,
- .disable = inno_hdmi_encoder_disable,
- .mode_fixup = inno_hdmi_encoder_mode_fixup,
- .mode_set = inno_hdmi_encoder_mode_set,
- .atomic_check = inno_hdmi_encoder_atomic_check,
+ .atomic_check = inno_hdmi_encoder_atomic_check,
+ .atomic_enable = inno_hdmi_encoder_enable,
+ .atomic_disable = inno_hdmi_encoder_disable,
};
static enum drm_connector_status
@@ -564,7 +614,6 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, hdmi->ddc);
if (edid) {
- hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -577,14 +626,9 @@ static enum drm_mode_status
inno_hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- return MODE_OK;
-}
+ struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
-static int
-inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- return drm_helper_probe_single_connector_modes(connector, 1920, 1080);
+ return inno_hdmi_display_mode_valid(hdmi, mode);
}
static void inno_hdmi_connector_destroy(struct drm_connector *connector)
@@ -593,13 +637,64 @@ static void inno_hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
+static void
+inno_hdmi_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(&inno_conn_state->base);
+ kfree(inno_conn_state);
+}
+
+static void inno_hdmi_connector_reset(struct drm_connector *connector)
+{
+ struct inno_hdmi_connector_state *inno_conn_state;
+
+ if (connector->state) {
+ inno_hdmi_connector_destroy_state(connector, connector->state);
+ connector->state = NULL;
+ }
+
+ inno_conn_state = kzalloc(sizeof(*inno_conn_state), GFP_KERNEL);
+ if (!inno_conn_state)
+ return;
+
+ __drm_atomic_helper_connector_reset(connector, &inno_conn_state->base);
+
+ inno_conn_state->colorimetry = HDMI_COLORIMETRY_ITU_709;
+ inno_conn_state->enc_out_format = HDMI_COLORSPACE_RGB;
+ inno_conn_state->rgb_limited_range = false;
+}
+
+static struct drm_connector_state *
+inno_hdmi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct inno_hdmi_connector_state *inno_conn_state;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ inno_conn_state = kmemdup(to_inno_hdmi_conn_state(connector->state),
+ sizeof(*inno_conn_state), GFP_KERNEL);
+
+ if (!inno_conn_state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &inno_conn_state->base);
+
+ return &inno_conn_state->base;
+}
+
static const struct drm_connector_funcs inno_hdmi_connector_funcs = {
- .fill_modes = inno_hdmi_probe_single_connector_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.detect = inno_hdmi_connector_detect,
.destroy = inno_hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = inno_hdmi_connector_reset,
+ .atomic_duplicate_state = inno_hdmi_connector_duplicate_state,
+ .atomic_destroy_state = inno_hdmi_connector_destroy_state,
};
static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
@@ -819,6 +914,7 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct inno_hdmi *hdmi;
+ const struct inno_hdmi_variant *variant;
int irq;
int ret;
@@ -827,7 +923,12 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
return -ENOMEM;
hdmi->dev = dev;
- hdmi->drm_dev = drm;
+
+ variant = of_device_get_match_data(hdmi->dev);
+ if (!variant)
+ return -EINVAL;
+
+ hdmi->variant = variant;
hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
@@ -846,6 +947,20 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
return ret;
}
+ hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
+ if (IS_ERR(hdmi->refclk)) {
+ DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI reference clock\n");
+ ret = PTR_ERR(hdmi->refclk);
+ goto err_disable_pclk;
+ }
+
+ ret = clk_prepare_enable(hdmi->refclk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev,
+ "Cannot enable HDMI reference clock: %d\n", ret);
+ goto err_disable_pclk;
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
@@ -862,13 +977,16 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
}
/*
- * When IP controller haven't configured to an accurate video
- * timing, then the TMDS clock source would be switched to
- * PCLK_HDMI, so we need to init the TMDS rate to PCLK rate,
- * and reconfigure the DDC clock.
+	 * When the controller isn't configured with an accurate video
+	 * timing and no reference clock is available, the TMDS clock
+	 * source is switched to PCLK_HDMI, so we need to init the TMDS
+	 * rate to the PCLK rate and reconfigure the DDC clock.
*/
- hdmi->tmds_rate = clk_get_rate(hdmi->pclk);
- inno_hdmi_i2c_init(hdmi);
+ if (hdmi->refclk)
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
+ else
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
ret = inno_hdmi_register(drm, hdmi);
if (ret)
@@ -892,6 +1010,8 @@ err_cleanup_hdmi:
err_put_adapter:
i2c_put_adapter(hdmi->ddc);
err_disable_clk:
+ clk_disable_unprepare(hdmi->refclk);
+err_disable_pclk:
clk_disable_unprepare(hdmi->pclk);
return ret;
}
@@ -905,6 +1025,7 @@ static void inno_hdmi_unbind(struct device *dev, struct device *master,
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
i2c_put_adapter(hdmi->ddc);
+ clk_disable_unprepare(hdmi->refclk);
clk_disable_unprepare(hdmi->pclk);
}
@@ -923,8 +1044,22 @@ static void inno_hdmi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &inno_hdmi_ops);
}
+static const struct inno_hdmi_variant rk3036_inno_hdmi_variant = {
+ .phy_configs = rk3036_hdmi_phy_configs,
+ .default_phy_config = &rk3036_hdmi_phy_configs[1],
+};
+
+static const struct inno_hdmi_variant rk3128_inno_hdmi_variant = {
+ .phy_configs = rk3128_hdmi_phy_configs,
+ .default_phy_config = &rk3128_hdmi_phy_configs[1],
+};
+
static const struct of_device_id inno_hdmi_dt_ids[] = {
{ .compatible = "rockchip,rk3036-inno-hdmi",
+ .data = &rk3036_inno_hdmi_variant,
+ },
+ { .compatible = "rockchip,rk3128-inno-hdmi",
+ .data = &rk3128_inno_hdmi_variant,
},
{},
};
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
index 93245b55f967..a7edf3559e60 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.h
@@ -10,11 +10,6 @@
#define DDC_SEGMENT_ADDR 0x30
-enum PWR_MODE {
- NORMAL,
- LOWER_PWR,
-};
-
#define HDMI_SCL_RATE (100*1000)
#define DDC_BUS_FREQ_L 0x4b
#define DDC_BUS_FREQ_H 0x4c
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 59341654ec32..77b76cff1adb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -576,8 +576,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
ret = -EINVAL;
goto err_put_port;
} else if (ret) {
- DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
- ret = -EPROBE_DEFER;
+ dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
goto err_put_port;
}
if (lvds->panel)
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index c51ca82320cb..b9ee02061d5b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -227,11 +227,22 @@ static const struct vop_win_data rk3126_vop_win_data[] = {
.type = DRM_PLANE_TYPE_CURSOR },
};
+static const struct vop_output rk3126_output = {
+ .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+ .hdmi_pin_pol = VOP_REG(RK3126_INT_SCALER, 0x7, 4),
+ .hdmi_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 22),
+ .hdmi_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 23),
+ .rgb_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 24),
+ .rgb_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 25),
+ .mipi_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 28),
+ .mipi_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 29),
+};
+
static const struct vop_data rk3126_vop = {
.intr = &rk3036_intr,
.common = &rk3036_common,
.modeset = &rk3036_modeset,
- .output = &rk3036_output,
+ .output = &rk3126_output,
.win = rk3126_vop_win_data,
.win_size = ARRAY_SIZE(rk3126_vop_win_data),
.max_output = { 1920, 1080 },
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index 406e981c75bd..fbf1bcc68625 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -872,6 +872,9 @@
/* rk3036 register definition end */
/* rk3126 register definition */
+#define RK3126_INT_SCALER 0x0c
+
+/* win1 register */
#define RK3126_WIN1_MST 0x4c
#define RK3126_WIN1_DSP_INFO 0x50
#define RK3126_WIN1_DSP_ST 0x54
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index d442b893275b..7e90c9f95611 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1251,7 +1251,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
long timeout, struct workqueue_struct *timeout_wq,
atomic_t *score, const char *name, struct device *dev)
{
- int i, ret;
+ int i;
sched->ops = ops;
sched->credit_limit = credit_limit;
@@ -1287,11 +1287,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->own_submit_wq = true;
}
- ret = -ENOMEM;
+
sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
if (!sched->sched_rq)
- goto Out_free;
+ goto Out_check_own;
sched->num_rqs = num_rqs;
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
@@ -1316,13 +1316,14 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
Out_unroll:
for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
kfree(sched->sched_rq[i]);
-Out_free:
+
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+Out_check_own:
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
- return ret;
+ return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
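The error-path rework above is the standard kernel unwind idiom: with -ENOMEM as the only possible failure past this point, the ret variable is dropped, and each label undoes exactly what had been set up before the jump. A minimal sketch of the same shape, with all names invented:

    struct example {
        void *rq;
        struct workqueue_struct *wq;
        bool own_wq;
    };

    static int example_init(struct example *e)
    {
        e->rq = kmalloc(16, GFP_KERNEL);
        if (!e->rq)
            goto out_check_own; /* nothing else to free yet */

        return 0;

    out_check_own:
        if (e->own_wq)
            destroy_workqueue(e->wq); /* only undo what we created */
        return -ENOMEM; /* single failure cause, no ret variable */
    }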
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 84e035a7ab3f..84bfde31d172 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -142,6 +142,11 @@ static const struct of_device_id ssd130x_of_match[] = {
.compatible = "solomon,ssd1327",
.data = &ssd130x_variants[SSD1327_ID],
},
+ /* ssd133x family */
+ {
+ .compatible = "solomon,ssd1331",
+ .data = &ssd130x_variants[SSD1331_ID],
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ssd130x_of_match);
@@ -166,6 +171,8 @@ static const struct spi_device_id ssd130x_spi_table[] = {
{ "ssd1322", SSD1322_ID },
{ "ssd1325", SSD1325_ID },
{ "ssd1327", SSD1327_ID },
+ /* ssd133x family */
+ { "ssd1331", SSD1331_ID },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 3d0e093a7e6e..ebd943b9e357 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -119,6 +119,26 @@
#define SSD130X_SET_VCOMH_VOLTAGE 0xbe
#define SSD132X_SET_FUNCTION_SELECT_B 0xd5
+/* ssd133x commands */
+#define SSD133X_SET_COL_RANGE 0x15
+#define SSD133X_SET_ROW_RANGE 0x75
+#define SSD133X_CONTRAST_A 0x81
+#define SSD133X_CONTRAST_B 0x82
+#define SSD133X_CONTRAST_C 0x83
+#define SSD133X_SET_MASTER_CURRENT 0x87
+#define SSD132X_SET_PRECHARGE_A 0x8a
+#define SSD132X_SET_PRECHARGE_B 0x8b
+#define SSD132X_SET_PRECHARGE_C 0x8c
+#define SSD133X_SET_DISPLAY_START 0xa1
+#define SSD133X_SET_DISPLAY_OFFSET 0xa2
+#define SSD133X_SET_DISPLAY_NORMAL 0xa4
+#define SSD133X_SET_MASTER_CONFIG 0xad
+#define SSD133X_POWER_SAVE_MODE 0xb0
+#define SSD133X_PHASES_PERIOD 0xb1
+#define SSD133X_SET_CLOCK_FREQ 0xb3
+#define SSD133X_SET_PRECHARGE_VOLTAGE 0xbb
+#define SSD133X_SET_VCOMH_VOLTAGE 0xbe
+
#define MAX_CONTRAST 255
const struct ssd130x_deviceinfo ssd130x_variants[] = {
@@ -180,6 +200,12 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
.default_width = 128,
.default_height = 128,
.family_id = SSD132X_FAMILY,
+ },
+ /* ssd133x family */
+ [SSD1331_ID] = {
+ .default_width = 96,
+ .default_height = 64,
+ .family_id = SSD133X_FAMILY,
}
};
EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
@@ -589,6 +615,117 @@ static int ssd132x_init(struct ssd130x_device *ssd130x)
return 0;
}
+static int ssd133x_init(struct ssd130x_device *ssd130x)
+{
+ int ret;
+
+ /* Set color A contrast */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_CONTRAST_A, 0x91);
+ if (ret < 0)
+ return ret;
+
+ /* Set color B contrast */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_CONTRAST_B, 0x50);
+ if (ret < 0)
+ return ret;
+
+ /* Set color C contrast */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_CONTRAST_C, 0x7d);
+ if (ret < 0)
+ return ret;
+
+ /* Set master current */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_MASTER_CURRENT, 0x06);
+ if (ret < 0)
+ return ret;
+
+ /* Set column start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_COL_RANGE, 0x00, ssd130x->width - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set row start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_ROW_RANGE, 0x00, ssd130x->height - 1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Horizontal Address Increment
+ * Normal order SA,SB,SC (e.g. RGB)
+ * COM Split Odd Even
+ * 256 color format
+ */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD13XX_SET_SEG_REMAP, 0x20);
+ if (ret < 0)
+ return ret;
+
+ /* Set display start and offset */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_DISPLAY_START, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_DISPLAY_OFFSET, 0x00);
+ if (ret < 0)
+ return ret;
+
+ /* Set display mode normal */
+ ret = ssd130x_write_cmd(ssd130x, 1, SSD133X_SET_DISPLAY_NORMAL);
+ if (ret < 0)
+ return ret;
+
+ /* Set multiplex ratio value */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD13XX_SET_MULTIPLEX_RATIO, ssd130x->height - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set master configuration */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_MASTER_CONFIG, 0x8e);
+ if (ret < 0)
+ return ret;
+
+ /* Set power mode */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_POWER_SAVE_MODE, 0x0b);
+ if (ret < 0)
+ return ret;
+
+ /* Set Phase 1 and 2 period */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_PHASES_PERIOD, 0x31);
+ if (ret < 0)
+ return ret;
+
+ /* Set clock divider */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_CLOCK_FREQ, 0xf0);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge A */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD132X_SET_PRECHARGE_A, 0x64);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge B */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD132X_SET_PRECHARGE_B, 0x78);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge C */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD132X_SET_PRECHARGE_C, 0x64);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge level */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_PRECHARGE_VOLTAGE, 0x3a);
+ if (ret < 0)
+ return ret;
+
+ /* Set VCOMH voltage */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_VCOMH_VOLTAGE, 0x3e);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
struct drm_rect *rect, u8 *buf,
u8 *data_array)
@@ -753,6 +890,47 @@ static int ssd132x_update_rect(struct ssd130x_device *ssd130x,
return ret;
}
+static int ssd133x_update_rect(struct ssd130x_device *ssd130x,
+ struct drm_rect *rect, u8 *data_array,
+ unsigned int pitch)
+{
+ unsigned int x = rect->x1;
+ unsigned int y = rect->y1;
+ unsigned int columns = drm_rect_width(rect);
+ unsigned int rows = drm_rect_height(rect);
+ int ret;
+
+ /*
+ * The screen is divided in Segment and Common outputs, where
+ * COM0 to COM[N - 1] are the rows and SEG0 to SEG[M - 1] are
+ * the columns.
+ *
+ * Each Segment holds an 8-bit pixel and each Common output drives a
+ * row of pixels. When using the (default) horizontal address
+ * increment mode, each byte of data sent to the controller addresses
+ * one Segment (e.g. SEG0).
+ *
+ * When using the 256 color depth format, each pixel contains 3
+ * sub-pixels for colors A, B and C, carrying 3 bits, 3 bits and
+ * 2 bits respectively.
+ */
+
+ /* Set column start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_COL_RANGE, x, x + columns - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set row start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_ROW_RANGE, y, y + rows - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Write out update in one go since horizontal addressing mode is used */
+ ret = ssd130x_write_data(ssd130x, data_array, pitch * rows);
+
+ return ret;
+}
+
static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
{
unsigned int pages = DIV_ROUND_UP(ssd130x->height, SSD130X_PAGE_HEIGHT);
@@ -805,6 +983,22 @@ static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
ssd130x_write_data(ssd130x, data_array, columns * height);
}
+static void ssd133x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
+{
+ const struct drm_format_info *fi = drm_format_info(DRM_FORMAT_RGB332);
+ unsigned int pitch;
+
+ if (!fi)
+ return;
+
+ pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+
+ memset(data_array, 0, pitch * ssd130x->height);
+
+ /* Write out update in one go since horizontal addressing mode is used */
+ ssd130x_write_data(ssd130x, data_array, pitch * ssd130x->height);
+}
+
static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
struct drm_rect *rect,
@@ -866,6 +1060,36 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
return ret;
}
+static int ssd133x_fb_blit_rect(struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
+ struct drm_rect *rect, u8 *data_array,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+ const struct drm_format_info *fi = drm_format_info(DRM_FORMAT_RGB332);
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+ int ret = 0;
+
+ if (!fi)
+ return -EINVAL;
+
+ dst_pitch = drm_format_info_min_pitch(fi, 0, drm_rect_width(rect));
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ iosys_map_set_vaddr(&dst, data_array);
+ drm_fb_xrgb8888_to_rgb332(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+ ssd133x_update_rect(ssd130x, rect, data_array, dst_pitch);
+
+ return ret;
+}
+
static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -964,6 +1188,29 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
return 0;
}
+static int ssd133x_primary_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
+
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ /* No SSD133x-specific constraints beyond the common plane check */
+
+ return 0;
+}
+
static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1034,6 +1281,39 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
drm_dev_exit(idx);
}
+static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ struct ssd130x_crtc_state *ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_device *drm = plane->dev;
+ struct drm_rect dst_clip;
+ struct drm_rect damage;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ ssd133x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
+ ssd130x_crtc_state->data_array,
+ &shadow_plane_state->fmtcnv_state);
+ }
+
+ drm_dev_exit(idx);
+}
+
static void ssd130x_primary_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1082,6 +1362,30 @@ static void ssd132x_primary_plane_atomic_disable(struct drm_plane *plane,
drm_dev_exit(idx);
}
+static void ssd133x_primary_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = plane->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ struct ssd130x_crtc_state *ssd130x_crtc_state;
+ int idx;
+
+ if (!plane_state->crtc)
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ ssd133x_clear_screen(ssd130x, ssd130x_crtc_state->data_array);
+
+ drm_dev_exit(idx);
+}
+
/* Called during init to allocate the plane's atomic state. */
static void ssd130x_primary_plane_reset(struct drm_plane *plane)
{
@@ -1144,6 +1448,12 @@ static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs[]
.atomic_check = ssd132x_primary_plane_atomic_check,
.atomic_update = ssd132x_primary_plane_atomic_update,
.atomic_disable = ssd132x_primary_plane_atomic_disable,
+ },
+ [SSD133X_FAMILY] = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = ssd133x_primary_plane_atomic_check,
+ .atomic_update = ssd133x_primary_plane_atomic_update,
+ .atomic_disable = ssd133x_primary_plane_atomic_disable,
}
};
@@ -1214,6 +1524,33 @@ static int ssd132x_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static int ssd133x_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = crtc->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct ssd130x_crtc_state *ssd130x_state = to_ssd130x_crtc_state(crtc_state);
+ const struct drm_format_info *fi = drm_format_info(DRM_FORMAT_RGB332);
+ unsigned int pitch;
+ int ret;
+
+ if (!fi)
+ return -EINVAL;
+
+ ret = drm_crtc_helper_atomic_check(crtc, state);
+ if (ret)
+ return ret;
+
+ pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+
+ ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL);
+ if (!ssd130x_state->data_array)
+ return -ENOMEM;
+
+ return 0;
+}
+
/* Called during init to allocate the CRTC's atomic state. */
static void ssd130x_crtc_reset(struct drm_crtc *crtc)
{
@@ -1275,6 +1612,10 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs[] = {
.mode_valid = ssd130x_crtc_mode_valid,
.atomic_check = ssd132x_crtc_atomic_check,
},
+ [SSD133X_FAMILY] = {
+ .mode_valid = ssd130x_crtc_mode_valid,
+ .atomic_check = ssd133x_crtc_atomic_check,
+ },
};
static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
@@ -1337,6 +1678,31 @@ power_off:
ssd130x_power_off(ssd130x);
}
+static void ssd133x_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ int ret;
+
+ ret = ssd130x_power_on(ssd130x);
+ if (ret)
+ return;
+
+ ret = ssd133x_init(ssd130x);
+ if (ret)
+ goto power_off;
+
+ ssd130x_write_cmd(ssd130x, 1, SSD13XX_DISPLAY_ON);
+
+ backlight_enable(ssd130x->bl_dev);
+
+ return;
+
+power_off:
+ ssd130x_power_off(ssd130x);
+}
+
static void ssd130x_encoder_atomic_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
@@ -1358,6 +1724,10 @@ static const struct drm_encoder_helper_funcs ssd130x_encoder_helper_funcs[] = {
[SSD132X_FAMILY] = {
.atomic_enable = ssd132x_encoder_atomic_enable,
.atomic_disable = ssd130x_encoder_atomic_disable,
+ },
+ [SSD133X_FAMILY] = {
+ .atomic_enable = ssd133x_encoder_atomic_enable,
+ .atomic_disable = ssd130x_encoder_atomic_disable,
}
};
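A reference point for the RGB332 handling above: the format is one byte per pixel (so drm_format_info_min_pitch() simply returns the width), laid out as RRRGGGBB. The packing below is written independently as a sanity check of that layout; it is not the implementation of drm_fb_xrgb8888_to_rgb332():

    #include <stdint.h>

    /* Keep the top 3/3/2 bits of R/G/B from an XRGB8888 pixel. */
    static uint8_t xrgb8888_to_rgb332(uint32_t px)
    {
        uint8_t r = (px >> 16) & 0xff;
        uint8_t g = (px >> 8) & 0xff;
        uint8_t b = px & 0xff;

        return (r & 0xe0) | ((g & 0xe0) >> 3) | (b >> 6);
    }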
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index 075c5c3ee75a..a4554018bb2a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -25,7 +25,8 @@
enum ssd130x_family_ids {
SSD130X_FAMILY,
- SSD132X_FAMILY
+ SSD132X_FAMILY,
+ SSD133X_FAMILY
};
enum ssd130x_variants {
@@ -39,6 +40,8 @@ enum ssd130x_variants {
SSD1322_ID,
SSD1325_ID,
SSD1327_ID,
+ /* ssd133x family */
+ SSD1331_ID,
NR_SSD130X_VARIANTS
};
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index ef02d530f78d..ae12d001a04b 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -522,7 +522,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
- return err;
+ goto err_pm_disable;
}
disable_irq(dpaux->irq);
@@ -542,7 +542,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
*/
err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
if (err < 0)
- return err;
+ goto err_pm_disable;
#ifdef CONFIG_GENERIC_PINCONF
dpaux->desc.name = dev_name(&pdev->dev);
@@ -555,7 +555,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
if (IS_ERR(dpaux->pinctrl)) {
dev_err(&pdev->dev, "failed to register pincontrol\n");
- return PTR_ERR(dpaux->pinctrl);
+ err = PTR_ERR(dpaux->pinctrl);
+ goto err_pm_disable;
}
#endif
/* enable and clear all interrupts */
@@ -571,10 +572,15 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
if (err < 0) {
dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
- return err;
+ goto err_pm_disable;
}
return 0;
+
+err_pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return err;
}
static void tegra_dpaux_remove(struct platform_device *pdev)
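The new err_pm_disable label pairs with the pm_runtime_enable()/pm_runtime_get_sync() calls made earlier in probe, outside this hunk. The shape of the fix in isolation; do_setup() is a stand-in for the real probe steps:

    static int example_probe(struct platform_device *pdev)
    {
        int err;

        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);

        err = do_setup(pdev); /* hypothetical setup step */
        if (err < 0)
            goto err_pm_disable;

        return 0;

    err_pm_disable:
        pm_runtime_put_sync(&pdev->dev); /* drop the usage count */
        pm_runtime_disable(&pdev->dev);  /* balance the enable */
        return err;
    }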
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index ccb5d74fa227..682011166a8f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -13,7 +13,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
@@ -26,6 +25,7 @@
/* XXX move to include/uapi/drm/drm_fourcc.h? */
#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
+struct edid;
struct reset_control;
struct tegra_drm {
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index fbfe92a816d4..db606e151afc 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1544,9 +1544,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
if (np) {
struct platform_device *gangster = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!gangster)
+ return -EPROBE_DEFER;
dsi->slave = platform_get_drvdata(gangster);
- of_node_put(np);
if (!dsi->slave) {
put_device(&gangster->dev);
@@ -1594,44 +1596,58 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (!pdev->dev.pm_domain) {
dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
- if (IS_ERR(dsi->rst))
- return PTR_ERR(dsi->rst);
+ if (IS_ERR(dsi->rst)) {
+ err = PTR_ERR(dsi->rst);
+ goto remove;
+ }
}
dsi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dsi->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
- "cannot get DSI clock\n");
+ if (IS_ERR(dsi->clk)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
+ "cannot get DSI clock\n");
+ goto remove;
+ }
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
- if (IS_ERR(dsi->clk_lp))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
- "cannot get low-power clock\n");
+ if (IS_ERR(dsi->clk_lp)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
+ "cannot get low-power clock\n");
+ goto remove;
+ }
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
- if (IS_ERR(dsi->clk_parent))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
- "cannot get parent clock\n");
+ if (IS_ERR(dsi->clk_parent)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
+ "cannot get parent clock\n");
+ goto remove;
+ }
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
- if (IS_ERR(dsi->vdd))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
- "cannot get VDD supply\n");
+ if (IS_ERR(dsi->vdd)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
+ "cannot get VDD supply\n");
+ goto remove;
+ }
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- return err;
+ goto remove;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs))
- return PTR_ERR(dsi->regs);
+ if (IS_ERR(dsi->regs)) {
+ err = PTR_ERR(dsi->regs);
+ goto remove;
+ }
dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
- if (IS_ERR(dsi->mipi))
- return PTR_ERR(dsi->mipi);
+ if (IS_ERR(dsi->mipi)) {
+ err = PTR_ERR(dsi->mipi);
+ goto remove;
+ }
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -1659,9 +1675,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
return 0;
unregister:
+ pm_runtime_disable(&pdev->dev);
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
+remove:
+ tegra_output_remove(&dsi->output);
return err;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index a1fcee665023..09987e372e3e 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -24,6 +24,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
@@ -1856,12 +1857,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return err;
hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ err = PTR_ERR(hdmi->regs);
+ goto remove;
+ }
err = platform_get_irq(pdev, 0);
if (err < 0)
- return err;
+ goto remove;
hdmi->irq = err;
@@ -1870,18 +1873,18 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
hdmi->irq, err);
- return err;
+ goto remove;
}
platform_set_drvdata(pdev, hdmi);
err = devm_pm_runtime_enable(&pdev->dev);
if (err)
- return err;
+ goto remove;
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
- return err;
+ goto remove;
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
@@ -1891,10 +1894,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto remove;
}
return 0;
+
+remove:
+ tegra_output_remove(&hdmi->output);
+ return err;
}
static void tegra_hdmi_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index dc2dcb5ca1c8..4da3c3d1abbc 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
@@ -142,8 +143,10 @@ int tegra_output_probe(struct tegra_output *output)
GPIOD_IN,
"HDMI hotplug detect");
if (IS_ERR(output->hpd_gpio)) {
- if (PTR_ERR(output->hpd_gpio) != -ENOENT)
- return PTR_ERR(output->hpd_gpio);
+ if (PTR_ERR(output->hpd_gpio) != -ENOENT) {
+ err = PTR_ERR(output->hpd_gpio);
+ goto put_i2c;
+ }
output->hpd_gpio = NULL;
}
@@ -152,7 +155,7 @@ int tegra_output_probe(struct tegra_output *output)
err = gpiod_to_irq(output->hpd_gpio);
if (err < 0) {
dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
- return err;
+ goto put_i2c;
}
output->hpd_irq = err;
@@ -165,7 +168,7 @@ int tegra_output_probe(struct tegra_output *output)
if (err < 0) {
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
output->hpd_irq, err);
- return err;
+ goto put_i2c;
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
@@ -179,6 +182,12 @@ int tegra_output_probe(struct tegra_output *output)
}
return 0;
+
+put_i2c:
+ if (output->ddc)
+ i2c_put_adapter(output->ddc);
+
+ return err;
}
void tegra_output_remove(struct tegra_output *output)
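The added put_i2c label balances the DDC adapter reference that tegra_output_probe() takes earlier (via of_get_i2c_adapter_by_node(), not shown in this hunk). The pairing in schematic form; example_setup() is a placeholder:

    static int example_probe_ddc(struct device_node *ddc_node)
    {
        struct i2c_adapter *adapter;
        int err;

        adapter = of_get_i2c_adapter_by_node(ddc_node); /* takes a ref */
        if (!adapter)
            return -EPROBE_DEFER;

        err = example_setup();
        if (err < 0) {
            i2c_put_adapter(adapter); /* drop the ref on every error path */
            return err;
        }

        return 0;
    }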
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index fc66bbd913b2..1e8ec50b759e 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -225,26 +225,28 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->clk = devm_clk_get(dc->dev, NULL);
if (IS_ERR(rgb->clk)) {
dev_err(dc->dev, "failed to get clock\n");
- return PTR_ERR(rgb->clk);
+ err = PTR_ERR(rgb->clk);
+ goto remove;
}
rgb->clk_parent = devm_clk_get(dc->dev, "parent");
if (IS_ERR(rgb->clk_parent)) {
dev_err(dc->dev, "failed to get parent clock\n");
- return PTR_ERR(rgb->clk_parent);
+ err = PTR_ERR(rgb->clk_parent);
+ goto remove;
}
err = clk_set_parent(rgb->clk, rgb->clk_parent);
if (err < 0) {
dev_err(dc->dev, "failed to set parent clock: %d\n", err);
- return err;
+ goto remove;
}
rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
if (IS_ERR(rgb->pll_d_out0)) {
err = PTR_ERR(rgb->pll_d_out0);
dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
- return err;
+ goto remove;
}
if (dc->soc->has_pll_d2_out0) {
@@ -252,13 +254,19 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
if (IS_ERR(rgb->pll_d2_out0)) {
err = PTR_ERR(rgb->pll_d2_out0);
dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
- return err;
+ goto put_pll;
}
}
dc->rgb = &rgb->output;
return 0;
+
+put_pll:
+ clk_put(rgb->pll_d_out0);
+remove:
+ tegra_output_remove(&rgb->output);
+ return err;
}
void tegra_dc_rgb_remove(struct tegra_dc *dc)
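Unlike the devm_clk_get() calls earlier in this function, clk_get_sys() is not device-managed, which is why the new put_pll label must release pll_d_out0 by hand when the later pll_d2_out0 lookup fails. Schematically:

    /* clk_get_sys() references are manual; devres will not drop them. */
    clk = clk_get_sys(NULL, "pll_d_out0");
    if (IS_ERR(clk))
        return PTR_ERR(clk);

    /* ... and on any subsequent failure: */
    clk_put(clk);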
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 83341576630d..bad3b8fcc726 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -20,6 +20,7 @@
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/tests/drm_managed_test.c b/drivers/gpu/drm/tests/drm_managed_test.c
index 1652dca11d30..76eb273c9b36 100644
--- a/drivers/gpu/drm/tests/drm_managed_test.c
+++ b/drivers/gpu/drm/tests/drm_managed_test.c
@@ -12,6 +12,7 @@
#define TEST_TIMEOUT_MS 100
struct managed_test_priv {
+ struct drm_device *drm;
bool action_done;
wait_queue_head_t action_wq;
};
@@ -24,44 +25,88 @@ static void drm_action(struct drm_device *drm, void *ptr)
wake_up_interruptible(&priv->action_wq);
}
-static void drm_test_managed_run_action(struct kunit *test)
+/*
+ * The test verifies that the release action is called when
+ * drmm_release_action is called.
+ */
+static void drm_test_managed_release_action(struct kunit *test)
{
- struct managed_test_priv *priv;
- struct drm_device *drm;
- struct device *dev;
+ struct managed_test_priv *priv = test->priv;
int ret;
- priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
- init_waitqueue_head(&priv->action_wq);
+ ret = drmm_add_action_or_reset(priv->drm, drm_action, priv);
+ KUNIT_EXPECT_EQ(test, ret, 0);
- dev = drm_kunit_helper_alloc_device(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ ret = drm_dev_register(priv->drm, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drmm_release_action(priv->drm, drm_action, priv);
+ ret = wait_event_interruptible_timeout(priv->action_wq, priv->action_done,
+ msecs_to_jiffies(TEST_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+
+ drm_dev_unregister(priv->drm);
+ drm_kunit_helper_free_device(test, priv->drm->dev);
+}
- drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+/*
+ * The test verifies that the release action is called automatically when the
+ * device is released.
+ */
+static void drm_test_managed_run_action(struct kunit *test)
+{
+ struct managed_test_priv *priv = test->priv;
+ int ret;
- ret = drmm_add_action_or_reset(drm, drm_action, priv);
+ ret = drmm_add_action_or_reset(priv->drm, drm_action, priv);
KUNIT_EXPECT_EQ(test, ret, 0);
- ret = drm_dev_register(drm, 0);
+ ret = drm_dev_register(priv->drm, 0);
KUNIT_ASSERT_EQ(test, ret, 0);
- drm_dev_unregister(drm);
- drm_kunit_helper_free_device(test, dev);
+ drm_dev_unregister(priv->drm);
+ drm_kunit_helper_free_device(test, priv->drm->dev);
ret = wait_event_interruptible_timeout(priv->action_wq, priv->action_done,
msecs_to_jiffies(TEST_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
}
+static int drm_managed_test_init(struct kunit *test)
+{
+ struct managed_test_priv *priv;
+ struct device *dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ init_waitqueue_head(&priv->action_wq);
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /*
+ * DRM device can't be embedded in priv, since priv->action_done needs
+ * to remain allocated beyond both the parent device's and the
+ * drm_device's lifetimes.
+ */
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*priv->drm), 0,
+ DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+ test->priv = priv;
+
+ return 0;
+}
+
static struct kunit_case drm_managed_tests[] = {
+ KUNIT_CASE(drm_test_managed_release_action),
KUNIT_CASE(drm_test_managed_run_action),
{}
};
static struct kunit_suite drm_managed_test_suite = {
- .name = "drm-test-managed",
+ .name = "drm_managed",
+ .init = drm_managed_test_init,
.test_cases = drm_managed_tests
};
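Background for the two cases above: drmm_add_action_or_reset() registers a release action that normally runs when the last reference to the drm_device is dropped, while drmm_release_action() runs and removes one registered action immediately. Minimal usage, with my_cleanup and data as placeholders:

    static void my_cleanup(struct drm_device *drm, void *ptr)
    {
        /* runs at final drm_dev_put(), or when released explicitly */
    }

    /* register; on registration failure the action runs right away: */
    ret = drmm_add_action_or_reset(drm, my_cleanup, data);

    /* optionally run it now instead of waiting for teardown: */
    drmm_release_action(drm, my_cleanup, data);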
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 1eb0c304f960..3488d930e3a3 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -188,7 +188,7 @@ out:
static void drm_test_mm_debug(struct kunit *test)
{
- struct drm_printer p = drm_debug_printer(test->name);
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, test->name);
struct drm_mm mm;
struct drm_mm_node nodes[2];
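This one-liner follows the tree-wide move from drm_debug_printer() to drm_dbg_printer(), which additionally takes an optional drm_device (NULL preserves the old device-less output) and a debug category. In isolation:

    struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, "my-prefix");

    drm_printf(&p, "nodes in use: %d\n", 2); /* emitted at debug level */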
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 23bf16f596f6..cd5eefa06060 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -182,9 +182,6 @@ static void tilcdc_fini(struct drm_device *dev)
if (priv->clk)
clk_put(priv->clk);
- if (priv->mmio)
- iounmap(priv->mmio);
-
if (priv->wq)
destroy_workqueue(priv->wq);
@@ -201,7 +198,6 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct tilcdc_drm_private *priv;
- struct resource *res;
u32 bpp = 0;
int ret;
@@ -226,17 +222,10 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get memory resource\n");
- ret = -EINVAL;
- goto init_failed;
- }
-
- priv->mmio = ioremap(res->start, resource_size(res));
- if (!priv->mmio) {
- dev_err(dev, "failed to ioremap\n");
- ret = -ENOMEM;
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmio)) {
+ dev_err(dev, "failed to request / ioremap\n");
+ ret = PTR_ERR(priv->mmio);
goto init_failed;
}
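devm_platform_ioremap_resource() folds the platform_get_resource()/ioremap() pair into a single managed call, which is also why the iounmap() in tilcdc_fini() could be deleted above. Note the error convention switches from NULL checks to ERR_PTR:

    void __iomem *mmio = devm_platform_ioremap_resource(pdev, 0);

    if (IS_ERR(mmio))
        return PTR_ERR(mmio); /* failure is ERR_PTR-encoded, never NULL */
    /* no iounmap() needed: devres unmaps on driver detach */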
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
index ec87c4fc1ad5..468535f7eed2 100644
--- a/drivers/gpu/drm/ttm/tests/Makefile
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -3,4 +3,7 @@
obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
ttm_device_test.o \
ttm_pool_test.o \
+ ttm_resource_test.o \
+ ttm_tt_test.o \
+ ttm_bo_test.o \
ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
new file mode 100644
index 000000000000..1f8a4f8adc92
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/dma-resv.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/ww_mutex.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_8K
+
+struct ttm_bo_test_case {
+ const char *description;
+ bool interruptible;
+ bool no_wait;
+};
+
+static const struct ttm_bo_test_case ttm_bo_reserved_cases[] = {
+ {
+ .description = "Cannot be interrupted and sleeps",
+ .interruptible = false,
+ .no_wait = false,
+ },
+ {
+ .description = "Cannot be interrupted, locks straight away",
+ .interruptible = false,
+ .no_wait = true,
+ },
+ {
+ .description = "Can be interrupted, sleeps",
+ .interruptible = true,
+ .no_wait = false,
+ },
+};
+
+static void ttm_bo_init_case_desc(const struct ttm_bo_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_bo_reserve, ttm_bo_reserved_cases, ttm_bo_init_case_desc);
+
+static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
+{
+ const struct ttm_bo_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ dma_resv_unlock(bo->base.resv);
+}
+
+static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ /* Let's lock it beforehand */
+ dma_resv_lock(bo->base.resv, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+}
+
+static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+
+ ww_acquire_fini(&ctx);
+}
+
+static void ttm_bo_reserve_double_resv(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ dma_resv_unlock(bo->base.resv);
+ ww_acquire_fini(&ctx);
+
+ KUNIT_ASSERT_EQ(test, err, -EALREADY);
+}
+
+/*
+ * A test case heavily inspired by ww_test_edeadlk_normal(). It injects
+ * a deadlock by manipulating the sequence number of the context that holds
+ * dma_resv lock of bo2 so the other context is "wounded" and has to back off
+ * (indicated by -EDEADLK). The subtest checks if ttm_bo_reserve() properly
+ * propagates that error.
+ */
+static void ttm_bo_reserve_deadlock(struct kunit *test)
+{
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ww_acquire_ctx ctx1, ctx2;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ ww_acquire_init(&ctx1, &reservation_ww_class);
+ mutex_lock(&bo2->base.resv->lock.base);
+
+ /* The deadlock will be caught by WW mutex, don't warn about it */
+ lock_release(&bo2->base.resv->lock.base.dep_map, 1);
+
+ bo2->base.resv->lock.ctx = &ctx2;
+ ctx2 = ctx1;
+ ctx2.stamp--; /* Make the context holding the lock younger */
+
+ err = ttm_bo_reserve(bo1, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo2, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, -EDEADLK);
+
+ dma_resv_unlock(bo1->base.resv);
+ ww_acquire_fini(&ctx1);
+}
+
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+struct signal_timer {
+ struct timer_list timer;
+ struct ww_acquire_ctx *ctx;
+};
+
+static void signal_for_ttm_bo_reserve(struct timer_list *t)
+{
+ struct signal_timer *s_timer = from_timer(s_timer, t, timer);
+ struct task_struct *task = s_timer->ctx->task;
+
+ do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
+}
+
+static int threaded_ttm_bo_reserve(void *arg)
+{
+ struct ttm_buffer_object *bo = arg;
+ struct signal_timer s_timer;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = true;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ /* Prepare a signal that will interrupt the reservation attempt */
+ timer_setup_on_stack(&s_timer.timer, &signal_for_ttm_bo_reserve, 0);
+ s_timer.ctx = &ctx;
+
+ mod_timer(&s_timer.timer, jiffies + msecs_to_jiffies(100));
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ timer_delete_sync(&s_timer.timer);
+ destroy_timer_on_stack(&s_timer.timer);
+
+ ww_acquire_fini(&ctx);
+
+ return err;
+}
+
+static void ttm_bo_reserve_interrupted(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");
+
+ /* Abort on error: the task pointer is dereferenced below */
+ KUNIT_ASSERT_FALSE(test, IS_ERR(task));
+
+ /* Take a lock so the threaded reserve has to wait */
+ mutex_lock(&bo->base.resv->lock.base);
+
+ wake_up_process(task);
+ msleep(20);
+ err = kthread_stop(task);
+
+ mutex_unlock(&bo->base.resv->lock.base);
+
+ KUNIT_ASSERT_EQ(test, err, -ERESTARTSYS);
+}
+#endif /* IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST) */
+
+static void ttm_bo_unreserve_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->priority = bo_prio;
+
+ err = ttm_resource_alloc(bo, place, &res1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ bo->resource = res1;
+
+ /* Add a dummy resource to populate LRU */
+ ttm_resource_alloc(bo, place, &res2);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unreserve(bo);
+
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru, &man->lru[bo->priority]), 1);
+
+ ttm_resource_free(bo, &res2);
+ ttm_resource_free(bo, &res1);
+}
+
+static void ttm_bo_unreserve_pinned(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+
+ err = ttm_resource_alloc(bo, place, &res1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res1;
+
+ /* Add a dummy resource to the pinned list */
+ err = ttm_resource_alloc(bo, place, &res2);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);
+
+ ttm_bo_unreserve(bo);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);
+
+ ttm_resource_free(bo, &res1);
+ ttm_resource_free(bo, &res2);
+}
+
+static void ttm_bo_unreserve_bulk(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ttm_resource *res1, *res2;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ dma_resv_lock(bo1->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
+ dma_resv_unlock(bo1->base.resv);
+
+ err = ttm_resource_alloc(bo1, place, &res1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo1->resource = res1;
+
+ dma_resv_lock(bo2->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
+ dma_resv_unlock(bo2->base.resv);
+
+ err = ttm_resource_alloc(bo2, place, &res2);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo2->resource = res2;
+
+ ttm_bo_reserve(bo1, false, false, NULL);
+ ttm_bo_unreserve(bo1);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+ KUNIT_ASSERT_PTR_EQ(test, res1, pos->last);
+
+ ttm_resource_free(bo1, &res1);
+ ttm_resource_free(bo2, &res2);
+}
+
+static void ttm_bo_put_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_device;
+
+ err = ttm_resource_alloc(bo, place, &res);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_bo_put(bo);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "kunit-ttm-bo-put";
+}
+
+static const struct dma_fence_ops mock_fence_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static void ttm_bo_put_shared_resv(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct dma_resv *external_resv;
+ struct dma_fence *fence;
+ /* A dummy DMA fence lock */
+ spinlock_t fence_lock;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ external_resv = kunit_kzalloc(test, sizeof(*external_resv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, external_resv);
+
+ dma_resv_init(external_resv);
+
+ fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fence);
+
+ spin_lock_init(&fence_lock);
+ dma_fence_init(fence, &mock_fence_ops, &fence_lock, 0, 0);
+
+ dma_resv_lock(external_resv, NULL);
+ dma_resv_reserve_fences(external_resv, 1);
+ dma_resv_add_fence(external_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_unlock(external_resv);
+
+ dma_fence_signal(fence);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_device;
+ bo->base.resv = external_resv;
+
+ ttm_bo_put(bo);
+}
+
+static void ttm_bo_pin_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ unsigned int no_pins = 3;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ for (int i = 0; i < no_pins; i++) {
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+ }
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, no_pins);
+}
+
+static void ttm_bo_pin_unpin_resource(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_resource_alloc(bo, place, &res);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_PTR_EQ(test, res, pos->last);
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 0);
+
+ ttm_resource_free(bo, &res);
+}
+
+static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_resource_alloc(bo, place, &res);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+
+ /* Multiple pins */
+ ttm_bo_pin(bo);
+ ttm_bo_pin(bo);
+
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 2);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ ttm_resource_free(bo, &res);
+}
+
+static struct kunit_case ttm_bo_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_bo_reserve_optimistic_no_ticket,
+ ttm_bo_reserve_gen_params),
+ KUNIT_CASE(ttm_bo_reserve_locked_no_sleep),
+ KUNIT_CASE(ttm_bo_reserve_no_wait_ticket),
+ KUNIT_CASE(ttm_bo_reserve_double_resv),
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+ KUNIT_CASE(ttm_bo_reserve_interrupted),
+#endif
+ KUNIT_CASE(ttm_bo_reserve_deadlock),
+ KUNIT_CASE(ttm_bo_unreserve_basic),
+ KUNIT_CASE(ttm_bo_unreserve_pinned),
+ KUNIT_CASE(ttm_bo_unreserve_bulk),
+ KUNIT_CASE(ttm_bo_put_basic),
+ KUNIT_CASE(ttm_bo_put_shared_resv),
+ KUNIT_CASE(ttm_bo_pin_basic),
+ KUNIT_CASE(ttm_bo_pin_unpin_resource),
+ KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
+ {}
+};
+
+static struct kunit_suite ttm_bo_test_suite = {
+ .name = "ttm_bo",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_bo_test_cases,
+};
+
+kunit_test_suites(&ttm_bo_test_suite);
+
+MODULE_LICENSE("GPL");
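A closing remark on ttm_bo_reserve_deadlock() above: in real drivers the -EDEADLK it asserts on is handled by backing off and re-locking in slow-path order. A hedged sketch of that convention (held_bo and contended_bo are placeholders):

    err = ttm_bo_reserve(contended_bo, false, false, &ctx);
    if (err == -EDEADLK) {
        /* wounded: drop every lock we hold ... */
        dma_resv_unlock(held_bo->base.resv);
        /* ... sleep-acquire the contended object ... */
        dma_resv_lock_slow(contended_bo->base.resv, &ctx);
        /* ... then re-reserve the dropped buffers before retrying */
    }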
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index 81661d8827aa..7b7c1fa805fc 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -2,9 +2,33 @@
/*
* Copyright © 2023 Intel Corporation
*/
+#include <drm/ttm/ttm_tt.h>
+
#include "ttm_kunit_helpers.h"
+static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
+
+ return tt;
+}
+
+static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ kfree(ttm);
+}
+
+static void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
+{
+}
+
struct ttm_device_funcs ttm_dev_funcs = {
+ .ttm_tt_create = ttm_tt_simple_create,
+ .ttm_tt_destroy = ttm_tt_simple_destroy,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
@@ -29,19 +53,41 @@ struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size)
{
- struct drm_gem_object gem_obj = { .size = size };
+ struct drm_gem_object gem_obj = { };
struct ttm_buffer_object *bo;
+ int err;
bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, bo);
bo->base = gem_obj;
+ err = drm_gem_object_init(devs->drm, &bo->base, size);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
bo->bdev = devs->ttm_dev;
+ bo->destroy = dummy_ttm_bo_destroy;
+
+ kref_init(&bo->kref);
return bo;
}
EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
+struct ttm_place *ttm_place_kunit_init(struct kunit *test,
+ uint32_t mem_type, uint32_t flags)
+{
+ struct ttm_place *place;
+
+ place = kunit_kzalloc(test, sizeof(*place), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, place);
+
+ place->mem_type = mem_type;
+ place->flags = flags;
+
+ return place;
+}
+EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
+
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
{
struct ttm_test_devices *devs;
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
index e261e3660d0b..2f51c833a536 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
#include <drm/drm_kunit_helpers.h>
#include <kunit/test.h>
@@ -28,6 +29,8 @@ int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size);
+struct ttm_place *ttm_place_kunit_init(struct kunit *test,
+ uint32_t mem_type, uint32_t flags);
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index cceaa18d4e46..0a3fede84da9 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -78,10 +78,9 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
struct ttm_test_devices *devs = priv->devs;
struct ttm_pool *pool;
struct ttm_tt *tt;
- unsigned long order = __fls(size / PAGE_SIZE);
int err;
- tt = ttm_tt_kunit_init(test, order, caching, size);
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
new file mode 100644
index 000000000000..029e1f094bb0
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define RES_SIZE SZ_4K
+#define TTM_PRIV_DUMMY_REG (TTM_NUM_MEM_TYPES - 1)
+
+struct ttm_resource_test_case {
+ const char *description;
+ uint32_t mem_type;
+ uint32_t flags;
+};
+
+struct ttm_resource_test_priv {
+ struct ttm_test_devices *devs;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+};
+
+static const struct ttm_resource_manager_func ttm_resource_manager_mock_funcs = { };
+
+static int ttm_resource_test_init(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_all(test);
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_resource_test_fini(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static void ttm_init_test_mocks(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ uint32_t mem_type, uint32_t flags)
+{
+ size_t size = RES_SIZE;
+
+ /* Make sure we have what we need for a good BO mock */
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs->ttm_dev);
+
+ priv->bo = ttm_bo_kunit_init(test, priv->devs, size);
+ priv->place = ttm_place_kunit_init(test, mem_type, flags);
+}
+
+static void ttm_init_test_manager(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ uint32_t mem_type)
+{
+ struct ttm_device *ttm_dev = priv->devs->ttm_dev;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ man->use_tt = false;
+ man->func = &ttm_resource_manager_mock_funcs;
+
+ ttm_resource_manager_init(man, ttm_dev, size);
+ ttm_set_driver_manager(ttm_dev, mem_type, man);
+ ttm_resource_manager_set_used(man, true);
+}
+
+static const struct ttm_resource_test_case ttm_resource_cases[] = {
+ {
+ .description = "Init resource in TTM_PL_SYSTEM",
+ .mem_type = TTM_PL_SYSTEM,
+ },
+ {
+ .description = "Init resource in TTM_PL_VRAM",
+ .mem_type = TTM_PL_VRAM,
+ },
+ {
+ .description = "Init resource in a private placement",
+ .mem_type = TTM_PRIV_DUMMY_REG,
+ },
+ {
+ .description = "Init resource in TTM_PL_SYSTEM, set placement flags",
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_TOPDOWN,
+ },
+};
+
+static void ttm_resource_case_desc(const struct ttm_resource_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_resource, ttm_resource_cases, ttm_resource_case_desc);
+
+static void ttm_resource_init_basic(struct kunit *test)
+{
+ const struct ttm_resource_test_case *params = test->param_value;
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ uint64_t expected_usage;
+
+ ttm_init_test_mocks(test, priv, params->mem_type, params->flags);
+ bo = priv->bo;
+ place = priv->place;
+
+ if (params->mem_type > TTM_PL_SYSTEM)
+ ttm_init_test_manager(test, priv, params->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+ expected_usage = man->usage + RES_SIZE;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+
+ ttm_resource_init(bo, place, res);
+
+ KUNIT_ASSERT_EQ(test, res->start, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, place->mem_type);
+ KUNIT_ASSERT_EQ(test, res->placement, place->flags);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ KUNIT_ASSERT_NULL(test, res->bus.addr);
+ KUNIT_ASSERT_EQ(test, res->bus.offset, 0);
+ KUNIT_ASSERT_FALSE(test, res->bus.is_iomem);
+ KUNIT_ASSERT_EQ(test, res->bus.caching, ttm_cached);
+ KUNIT_ASSERT_EQ(test, man->usage, expected_usage);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_init_pinned(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ ttm_resource_init(bo, place, res);
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->pinned));
+
+ ttm_bo_unpin(bo);
+ ttm_resource_fini(man, res);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+}
+
+static void ttm_resource_fini_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ ttm_resource_init(bo, place, res);
+ ttm_resource_fini(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static void ttm_resource_manager_init_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_resource_manager_init(man, priv->devs->ttm_dev, size);
+
+ KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev);
+ KUNIT_ASSERT_EQ(test, man->size, size);
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+ KUNIT_ASSERT_NULL(test, man->move);
+
+ for (int i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i]));
+}
+
+static void ttm_resource_manager_usage_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ uint64_t actual_usage;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, TTM_PL_FLAG_TOPDOWN);
+ bo = priv->bo;
+ place = priv->place;
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ ttm_resource_init(bo, place, res);
+ actual_usage = ttm_resource_manager_usage(man);
+
+ KUNIT_ASSERT_EQ(test, actual_usage, RES_SIZE);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_manager_set_used_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_TRUE(test, man->use_type);
+
+ ttm_resource_manager_set_used(man, false);
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+}
+
+static void ttm_sys_man_alloc_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int ret;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ ret = man->func->alloc(man, bo, place, &res);
+
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, mem_type);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_sys_man_free_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ KUNIT_ASSERT_EQ(test, ttm_resource_alloc(bo, place, &res), 0);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ man->func->free(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static struct kunit_case ttm_resource_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_resource_init_basic, ttm_resource_gen_params),
+ KUNIT_CASE(ttm_resource_init_pinned),
+ KUNIT_CASE(ttm_resource_fini_basic),
+ KUNIT_CASE(ttm_resource_manager_init_basic),
+ KUNIT_CASE(ttm_resource_manager_usage_basic),
+ KUNIT_CASE(ttm_resource_manager_set_used_basic),
+ KUNIT_CASE(ttm_sys_man_alloc_basic),
+ KUNIT_CASE(ttm_sys_man_free_basic),
+ {}
+};
+
+static struct kunit_suite ttm_resource_test_suite = {
+ .name = "ttm_resource",
+ .init = ttm_resource_test_init,
+ .exit = ttm_resource_test_fini,
+ .test_cases = ttm_resource_test_cases,
+};
+
+kunit_test_suites(&ttm_resource_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
new file mode 100644
index 000000000000..fd4502c18de6
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/shmem_fs.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_4K
+
+struct ttm_tt_test_case {
+ const char *description;
+ uint32_t size;
+ uint32_t extra_pages_num;
+};
+
+static int ttm_tt_test_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = ttm_test_devices_all(test);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static const struct ttm_tt_test_case ttm_tt_init_basic_cases[] = {
+ {
+ .description = "Page-aligned size",
+ .size = SZ_4K,
+ },
+ {
+ .description = "Extra pages requested",
+ .size = SZ_4K,
+ .extra_pages_num = 1,
+ },
+};
+
+static void ttm_tt_init_case_desc(const struct ttm_tt_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_cases,
+ ttm_tt_init_case_desc);
+
+static void ttm_tt_init_basic(struct kunit *test)
+{
+ const struct ttm_tt_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ uint32_t page_flags = TTM_TT_FLAG_ZERO_ALLOC;
+ enum ttm_caching caching = ttm_cached;
+ uint32_t extra_pages = params->extra_pages_num;
+ int num_pages = params->size >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, params->size);
+
+ err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);
+
+ KUNIT_ASSERT_EQ(test, tt->page_flags, page_flags);
+ KUNIT_ASSERT_EQ(test, tt->caching, caching);
+
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_init_misaligned(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ uint32_t size = SZ_8K;
+ int num_pages = (size + SZ_4K) >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size);
+
+ /* Make the object size misaligned */
+ bo->base.size += 1;
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);
+}
+
+static void ttm_tt_fini_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->pages);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->pages);
+}
+
+static void ttm_tt_fini_sg(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->dma_address);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+}
+
+static void ttm_tt_fini_shmem(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ struct file *shmem;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ shmem = shmem_file_setup("ttm swap", BO_SIZE, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ tt->swap_storage = shmem;
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_create_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_device;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+
+ /* Free manually, as it was allocated outside of KUnit */
+ kfree(bo->ttm);
+}
+
+static void ttm_tt_create_invalid_bo_type(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_sg + 1;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+ KUNIT_EXPECT_NULL(test, bo->ttm);
+}
+
+static void ttm_tt_create_ttm_exists(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->ttm = tt;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ /* Expect to keep the previous TTM */
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_PTR_EQ(test, tt, bo->ttm);
+}
+
+static struct ttm_tt *ttm_tt_null_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ return NULL;
+}
+
+static struct ttm_device_funcs ttm_dev_empty_funcs = {
+ .ttm_tt_create = ttm_tt_null_create,
+};
+
+static void ttm_tt_create_failed(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ /* Update ttm_device_funcs so we don't alloc ttm_tt */
+ devs->ttm_dev->funcs = &ttm_dev_empty_funcs;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -ENOMEM);
+}
+
+static void ttm_tt_destroy_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+
+ ttm_tt_destroy(devs->ttm_dev, bo->ttm);
+}
+
+static struct kunit_case ttm_tt_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_gen_params),
+ KUNIT_CASE(ttm_tt_init_misaligned),
+ KUNIT_CASE(ttm_tt_fini_basic),
+ KUNIT_CASE(ttm_tt_fini_sg),
+ KUNIT_CASE(ttm_tt_fini_shmem),
+ KUNIT_CASE(ttm_tt_create_basic),
+ KUNIT_CASE(ttm_tt_create_invalid_bo_type),
+ KUNIT_CASE(ttm_tt_create_ttm_exists),
+ KUNIT_CASE(ttm_tt_create_failed),
+ KUNIT_CASE(ttm_tt_destroy_basic),
+ {}
+};
+
+static struct kunit_suite ttm_tt_test_suite = {
+ .name = "ttm_tt",
+ .init = ttm_tt_test_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_tt_test_cases,
+};
+
+kunit_test_suites(&ttm_tt_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index edf10618fe2b..96a724e8f3ff 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -49,7 +49,7 @@
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct drm_printer p = drm_debug_printer(TTM_PFX);
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
struct ttm_resource_manager *man;
int i, mem_type;
@@ -410,8 +410,8 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *hop_mem;
int ret;
- hop_placement.num_placement = hop_placement.num_busy_placement = 1;
- hop_placement.placement = hop_placement.busy_placement = hop;
+ hop_placement.num_placement = 1;
+ hop_placement.placement = hop;
/* find space in the bounce domain */
ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
@@ -440,10 +440,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
placement.num_placement = 0;
- placement.num_busy_placement = 0;
bdev->funcs->evict_flags(bo, &placement);
- if (!placement.num_placement && !placement.num_busy_placement) {
+ if (!placement.num_placement) {
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
@@ -770,7 +769,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
* This function may sleep while waiting for space to become available.
* Returns:
* -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * -ENOSPC: Could not allocate space for the buffer object, either due to
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
@@ -791,6 +790,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ if (place->flags & TTM_PL_FLAG_FALLBACK)
+ continue;
+
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
@@ -813,10 +815,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- for (i = 0; i < placement->num_busy_placement; ++i) {
- const struct ttm_place *place = &placement->busy_placement[i];
+ for (i = 0; i < placement->num_placement; ++i) {
+ const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ if (place->flags & TTM_PL_FLAG_DESIRED)
+ continue;
+
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
@@ -830,7 +835,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
goto error;
}
- ret = -ENOMEM;
+ ret = -ENOSPC;
if (!type_found) {
pr_err(TTM_PFX "No compatible memory type found\n");
ret = -EINVAL;
@@ -904,11 +909,11 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Remove the backing store if no placement is given.
*/
- if (!placement->num_placement && !placement->num_busy_placement)
+ if (!placement->num_placement)
return ttm_bo_pipeline_gutting(bo);
/* Check whether we need to move buffer. */
- if (bo->resource && ttm_resource_compat(bo->resource, placement))
+ if (bo->resource && ttm_resource_compatible(bo->resource, placement))
return 0;
/* Moving of pinned BOs is forbidden */
@@ -916,6 +921,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
return -EINVAL;
ret = ttm_bo_move_buffer(bo, placement, ctx);
+ /* For backward compatibility with userspace */
+ if (ret == -ENOSPC)
+ return -ENOMEM;
if (ret)
return ret;
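The net effect of the reworked two-pass loop in ttm_bo_mem_space() above: drivers now publish a single placement list, and per-place flags decide which pass may consider each entry. A minimal sketch of a "prefer VRAM, fall back to system" placement under the new scheme (the array and variable names are illustrative, not from the patch):

/*
 * Pass 1 of ttm_bo_mem_space() skips TTM_PL_FLAG_FALLBACK places;
 * pass 2 skips TTM_PL_FLAG_DESIRED places. A place with neither
 * flag is considered in both passes.
 */
static const struct ttm_place example_places[] = {
	{ .mem_type = TTM_PL_VRAM,   .flags = TTM_PL_FLAG_DESIRED },
	{ .mem_type = TTM_PL_SYSTEM, .flags = TTM_PL_FLAG_FALLBACK },
};

static const struct ttm_placement example_placement = {
	.num_placement = ARRAY_SIZE(example_places),
	.placement = example_places,
};

This replaces the old busy_placement/num_busy_placement pair, which the hunks above delete.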
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fd9fd3d15101..0b3f4267130c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -294,7 +294,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
enum ttm_caching caching;
man = ttm_manager_type(bo->bdev, res->mem_type);
- caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+ if (man->use_tt) {
+ caching = bo->ttm->caching;
+ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+ tmp = pgprot_decrypted(tmp);
+ } else {
+ caching = res->bus.caching;
+ }
return ttm_prot_from_caching(caching, tmp);
}
@@ -337,6 +343,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
.no_wait_gpu = false
};
struct ttm_tt *ttm = bo->ttm;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
pgprot_t prot;
int ret;
@@ -346,7 +354,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if (num_pages == 1 && ttm->caching == ttm_cached) {
+ if (num_pages == 1 && ttm->caching == ttm_cached &&
+ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 46ff9c75bb12..fb14f7716cf8 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -30,6 +30,8 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/drm_util.h>
+
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
* @bulk: the structure to init
@@ -240,6 +242,7 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
spin_unlock(&bo->bdev->lru_lock);
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
@@ -288,37 +291,15 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
}
/**
- * ttm_resource_compatible - test for compatibility
- *
- * @bdev: TTM device structure
- * @res: The resource to test
- * @place: The placement to test
- * @size: How many bytes the new allocation needs.
+ * ttm_resource_compatible - check if resource is compatible with placement
*
- * Test if @res compatible with @place and @size.
+ * @res: the resource to check
+ * @placement: the placement to check against
*
- * Returns true if the res placement compatible with @place and @size.
+ * Returns true if the placement is compatible.
*/
-bool ttm_resource_compatible(struct ttm_device *bdev,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- struct ttm_resource_manager *man;
-
- if (!res || !place)
- return false;
-
- man = ttm_manager_type(bdev, res->mem_type);
- if (!man->func->compatible)
- return true;
-
- return man->func->compatible(man, res, place, size);
-}
-
-static bool ttm_resource_places_compat(struct ttm_resource *res,
- const struct ttm_place *places,
- unsigned num_placement)
+bool ttm_resource_compatible(struct ttm_resource *res,
+ struct ttm_placement *placement)
{
struct ttm_buffer_object *bo = res->bo;
struct ttm_device *bdev = bo->bdev;
@@ -327,44 +308,25 @@ static bool ttm_resource_places_compat(struct ttm_resource *res,
if (res->placement & TTM_PL_FLAG_TEMPORARY)
return false;
- for (i = 0; i < num_placement; i++) {
- const struct ttm_place *heap = &places[i];
+ for (i = 0; i < placement->num_placement; i++) {
+ const struct ttm_place *place = &placement->placement[i];
+ struct ttm_resource_manager *man;
- if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
+ if (res->mem_type != place->mem_type)
+ continue;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (man->func->compatible &&
+ !man->func->compatible(man, res, place, bo->base.size))
continue;
- if ((res->mem_type == heap->mem_type) &&
- (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ if ((!(place->flags & TTM_PL_FLAG_CONTIGUOUS) ||
(res->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
return false;
}
-/**
- * ttm_resource_compat - check if resource is compatible with placement
- *
- * @res: the resource to check
- * @placement: the placement to check against
- *
- * Returns true if the placement is compatible.
- */
-bool ttm_resource_compat(struct ttm_resource *res,
- struct ttm_placement *placement)
-{
- if (ttm_resource_places_compat(res, placement->placement,
- placement->num_placement))
- return true;
-
- if ((placement->busy_placement != placement->placement ||
- placement->num_busy_placement > placement->num_placement) &&
- ttm_resource_places_compat(res, placement->busy_placement,
- placement->num_busy_placement))
- return true;
-
- return false;
-}
-
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e0a77671edd6..578a7c37f00b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,11 +31,14 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/cc_platform.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -60,6 +63,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_device *bdev = bo->bdev;
+ struct drm_device *ddev = bo->base.dev;
uint32_t page_flags = 0;
dma_resv_assert_held(bo->base.resv);
@@ -81,6 +85,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
+ /*
+ * When using dma_alloc_coherent with memory encryption, the
+ * mapped TT pages need to be decrypted; otherwise the drivers
+ * will end up sending encrypted memory to the GPU.
+ */
+ if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ page_flags |= TTM_TT_FLAG_DECRYPTED;
+ drm_info(ddev, "TT memory decryption enabled.");
+ }
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
@@ -91,6 +104,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
/*
* Allocates storage for pointers to the pages that back the ttm.
@@ -129,6 +143,7 @@ void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
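Condensing the decryption rule the ttm_tt.c hunks introduce: TT pages are flagged TTM_TT_FLAG_DECRYPTED only when the device's page pool allocates through dma_alloc_coherent() and the platform reports guest memory encryption. A minimal sketch of the predicate (the helper name is hypothetical):

#include <linux/cc_platform.h>
#include <drm/ttm/ttm_device.h>

/* Hypothetical helper mirroring the condition added to ttm_tt_create() */
static bool example_tt_needs_decryption(struct ttm_device *bdev)
{
	return bdev->pool.use_dma_alloc &&
	       cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}

The matching ttm_bo_util.c hunks then apply pgprot_decrypted() when mapping such pages.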
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
index 11e865be81c6..5121fed571a5 100644
--- a/drivers/gpu/drm/tve200/Kconfig
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -9,7 +9,6 @@ config DRM_TVE200
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Faraday TV Encoder
TVE200 Controller.
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 94eafcecc65b..dc3cf708d02e 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -260,11 +260,26 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
return 0;
}
+static int v3d_debugfs_mm(struct seq_file *m, void *unused)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+
+ spin_lock(&v3d->mm_lock);
+ drm_mm_print(&v3d->mm, &p);
+ spin_unlock(&v3d->mm_lock);
+
+ return 0;
+}
+
static const struct drm_debugfs_info v3d_debugfs_list[] = {
{"v3d_ident", v3d_v3d_debugfs_ident, 0},
{"v3d_regs", v3d_v3d_debugfs_regs, 0},
{"measure_clock", v3d_measure_clock, 0},
{"bo_stats", v3d_debugfs_bo_stats, 0},
+ {"v3d_mm", v3d_debugfs_mm, 0},
};
void
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index f05e2c95a60d..34f807ed1c31 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -35,6 +35,7 @@
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 00e713faecd5..07caf2a47c6c 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1497,16 +1497,16 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_bo *bo;
+ int ret;
if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
- drm_gem_plane_helper_prepare_fb(plane, state);
-
- if (plane->state->fb == state->fb)
- return 0;
+ ret = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (ret)
+ return ret;
return vc4_bo_inc_usecnt(bo);
}
@@ -1516,7 +1516,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
{
struct vc4_bo *bo;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 5c514946bbad..1c7c7f61a222 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -99,8 +99,8 @@ virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
return 0;
/*
- * kvalloc at first tries to allocate memory using kmalloc and
- * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
+ * kvmalloc() first tries to allocate memory using kmalloc() and
+ * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
* internally for allocations larger than a page size, preventing a
* storm of KMSG warnings.
*/
@@ -529,7 +529,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_submit(&submit);
/*
- * Set up usr-out data after submitting the job to optimize
+ * Set up user-out data after submitting the job to optimize
* the job submission path.
*/
virtio_gpu_install_out_fence_fd(&submit);
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
new file mode 100644
index 000000000000..b9ecdebecb0b
--- /dev/null
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_VKMS
+ tristate "Virtual KMS (EXPERIMENTAL)"
+ depends on DRM && MMU
+ select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
+ select CRC32
+ default n
+ help
+ Virtual Kernel Mode-Setting (VKMS) is used for testing or for
+ running a GPU in headless machines. Choose this option to get
+ a VKMS.
+
+ If M is selected the module will be called vkms.
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 3c99fb8b54e2..e7441b227b3c 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -123,6 +123,8 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
enum lut_channel channel)
{
s64 lut_index = get_lut_index(lut, channel_value);
+ u16 *floor_lut_value, *ceil_lut_value;
+ u16 floor_channel_value, ceil_channel_value;
/*
* This checks if `struct drm_color_lut` has any gap added by the compiler
@@ -130,11 +132,15 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
*/
static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
- u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
- u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
+ floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
+ if (drm_fixp2int(lut_index) == (lut->lut_length - 1))
+ /* We're at the end of the LUT array, use same value for ceil and floor */
+ ceil_lut_value = floor_lut_value;
+ else
+ ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
- u16 floor_channel_value = floor_lut_value[channel];
- u16 ceil_channel_value = ceil_lut_value[channel];
+ floor_channel_value = floor_lut_value[channel];
+ ceil_channel_value = ceil_lut_value[channel];
return lerp_u16(floor_channel_value, ceil_channel_value,
lut_index & DRM_FIXED_DECIMAL_MASK);
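Why the clamp added above matters, as a worked boundary case (array length assumed): with a 256-entry LUT, a channel value whose fixed-point lut_index has floor 255 sits on the last entry; taking drm_fixp2int_ceil() of any such index with a nonzero fractional part would read lut->base[256], one element past the array. With ceil clamped to floor, lerp_u16() degenerates to returning the floor value, which is the correct result at the endpoint.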
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index ddf8373c1d77..6806c05e57f6 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -648,7 +648,6 @@ out_unref:
* @tfile: struct ttm_object_file identifying the caller
* @size: The size of the dma_bufs we export.
* @prime: The object to be initialized.
- * @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
*
@@ -656,10 +655,11 @@ out_unref:
* for data sharing between processes and devices.
*/
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
- struct ttm_prime_object *prime, bool shareable,
+ struct ttm_prime_object *prime,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object **))
{
+ bool shareable = !!(type == VMW_RES_SURFACE);
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
prime->real_type = type;
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index e6b77ee33e55..573e038c0fab 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2006-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2006-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -288,7 +288,6 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
extern int ttm_prime_object_init(struct ttm_object_file *tfile,
size_t size,
struct ttm_prime_object *prime,
- bool shareable,
enum ttm_object_type type,
void (*refcount_release)
(struct ttm_base_object **));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 2bfac3aad7b7..bfd41ce3c8f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -742,9 +742,21 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
vmw_resource_unbind_list(vbo);
}
+static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
+{
+ if (desired & fallback & domain)
+ return 0;
+
+ if (desired & domain)
+ return TTM_PL_FLAG_DESIRED;
+
+ return TTM_PL_FLAG_FALLBACK;
+}
+
static u32
-set_placement_list(struct ttm_place *pl, u32 domain)
+set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
{
+ u32 domain = desired | fallback;
u32 n = 0;
/*
@@ -752,35 +764,40 @@ set_placement_list(struct ttm_place *pl, u32 domain)
*/
if (domain & VMW_BO_DOMAIN_MOB) {
pl[n].mem_type = VMW_PL_MOB;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_GMR) {
pl[n].mem_type = VMW_PL_GMR;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_VRAM) {
pl[n].mem_type = TTM_PL_VRAM;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
pl[n].mem_type = VMW_PL_SYSTEM;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
+ desired, fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_SYS) {
pl[n].mem_type = TTM_PL_SYSTEM;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
@@ -806,7 +823,7 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
u32 i;
pl->placement = bo->places;
- pl->num_placement = set_placement_list(bo->places, domain);
+ pl->num_placement = set_placement_list(bo->places, domain, busy_domain);
if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
for (i = 0; i < pl->num_placement; ++i) {
@@ -821,8 +838,6 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
__func__, bo->tbo.resource->mem_type, domain);
}
- pl->busy_placement = bo->busy_places;
- pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
}
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
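To make placement_flags() concrete, a short annotation with assumed inputs (illustrative only, not from the patch):

/*
 * Worked example of placement_flags():
 *
 *   desired  = VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_VRAM
 *   fallback = VMW_BO_DOMAIN_MOB
 *
 *   MOB:  desired & fallback & MOB != 0 -> 0 (tried in both passes)
 *   VRAM: desired & VRAM != 0           -> TTM_PL_FLAG_DESIRED
 *   GMR:  absent from desired           -> TTM_PL_FLAG_FALLBACK, had it
 *                                          been set in the fallback mask
 */

The single list built by set_placement_list() thus encodes what the separate busy_places array used to express.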
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3cd5090dedfc..12efecc17df6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -942,7 +942,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 36987ef3fc30..cc3086e649eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
vmw_res_type(ctx) == vmw_res_dx_context) {
for (i = 0; i < cotable_max; ++i) {
res = vmw_context_cotable(ctx, i);
- if (IS_ERR(res))
+ if (IS_ERR_OR_NULL(res))
continue;
ret = vmw_execbuf_res_val_add(sw_context, res,
@@ -621,10 +621,10 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @dirty: Whether to change dirty status.
- * @converter: User-space visisble type specific information.
+ * @converter: User-space visible type specific information.
* @id_loc: Pointer to the location in the command buffer currently being parsed
* from where the user-space resource id handle is located.
- * @p_res: Pointer to pointer to resource validalidation node. Populated on
+ * @p_res: Pointer to pointer to resource validation node. Populated on
* exit.
*/
static int
@@ -1266,6 +1266,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
+ if (IS_ERR_OR_NULL(cotable_res))
+ return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
return ret;
@@ -2484,6 +2486,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
return ret;
res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->defined_id);
if (unlikely(ret != 0))
return ret;
@@ -2569,8 +2573,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
@@ -2689,6 +2693,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
return -EINVAL;
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->body.shaderId);
if (ret)
return ret;
@@ -3010,6 +3016,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
}
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->body.soid);
if (ret)
return ret;
@@ -3603,6 +3611,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_bind_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
&vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
+ &vmw_cmd_invalid, false, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ceb4d3d3b965..a0b47c9b33f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -64,8 +64,11 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
ttm_resource_init(bo, place, *res);
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ttm_resource_fini(man, *res);
+ kfree(*res);
return id;
+ }
spin_lock(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 5fd0ccaa0b41..cd4925346ed4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -35,6 +35,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
+#include <drm/drm_edid.h>
void vmw_du_cleanup(struct vmw_display_unit *du)
{
@@ -184,13 +185,12 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
- bool is_iomem;
if (vps->surf) {
if (vps->surf_mapped)
return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image;
} else if (vps->bo)
- return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
+ return vmw_bo_map_and_cache(vps->bo);
return NULL;
}
@@ -272,6 +272,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
u32 i;
u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
int ret;
if (!dev_priv->has_mob ||
@@ -313,7 +314,15 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
if (ret != 0)
goto teardown;
- vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.bo->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0;
@@ -643,22 +652,12 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- bool is_iomem;
if (vps->surf_mapped) {
vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false;
}
- if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
- const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
-
- if (likely(ret == 0)) {
- ttm_bo_kunmap(&vps->bo->map);
- ttm_bo_unreserve(&vps->bo->tbo);
- }
- }
-
vmw_du_cursor_plane_unmap_cm(vps);
vmw_du_put_cursor_mob(vcp, vps);
@@ -694,6 +693,10 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
int ret = 0;
if (vps->surf) {
+ if (vps->surf_mapped) {
+ vmw_bo_unmap(vps->surf->res.guest_memory_bo);
+ vps->surf_mapped = false;
+ }
vmw_surface_unreference(&vps->surf);
vps->surf = NULL;
}
@@ -2278,107 +2281,6 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
connector_status_connected : connector_status_disconnected);
}
-static struct drm_display_mode vmw_kms_connector_builtin[] = {
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@60Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
- 1472, 1664, 0, 720, 723, 728, 748, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@60Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
- 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1440@60Hz */
- { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
- 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2880x1800@60Hz */
- { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
- 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 3840x2160@60Hz */
- { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
- 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 3840x2400@60Hz */
- { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
- 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* Terminate */
- { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
-};
-
/**
* vmw_guess_mode_timing - Provide fake timings for a
* 60Hz vrefresh mode.
@@ -2400,88 +2302,6 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode)
}
-int vmw_du_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height)
-{
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
- struct drm_device *dev = connector->dev;
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_display_mode *mode = NULL;
- struct drm_display_mode *bmode;
- struct drm_display_mode prefmode = { DRM_MODE("preferred",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
- };
- int i;
- u32 assumed_bpp = 4;
-
- if (dev_priv->assume_16bpp)
- assumed_bpp = 2;
-
- max_width = min(max_width, dev_priv->texture_max_width);
- max_height = min(max_height, dev_priv->texture_max_height);
-
- /*
- * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
- * HEIGHT registers.
- */
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- max_width = min(max_width, dev_priv->stdu_max_width);
- max_height = min(max_height, dev_priv->stdu_max_height);
- }
-
- /* Add preferred mode */
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = du->pref_width;
- mode->vdisplay = du->pref_height;
- vmw_guess_mode_timing(mode);
- drm_mode_set_name(mode);
-
- if (vmw_kms_validate_mode_vram(dev_priv,
- mode->hdisplay * assumed_bpp,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
- } else {
- drm_mode_destroy(dev, mode);
- mode = NULL;
- }
-
- if (du->pref_mode) {
- list_del_init(&du->pref_mode->head);
- drm_mode_destroy(dev, du->pref_mode);
- }
-
- /* mode might be null here, this is intended */
- du->pref_mode = mode;
-
- for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
- bmode = &vmw_kms_connector_builtin[i];
- if (bmode->hdisplay > max_width ||
- bmode->vdisplay > max_height)
- continue;
-
- if (!vmw_kms_validate_mode_vram(dev_priv,
- bmode->hdisplay * assumed_bpp,
- bmode->vdisplay))
- continue;
-
- mode = drm_mode_duplicate(dev, bmode);
- if (!mode)
- return 0;
-
- drm_mode_probed_add(connector, mode);
- }
-
- drm_connector_list_update(connector);
- /* Move the prefered mode first, help apps pick the right mode. */
- drm_mode_sort(&connector->modes);
-
- return 1;
-}
-
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
@@ -3022,3 +2842,91 @@ out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret;
}
+
+/**
+ * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
+ *
+ * @connector: the drm connector, part of a DU container
+ * @mode: drm mode to check
+ *
+ * Returns MODE_OK on success, or a drm_mode_status error code.
+ */
+enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ u32 max_width = dev_priv->texture_max_width;
+ u32 max_height = dev_priv->texture_max_height;
+ u32 assumed_cpp = 4;
+
+ if (dev_priv->assume_16bpp)
+ assumed_cpp = 2;
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ max_width = min(dev_priv->stdu_max_width, max_width);
+ max_height = min(dev_priv->stdu_max_height, max_height);
+ }
+
+ if (max_width < mode->hdisplay)
+ return MODE_BAD_HVALUE;
+
+ if (max_height < mode->vdisplay)
+ return MODE_BAD_VVALUE;
+
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ mode->hdisplay * assumed_cpp,
+ mode->vdisplay))
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+/**
+ * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
+ *
+ * @connector: the drm connector, part of a DU container
+ *
+ * Returns the number of added modes.
+ */
+int vmw_connector_get_modes(struct drm_connector *connector)
+{
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ u32 max_width;
+ u32 max_height;
+ u32 num_modes;
+
+ /* Add preferred mode */
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+
+ mode->hdisplay = du->pref_width;
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+
+ /* Probe connector for all modes not exceeding our geom limits */
+ max_width = dev_priv->texture_max_width;
+ max_height = dev_priv->texture_max_height;
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ max_width = min(dev_priv->stdu_max_width, max_width);
+ max_height = min(dev_priv->stdu_max_height, max_height);
+ }
+
+ num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
+
+ return num_modes;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index db81e635dc06..a94947b588e8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -378,7 +378,6 @@ struct vmw_display_unit {
unsigned pref_width;
unsigned pref_height;
bool pref_active;
- struct drm_display_mode *pref_mode;
/*
* Gui positioning
@@ -428,8 +427,6 @@ void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force);
-int vmw_du_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height);
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
const struct drm_clip_rect *clips,
@@ -438,6 +435,9 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int num_clips,
int increment,
struct vmw_kms_dirty *dirty);
+enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+int vmw_connector_get_modes(struct drm_connector *connector);
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a82fa9700370..c4db4aecca6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -304,7 +304,7 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@@ -313,6 +313,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
+ .get_modes = vmw_connector_get_modes,
+ .mode_valid = vmw_connector_mode_valid
};
static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
@@ -449,7 +451,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
ldu->base.pref_active = (unit == 0);
ldu->base.pref_width = dev_priv->initial_width;
ldu->base.pref_height = dev_priv->initial_height;
- ldu->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 556a403b7eb5..30c3ad27b662 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -347,7 +347,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@@ -357,6 +357,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
+ .get_modes = vmw_connector_get_modes,
+ .mode_valid = vmw_connector_mode_valid
};
@@ -826,7 +828,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
- sou->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index ba0c0e12cfe9..3c8414a13dba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -53,7 +53,6 @@ enum stdu_content_type {
* struct vmw_stdu_dirty - closure structure for the update functions
*
* @base: The base type we derive from. Used by vmw_kms_helper_dirty().
- * @transfer: Transfer direction for DMA command.
* @left: Left side of bounding box.
* @right: Right side of bounding box.
* @top: Top side of bounding box.
@@ -100,7 +99,7 @@ struct vmw_stdu_update_gb_image {
};
/**
- * struct vmw_screen_target_display_unit
+ * struct vmw_screen_target_display_unit - conglomerated STDU structure
*
* @base: VMW specific DU structure
* @display_srf: surface to be displayed. The dimension of this will always
@@ -208,6 +207,8 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
* @res: Buffer to bind to the screen target. Set to NULL to blank screen.
*
* Binding a surface to a Screen Target is the same as flipping
+ *
+ * Returns: %0 on success or -errno code on failure
*/
static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu,
@@ -314,6 +315,9 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
*
* @dev_priv: VMW DRM device
* @stdu: display unit to destroy
+ *
+ * Returns: %0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
*/
static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu)
@@ -536,7 +540,8 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
* If DMA-ing to the screen target system, the function will also notify
* the screen target system that a bounding box of the cliprects has been
* updated.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ *
+ * Returns: %0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
@@ -703,7 +708,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
* case the device has already synchronized.
* @crtc: If crtc is passed, perform surface dirty on that crtc only.
*
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * Returns: %0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
@@ -830,7 +835,7 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_stdu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@@ -840,6 +845,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
+ .get_modes = vmw_connector_get_modes,
+ .mode_valid = vmw_connector_mode_valid
};
@@ -887,7 +894,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
* backed by a buffer object. The display surface is pinned here, and it'll
* be unpinned in .cleanup_fb()
*
- * Returns 0 on success
+ * Returns: %0 on success
*/
static int
vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
@@ -1465,6 +1472,8 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
* This function is called once per CRTC, and allocates one Screen Target
* display unit to represent that CRTC. Since the SVGA device does not separate
* out encoder and connector, they are represented as part of the STDU as well.
+ *
+ * Returns: %0 on success or -errno code on failure
*/
static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 680441bb1786..e7a744dfcecf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -44,7 +44,6 @@
* struct vmw_user_surface - User-space visible surface resource
*
* @prime: The TTM prime object.
- * @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @master: Master of the creating client. Used for security check.
*/
@@ -833,8 +832,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->snooper.image = NULL;
}
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
if (drm_is_primary_client(file_priv))
user_srf->master = drm_file_get_master(file_priv);
@@ -848,10 +845,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
/*
- * A gb-aware client referencing a shared surface will
- * expect a backup buffer to be present.
+ * A gb-aware client referencing a surface will expect a backup
+ * buffer to be present.
*/
- if (dev_priv->has_mob && req->shareable) {
+ if (dev_priv->has_mob) {
struct vmw_bo_params params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
@@ -870,8 +867,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
- req->shareable, VMW_RES_SURFACE,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size,
+ &user_srf->prime,
+ VMW_RES_SURFACE,
&vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
@@ -1550,8 +1548,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
- req->base.drm_surface_flags &
- drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
&vmw_user_surface_base_release);
@@ -2053,8 +2049,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
}
*srf_out = &user_srf->srf;
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
srf = &user_srf->srf;
srf->metadata = *req;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index af8562c95cc3..4d23d0a70bcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -43,46 +43,14 @@ static const struct ttm_place sys_placement_flags = {
.flags = 0
};
-static const struct ttm_place gmr_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
-};
-
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &vram_placement_flags
-};
-
-static const struct ttm_place vram_gmr_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }
-};
-
-struct ttm_placement vmw_vram_gmr_placement = {
- .num_placement = 2,
- .placement = vram_gmr_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &gmr_placement_flags
};
struct ttm_placement vmw_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
};
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
diff --git a/drivers/gpu/drm/xe/.kunitconfig b/drivers/gpu/drm/xe/.kunitconfig
index 9590eac91af3..ad4b9b4a9f55 100644
--- a/drivers/gpu/drm/xe/.kunitconfig
+++ b/drivers/gpu/drm/xe/.kunitconfig
@@ -11,3 +11,8 @@ CONFIG_DRM_XE_DISPLAY=n
CONFIG_EXPERT=y
CONFIG_FB=y
CONFIG_DRM_XE_KUNIT_TEST=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_LOCKDEP=y
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index e36ae1f0d885..0e31dfb8989e 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) && 64BIT
+ depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index efcf0ab7a1a6..c531210695db 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -76,6 +76,7 @@ xe-y += xe_bb.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
xe_gsc.o \
+ xe_gsc_proxy.o \
xe_gsc_submit.o \
xe_gt.o \
xe_gt_ccs_mode.o \
@@ -92,6 +93,7 @@ xe-y += xe_bb.o \
xe_guc.o \
xe_guc_ads.o \
xe_guc_ct.o \
+ xe_guc_db_mgr.o \
xe_guc_debugfs.o \
xe_guc_hwconfig.o \
xe_guc_log.o \
@@ -137,6 +139,7 @@ xe-y += xe_bb.o \
xe_uc_debugfs.o \
xe_uc_fw.o \
xe_vm.o \
+ xe_vram_freq.o \
xe_wait_user_fence.o \
xe_wa.o \
xe_wopcm.o
@@ -145,18 +148,23 @@ xe-y += xe_bb.o \
xe-$(CONFIG_HWMON) += xe_hwmon.o
# graphics virtualization (SR-IOV) support
-xe-y += xe_sriov.o
+xe-y += \
+ xe_guc_relay.o \
+ xe_memirq.o \
+ xe_sriov.o
xe-$(CONFIG_PCI_IOV) += \
xe_lmtt.o \
xe_lmtt_2l.o \
xe_lmtt_ml.o
+xe-$(CONFIG_DRM_XE_KUNIT_TEST) += \
+ tests/xe_kunit_helpers.o
+
# i915 Display compat #defines and #includes
subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
-I$(srctree)/$(src)/display/ext \
-I$(srctree)/$(src)/compat-i915-headers \
- -I$(srctree)/drivers/gpu/drm/xe/display/ \
-I$(srctree)/drivers/gpu/drm/i915/display/ \
-Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
@@ -176,17 +184,17 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
- xe_display.o \
- display/xe_fb_pin.o \
- display/xe_hdcp_gsc.o \
- display/xe_plane_initial.o \
- display/xe_display_rps.o \
+ display/ext/i915_irq.o \
+ display/ext/i915_utils.o \
+ display/intel_fb_bo.o \
+ display/intel_fbdev_fb.o \
+ display/xe_display.o \
display/xe_display_misc.o \
+ display/xe_display_rps.o \
display/xe_dsb_buffer.o \
- display/intel_fbdev_fb.o \
- display/intel_fb_bo.o \
- display/ext/i915_irq.o \
- display/ext/i915_utils.o
+ display/xe_fb_pin.o \
+ display/xe_hdcp_gsc.o \
+ display/xe_plane_initial.o
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
@@ -213,8 +221,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
- i915-display/intel_display_debugfs.o \
- i915-display/intel_display_debugfs_params.o \
i915-display/intel_display_device.o \
i915-display/intel_display_driver.o \
i915-display/intel_display_irq.o \
@@ -258,7 +264,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
- i915-display/intel_pipe_crc.o \
i915-display/intel_pmdemand.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
@@ -285,6 +290,13 @@ ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y)
xe-$(CONFIG_DRM_XE_DISPLAY) += i915-display/intel_fbdev.o
endif
+ifeq ($(CONFIG_DEBUG_FS),y)
+ xe-$(CONFIG_DRM_XE_DISPLAY) += \
+ i915-display/intel_display_debugfs.o \
+ i915-display/intel_display_debugfs_params.o \
+ i915-display/intel_pipe_crc.o
+endif
+
obj-$(CONFIG_DRM_XE) += xe.o
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h b/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h
new file mode 100644
index 000000000000..80bbf06a3eb8
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GSC_PROXY_COMMANDS_ABI_H
+#define _ABI_GSC_PROXY_COMMANDS_ABI_H
+
+#include <linux/types.h>
+
+/* Heci client ID for proxy commands */
+#define HECI_MEADDRESS_PROXY 10
+
+/* FW-defined proxy header */
+struct xe_gsc_proxy_header {
+ /*
+ * hdr:
+ * Bits 0-7: type of the proxy message (see enum xe_gsc_proxy_type)
+ * Bits 8-15: rsvd
+ * Bits 16-31: length in bytes of the payload following the proxy header
+ */
+ u32 hdr;
+#define GSC_PROXY_TYPE GENMASK(7, 0)
+#define GSC_PROXY_PAYLOAD_LENGTH GENMASK(31, 16)
+
+ u32 source; /* Source of the Proxy message */
+ u32 destination; /* Destination of the Proxy message */
+#define GSC_PROXY_ADDRESSING_KMD 0x10000
+#define GSC_PROXY_ADDRESSING_GSC 0x20000
+#define GSC_PROXY_ADDRESSING_CSME 0x30000
+
+ u32 status; /* Command status */
+} __packed;
+
+/* FW-defined proxy types */
+enum xe_gsc_proxy_type {
+ GSC_PROXY_MSG_TYPE_PROXY_INVALID = 0,
+ GSC_PROXY_MSG_TYPE_PROXY_QUERY = 1,
+ GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD = 2,
+ GSC_PROXY_MSG_TYPE_PROXY_END = 3,
+ GSC_PROXY_MSG_TYPE_PROXY_NOTIFICATION = 4,
+};
+
+#endif
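
For orientation, a minimal sketch of how the packed hdr field above might be decoded with the kernel's bitfield helpers; the function name here is hypothetical and not part of the patch:

#include <linux/bitfield.h>

/* Hypothetical sketch: decode the packed hdr of a received proxy message. */
static void example_decode_proxy_header(const struct xe_gsc_proxy_header *h)
{
	u32 type = FIELD_GET(GSC_PROXY_TYPE, h->hdr);
	u32 payload_len = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, h->hdr);

	if (type == GSC_PROXY_MSG_TYPE_PROXY_END)
		pr_debug("proxy end, %u payload bytes\n", payload_len);
}
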
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
new file mode 100644
index 000000000000..5496a5890847
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _GUC_ACTIONS_PF_ABI_H
+#define _GUC_ACTIONS_PF_ABI_H
+
+#include "guc_communication_ctb_abi.h"
+
+/**
+ * DOC: GUC2PF_RELAY_FROM_VF
+ *
+ * This message is used by the GuC firmware to forward a VF2PF `Relay Message`_
+ * received from the Virtual Function (VF) driver to this Physical Function (PF)
+ * driver.
+ *
+ * This message is always sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF` = 0x5100 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - source VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 3 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF 0x5100
+
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u)
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN \
+ (GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_3_RELAY_DATA1 GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_n_RELAY_DATAx GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+/**
+ * DOC: PF2GUC_RELAY_TO_VF
+ *
+ * This H2G message is used by the Physical Function (PF) driver to send an
+ * embedded `Relay Message`_ to the Virtual Function (VF) driver.
+ *
+ * This action message must be sent over CTB as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = `GUC_HXG_TYPE_FAST_REQUEST`_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_PF2GUC_RELAY_TO_VF` = 0x5101 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - target VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 3 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_PF2GUC_RELAY_TO_VF 0x5101
+
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 2u)
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MAX_LEN \
+ (PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_3_RELAY_DATA1 GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+/**
+ * DOC: GUC2VF_RELAY_FROM_PF
+ *
+ * This message is used by the GuC firmware to deliver a `Relay Message`_ from the
+ * Physical Function (PF) driver to this Virtual Function (VF) driver.
+ * See `GuC Relay Communication`_ for details.
+ *
+ * This message is always sent over CTB.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF` = 0x5102 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 2 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF 0x5102
+
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 1u)
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN \
+ (GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_n_RELAY_DATAx GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+/**
+ * DOC: VF2GUC_RELAY_TO_PF
+ *
+ * This message is used by the Virtual Function (VF) drivers to send a
+ * `Relay Message`_ to the Physical Function (PF) driver.
+ * See `GuC Relay Communication`_ for details.
+ *
+ * This message must be sent over CTB.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ or GUC_HXG_TYPE_FAST_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_VF2GUC_RELAY_TO_PF` = 0x5103 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 2 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_VF2GUC_RELAY_TO_PF 0x5103
+
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MAX_LEN \
+ (VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+#endif
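
The four actions above all share the same HXG framing, so composing one reduces to FIELD_PREP() on the generic HXG masks. A minimal sketch, assuming the caller supplies a large-enough buffer (the function name is hypothetical):

#include <linux/bitfield.h>

/* Hypothetical sketch: fill the fixed dwords of a VF2GUC_RELAY_TO_PF request. */
static void example_prepare_vf2guc_relay(u32 *msg, u32 rid)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    XE_GUC_ACTION_VF2GUC_RELAY_TO_PF);
	msg[1] = rid;	/* RELAY_ID, used by both ends to match the reply */
	/* dwords 2..n carry the embedded Relay Message */
}
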
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
index 0b1146d0c997..8f86a16dc577 100644
--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -81,12 +81,13 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
#define GUC_CTB_HDR_LEN 1u
#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
-#define GUC_CTB_MSG_MAX_LEN 256u
+#define GUC_CTB_MSG_MAX_LEN (GUC_CTB_MSG_MIN_LEN + GUC_CTB_MAX_DWORDS)
#define GUC_CTB_MSG_0_FENCE (0xffffu << 16)
#define GUC_CTB_MSG_0_FORMAT (0xfu << 12)
#define GUC_CTB_FORMAT_HXG 0u
#define GUC_CTB_MSG_0_RESERVED (0xfu << 8)
#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0)
+#define GUC_CTB_MAX_DWORDS 255
/**
* DOC: CTB HXG Message
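
Note the arithmetic is unchanged by this hunk: GUC_CTB_MSG_MIN_LEN is the 1 header dword and GUC_CTB_MAX_DWORDS is 255, so the derived GUC_CTB_MSG_MAX_LEN still equals the old 256u literal. A compile-time check along these lines (illustrative, not in the patch) would pin that down:

#include <linux/build_bug.h>

static_assert(GUC_CTB_MSG_MAX_LEN == 256u);	/* 1 header dword + 255 payload dwords */
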
diff --git a/drivers/gpu/drm/xe/abi/guc_messages_abi.h b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
index 29e414c82d56..534a39db7772 100644
--- a/drivers/gpu/drm/xe/abi/guc_messages_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
@@ -24,6 +24,7 @@
* | | 30:28 | **TYPE** - message type |
* | | | - _`GUC_HXG_TYPE_REQUEST` = 0 |
* | | | - _`GUC_HXG_TYPE_EVENT` = 1 |
+ * | | | - _`GUC_HXG_TYPE_FAST_REQUEST` = 2 |
* | | | - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3 |
* | | | - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5 |
* | | | - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6 |
@@ -46,6 +47,7 @@
#define GUC_HXG_MSG_0_TYPE (0x7u << 28)
#define GUC_HXG_TYPE_REQUEST 0u
#define GUC_HXG_TYPE_EVENT 1u
+#define GUC_HXG_TYPE_FAST_REQUEST 2u
#define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
#define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
#define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
diff --git a/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
new file mode 100644
index 000000000000..747e428de421
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_RELAY_ACTIONS_ABI_H_
+#define _ABI_GUC_RELAY_ACTIONS_ABI_H_
+
+/**
+ * DOC: GuC Relay Debug Actions
+ *
+ * This range of action codes is reserved for debugging purposes only and should
+ * be used only on debug builds. These actions may not be supported by
+ * production drivers, and their definitions may change in the future.
+ *
+ * _`GUC_RELAY_ACTION_DEBUG_ONLY_START` = 0xDEB0
+ * _`GUC_RELAY_ACTION_DEBUG_ONLY_END` = 0xDEFF
+ */
+
+#define GUC_RELAY_ACTION_DEBUG_ONLY_START 0xDEB0
+#define GUC_RELAY_ACTION_DEBUG_ONLY_END 0xDEFF
+
+/**
+ * DOC: VFXPF_TESTLOOP
+ *
+ * This `Relay Message`_ is used to selftest the `GuC Relay Communication`_.
+ *
+ * The following opcodes are defined:
+ * VFXPF_TESTLOOP_OPCODE_NOP_ will return no data.
+ * VFXPF_TESTLOOP_OPCODE_BUSY_ will reply with a BUSY response first.
+ * VFXPF_TESTLOOP_OPCODE_RETRY_ will reply with a RETRY response instead.
+ * VFXPF_TESTLOOP_OPCODE_ECHO_ will return the same data as received.
+ * VFXPF_TESTLOOP_OPCODE_FAIL_ will always fail with an error.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ or GUC_HXG_TYPE_FAST_REQUEST_ |
+ * | | | or GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **OPCODE** |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_NOP` = 0x0 |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_BUSY` = 0xB |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_RETRY` = 0xD |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_ECHO` = 0xE |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_FAIL` = 0xF |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`IOV_ACTION_SELFTEST_RELAY` |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **DATA1** = optional, depends on **OPCODE**: |
+ * | | | for VFXPF_TESTLOOP_OPCODE_BUSY_: time in ms for reply |
+ * | | | for VFXPF_TESTLOOP_OPCODE_FAIL_: expected error |
+ * | | | for VFXPF_TESTLOOP_OPCODE_ECHO_: payload |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| 31:0 | **DATAn** = only for **OPCODE** VFXPF_TESTLOOP_OPCODE_ECHO_ |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| 31:0 | DATAn = only for **OPCODE** VFXPF_TESTLOOP_OPCODE_ECHO_ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_RELAY_ACTION_VFXPF_TESTLOOP (GUC_RELAY_ACTION_DEBUG_ONLY_START + 1)
+#define VFXPF_TESTLOOP_OPCODE_NOP 0x0
+#define VFXPF_TESTLOOP_OPCODE_BUSY 0xB
+#define VFXPF_TESTLOOP_OPCODE_RETRY 0xD
+#define VFXPF_TESTLOOP_OPCODE_ECHO 0xE
+#define VFXPF_TESTLOOP_OPCODE_FAIL 0xF
+
+#endif
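
Per the request table above, the opcode rides in the HXG DATA0 field while the action stays in the low 16 bits. A minimal sketch of an ECHO request with a single payload dword, close to what the new pf_loopback_echo selftest builds (the function name is hypothetical):

#include <linux/bitfield.h>

/* Hypothetical sketch: a TESTLOOP ECHO request with one payload dword. */
static void example_testloop_echo(u32 *msg, u32 payload)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0,
			    VFXPF_TESTLOOP_OPCODE_ECHO) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    GUC_RELAY_ACTION_VFXPF_TESTLOOP);
	msg[1] = payload;	/* echoed back in the response */
}
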
diff --git a/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h b/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h
new file mode 100644
index 000000000000..f92625f04796
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_RELAY_COMMUNICATION_ABI_H
+#define _ABI_GUC_RELAY_COMMUNICATION_ABI_H
+
+#include <linux/build_bug.h>
+
+#include "guc_actions_sriov_abi.h"
+#include "guc_communication_ctb_abi.h"
+#include "guc_messages_abi.h"
+
+/**
+ * DOC: GuC Relay Communication
+ *
+ * The communication between Virtual Function (VF) drivers and Physical Function
+ * (PF) drivers is based on the GuC firmware acting as a proxy (relay) agent.
+ *
+ * To communicate with the PF driver, VF drivers use the `VF2GUC_RELAY_TO_PF`_
+ * action that takes the `Relay Message`_ as an opaque payload and requires the
+ * relay message identifier (RID) as an additional parameter.
+ *
+ * This identifier is used by the drivers to match related messages.
+ *
+ * The GuC forwards this `Relay Message`_ and its identifier to the PF driver
+ * in `GUC2PF_RELAY_FROM_VF`_ action. This event message additionally contains
+ * the identifier of the origin VF (VFID).
+ *
+ * Likewise, to communicate with the VF drivers, the PF driver uses the
+ * `PF2GUC_RELAY_TO_VF`_ action that, in addition to the `Relay Message`_
+ * and the relay message identifier (RID), also takes the target VF identifier.
+ *
+ * The GuC uses the target VFID from the message to select where to send the
+ * `GUC2VF_RELAY_FROM_PF`_ carrying the embedded `Relay Message`_ with the response::
+ *
+ * VF GuC PF
+ * | | |
+ * [ ] VF2GUC_RELAY_TO_PF | |
+ * [ ]---------------------------> [ ] |
+ * [ ] { rid, msg } [ ] |
+ * [ ] [ ] GUC2PF_RELAY_FROM_VF |
+ * [ ] [ ]---------------------------> [ ]
+ * [ ] | { VFID, rid, msg } [ ]
+ * [ ] | [ ]
+ * [ ] | PF2GUC_RELAY_TO_VF [ ]
+ * [ ] [ ] <---------------------------[ ]
+ * [ ] [ ] { VFID, rid, reply } |
+ * [ ] GUC2VF_RELAY_FROM_PF [ ] |
+ * [ ] <---------------------------[ ] |
+ * | { rid, reply } | |
+ * | | |
+ *
+ * It is also possible for the PF driver to initiate communication with a
+ * selected VF driver. The same GuC action messages are used::
+ *
+ * VF GuC PF
+ * | | |
+ * | | PF2GUC_RELAY_TO_VF [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] { VFID, rid, msg } [ ]
+ * | GUC2VF_RELAY_FROM_PF [ ] [ ]
+ * [ ] <---------------------------[ ] [ ]
+ * [ ] { rid, msg } | [ ]
+ * [ ] | [ ]
+ * [ ] VF2GUC_RELAY_TO_PF | [ ]
+ * [ ]---------------------------> [ ] [ ]
+ * | { rid, reply } [ ] [ ]
+ * | [ ] GUC2PF_RELAY_FROM_VF [ ]
+ * | [ ]---------------------------> [ ]
+ * | | { VFID, rid, reply } |
+ * | | |
+ */
+
+/**
+ * DOC: Relay Message
+ *
+ * The `Relay Message`_ is used by the Physical Function (PF) driver and the
+ * Virtual Function (VF) drivers to communicate using `GuC Relay Communication`_.
+ *
+ * The format of the `Relay Message`_ follows the format of the generic
+ * `HXG Message`_.
+ *
+ * +--------------------------------------------------------------------------+
+ * | `Relay Message`_ |
+ * +==========================================================================+
+ * | `HXG Message`_ |
+ * +--------------------------------------------------------------------------+
+ *
+ * The maximum length of the `Relay Message`_ is limited by the maximum length
+ * of the `CTB HXG Message`_ and the format of the `GUC2PF_RELAY_FROM_VF`_ message.
+ */
+
+#define GUC_RELAY_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_RELAY_MSG_MAX_LEN \
+ (GUC_CTB_MAX_DWORDS - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN)
+
+static_assert(PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN >
+ VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN);
+
+/**
+ * DOC: Relay Error Codes
+ *
+ * The `GuC Relay Communication`_ can be used to pass a `Relay Message`_ between
+ * drivers that run on different operating systems. To aid troubleshooting,
+ * `GuC Relay Communication`_ uses error codes that mostly match errno values.
+ */
+
+#define GUC_RELAY_ERROR_UNDISCLOSED 0
+#define GUC_RELAY_ERROR_OPERATION_NOT_PERMITTED 1 /* EPERM */
+#define GUC_RELAY_ERROR_PERMISSION_DENIED 13 /* EACCES */
+#define GUC_RELAY_ERROR_INVALID_ARGUMENT 22 /* EINVAL */
+#define GUC_RELAY_ERROR_INVALID_REQUEST_CODE 56 /* EBADRQC */
+#define GUC_RELAY_ERROR_NO_DATA_AVAILABLE 61 /* ENODATA */
+#define GUC_RELAY_ERROR_PROTOCOL_ERROR 71 /* EPROTO */
+#define GUC_RELAY_ERROR_MESSAGE_SIZE 90 /* EMSGSIZE */
+
+#endif
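
Since the error codes mirror errno values, a receiving driver can translate them back before propagating a failure. A minimal sketch; the helper below is hypothetical, not taken from the patch:

#include <linux/errno.h>

/* Hypothetical sketch: map a received relay error code back to an errno. */
static int example_relay_error_to_errno(u32 error)
{
	switch (error) {
	case GUC_RELAY_ERROR_OPERATION_NOT_PERMITTED:
		return -EPERM;
	case GUC_RELAY_ERROR_PERMISSION_DENIED:
		return -EACCES;
	case GUC_RELAY_ERROR_INVALID_ARGUMENT:
		return -EINVAL;
	case GUC_RELAY_ERROR_INVALID_REQUEST_CODE:
		return -EBADRQC;
	case GUC_RELAY_ERROR_NO_DATA_AVAILABLE:
		return -ENODATA;
	case GUC_RELAY_ERROR_PROTOCOL_ERROR:
		return -EPROTO;
	case GUC_RELAY_ERROR_MESSAGE_SIZE:
		return -EMSGSIZE;
	default:
		return -EREMOTEIO;	/* including GUC_RELAY_ERROR_UNDISCLOSED */
	}
}
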
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 5d2a77b52db4..420eba0e4be0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -162,18 +162,18 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#include "intel_wakeref.h"
-static inline bool intel_runtime_pm_get(struct xe_runtime_pm *pm)
+static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
if (xe_pm_runtime_get(xe) < 0) {
xe_pm_runtime_put(xe);
- return false;
+ return 0;
}
- return true;
+ return 1;
}
-static inline bool intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
+static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
@@ -187,7 +187,7 @@ static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
xe_pm_runtime_put(xe);
}
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, bool wakeref)
+static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
{
if (wakeref)
intel_runtime_pm_put_unchecked(pm);
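
The point of switching these shims from bool to intel_wakeref_t is that shared i915 display code keeps its usual guarded acquire/release pattern unmodified. A minimal usage sketch under that assumption (the function name is hypothetical):

/* Hypothetical sketch: the guarded pattern the compat shims now support. */
static void example_with_rpm(struct xe_device *xe)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_if_in_use(&xe->runtime_pm);
	if (!wakeref)
		return;	/* device not in use, nothing to do */

	/* ... safe to touch hardware here ... */

	intel_runtime_pm_put(&xe->runtime_pm, wakeref);
}
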
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
index 888e7a87a925..bd233007c1b7 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
@@ -19,6 +19,9 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
int err;
u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+ if (align)
+ size = ALIGN(size, align);
+
bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
NULL, size, start, end,
ttm_bo_type_kernel, flags);
diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e4db069f0db3..e4db069f0db3 100644
--- a/drivers/gpu/drm/xe/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
diff --git a/drivers/gpu/drm/xe/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 710e56180b52..710e56180b52 100644
--- a/drivers/gpu/drm/xe/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index ccf83c12b545..866d1dd6eeb4 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -18,19 +19,20 @@
#include "intel_plane_initial.h"
static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
+intel_reuse_initial_plane_obj(struct intel_crtc *this,
+ const struct intel_initial_plane_config plane_configs[],
struct drm_framebuffer **fb)
{
+ struct drm_i915_private *i915 = to_i915(this->base.dev);
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
+ const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
continue;
@@ -38,7 +40,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
if (!plane_state->ggtt_vma)
continue;
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
*fb = plane_state->hw.fb;
return true;
}
@@ -178,10 +180,10 @@ err_bo:
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+ struct intel_initial_plane_config plane_configs[])
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@@ -201,7 +203,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
if (intel_alloc_initial_plane_obj(crtc, plane_config))
fb = &plane_config->fb->base;
- else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb))
+ else if (!intel_reuse_initial_plane_obj(crtc, plane_configs, &fb))
goto nofb;
plane_state->uapi.rotation = plane_config->rotation;
@@ -267,25 +269,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
}
}
-void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+void intel_initial_plane_config(struct drm_i915_private *i915)
{
- struct xe_device *xe = to_xe_device(crtc->base.dev);
- struct intel_initial_plane_config plane_config = {};
+ struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
+ if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
+ continue;
- plane_config_fini(&plane_config);
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, plane_configs);
+
+ if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ plane_config_fini(plane_config);
+ }
}
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 1cfa96167fde..c74ceb550dce 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -56,6 +56,9 @@
#define MI_FLUSH_IMM_QW REG_FIELD_PREP(MI_FLUSH_DW_LEN_DW, 5 - 2)
#define MI_FLUSH_DW_USE_GTT REG_BIT(2)
+#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
+#define MI_LRM_USE_GGTT REG_BIT(22)
+
#define MI_BATCH_BUFFER_START __MI_INSTR(0x31)
#endif
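
MI_LOAD_REGISTER_MEM is encoded as a 4-dword command: the header, the MMIO register offset, then the low and high dwords of the source address. A minimal emission sketch under that layout (the helper name is hypothetical):

/* Hypothetical sketch: emit a 4-dword MI_LOAD_REGISTER_MEM using the GGTT. */
static u32 *example_emit_lrm(u32 *cs, u32 reg_addr, u64 ggtt_addr)
{
	*cs++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT;
	*cs++ = reg_addr;			/* MMIO register to load */
	*cs++ = lower_32_bits(ggtt_addr);	/* source address, low dword */
	*cs++ = upper_32_bits(ggtt_addr);	/* source address, high dword */
	return cs;
}
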
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 5592774fc690..0b1266c88a6a 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -75,12 +75,17 @@
#define FF_THREAD_MODE(base) XE_REG((base) + 0xa0)
#define FF_TESSELATION_DOP_GATE_DISABLE BIT(19)
+#define RING_INT_SRC_RPT_PTR(base) XE_REG((base) + 0xa4)
#define RING_IMR(base) XE_REG((base) + 0xa8)
+#define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac)
#define RING_EIR(base) XE_REG((base) + 0xb0)
#define RING_EMR(base) XE_REG((base) + 0xb4)
#define RING_ESR(base) XE_REG((base) + 0xb8)
+#define INSTPM(base) XE_REG((base) + 0xc0, XE_REG_OPTION_MASKED)
+#define ENABLE_SEMAPHORE_POLL_BIT REG_BIT(13)
+
#define RING_CMD_CCTL(base) XE_REG((base) + 0xc4, XE_REG_OPTION_MASKED)
/*
* CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
@@ -136,6 +141,7 @@
#define TAIL_ADDR 0x001FFFF8
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
+#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)
#define RING_FORCE_TO_NONPRIV_DENY REG_BIT(30)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 1dd361046b5d..15ac2d284d48 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -144,8 +144,12 @@
#define GSCPSMI_BASE XE_REG(0x880c)
+#define CCCHKNREG1 XE_REG_MCR(0x8828)
+#define ENCOMPPERFFIX REG_BIT(18)
+
/* Fuse readout registers for GT */
#define XEHP_FUSE4 XE_REG(0x9114)
+#define CFEG_WMTP_DISABLE REG_BIT(20)
#define CCS_EN_MASK REG_GENMASK(19, 16)
#define GT_L3_EXC_MASK REG_GENMASK(6, 4)
@@ -288,6 +292,9 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define L3SQCREG3 XE_REG_MCR(0xb108)
+#define COMPPWOVERFETCHEN REG_BIT(28)
+
#define XEHP_L3SQCREG5 XE_REG_MCR(0xb158)
#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
@@ -344,6 +351,9 @@
#define ROW_CHICKEN3 XE_REG_MCR(0xe49c, XE_REG_OPTION_MASKED)
#define DIS_FIX_EOT1_FLUSH REG_BIT(9)
+#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
+#define SLM_WMTP_RESTORE REG_BIT(11)
+
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
@@ -430,6 +440,15 @@
#define VOLTAGE_MASK REG_GENMASK(10, 0)
#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4))
+#define INTR_GSC REG_BIT(31)
+#define INTR_GUC REG_BIT(25)
+#define INTR_MGUC REG_BIT(24)
+#define INTR_BCS8 REG_BIT(23)
+#define INTR_BCS(x) REG_BIT(15 - (x))
+#define INTR_CCS(x) REG_BIT(4 + (x))
+#define INTR_RCS0 REG_BIT(0)
+#define INTR_VECS(x) REG_BIT(31 - (x))
+#define INTR_VCS(x) REG_BIT(x)
#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030)
#define VCS_VECS_INTR_ENABLE XE_REG(0x190034)
@@ -446,6 +465,7 @@
#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
#define OTHER_GUC_INSTANCE 0
+#define OTHER_GSC_HECI2_INSTANCE 3
#define OTHER_GSC_INSTANCE 6
#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4))
@@ -454,6 +474,7 @@
#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8)
#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac)
#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0)
+#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
#define GUC_SG_INTR_MASK XE_REG(0x1900e8)
#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec)
#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4)
@@ -469,10 +490,4 @@
#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
-#define PVC_GT0_PACKAGE_ENERGY_STATUS XE_REG(0x281004)
-#define PVC_GT0_PACKAGE_RAPL_LIMIT XE_REG(0x281008)
-#define PVC_GT0_PACKAGE_POWER_SKU_UNIT XE_REG(0x281068)
-#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
-#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-
#endif
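
The new INTR_* helpers make decoding a raw GT_INTR_DW value a matter of simple mask tests. A minimal sketch (the function name is hypothetical):

/* Hypothetical sketch: test a few engine bits in a raw GT_INTR_DW value. */
static void example_decode_gt_intr(u32 intr_dw)
{
	if (intr_dw & INTR_RCS0)
		pr_debug("render engine interrupt\n");
	if (intr_dw & INTR_BCS(0))
		pr_debug("copy engine 0 interrupt\n");
	if (intr_dw & (INTR_GUC | INTR_MGUC))
		pr_debug("GuC interrupt\n");
}
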
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 4be81abc86ad..1825d8f79db6 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -14,4 +14,13 @@
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
+#define CTX_LRM_INT_MASK_ENABLE 0x50
+#define CTX_INT_MASK_ENABLE_REG (CTX_LRM_INT_MASK_ENABLE + 1)
+#define CTX_INT_MASK_ENABLE_PTR (CTX_LRM_INT_MASK_ENABLE + 2)
+#define CTX_LRI_INT_REPORT_PTR 0x55
+#define CTX_INT_STATUS_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 1)
+#define CTX_INT_STATUS_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 2)
+#define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3)
+#define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4)
+
#endif
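
Each CTX_* offset pair follows the usual LRC convention: the slot at the base offset holds the instruction header and the following slots hold its operands. A minimal sketch of populating the LRM slot in a context image; which register and pointer the slot actually targets is an assumption here, not taken from the patch:

/* Hypothetical sketch: fill the LRM slot of an LRC state image. */
static void example_fill_lrm_slot(u32 *regs, u32 reg_addr, u64 ggtt_addr)
{
	regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT;
	regs[CTX_INT_MASK_ENABLE_REG] = reg_addr;		   /* assumed: mask register */
	regs[CTX_INT_MASK_ENABLE_PTR] = lower_32_bits(ggtt_addr); /* assumed: GGTT pointer */
}
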
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
new file mode 100644
index 000000000000..3dae858508c8
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_PCODE_REGS_H_
+#define _XE_PCODE_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+/*
+ * This file contains addresses of PCODE registers visible through GT MMIO space.
+ */
+
+#define PVC_GT0_PACKAGE_ENERGY_STATUS XE_REG(0x281004)
+#define PVC_GT0_PACKAGE_RAPL_LIMIT XE_REG(0x281008)
+#define PVC_GT0_PACKAGE_POWER_SKU_UNIT XE_REG(0x281068)
+#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
+#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
+
+#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 39d8a0892274..9d1d88af8b2f 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -1,10 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+# "live" kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \
xe_bo_test.o \
xe_dma_buf_test.o \
xe_migrate_test.o \
- xe_mocs_test.o \
+ xe_mocs_test.o
+
+# Normal kunit tests
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o
+xe_test-y = xe_test_mod.o \
xe_pci_test.o \
xe_rtp_test.o \
xe_wa_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c b/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c
new file mode 100644
index 000000000000..a87a7b4b040a
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+
+static int guc_dbm_test_init(struct kunit *test)
+{
+ struct xe_guc_db_mgr *dbm;
+
+ xe_kunit_helper_xe_device_test_init(test);
+ dbm = &xe_device_get_gt(test->priv, 0)->uc.guc.dbm;
+
+ mutex_init(dbm_mutex(dbm));
+ test->priv = dbm;
+ return 0;
+}
+
+static void test_empty(struct kunit *test)
+{
+ struct xe_guc_db_mgr *dbm = test->priv;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, 0), 0);
+ KUNIT_ASSERT_EQ(test, dbm->count, 0);
+
+ mutex_lock(dbm_mutex(dbm));
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+}
+
+static void test_default(struct kunit *test)
+{
+ struct xe_guc_db_mgr *dbm = test->priv;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_EQ(test, dbm->count, GUC_NUM_DOORBELLS);
+}
+
+static const unsigned int guc_dbm_params[] = {
+ GUC_NUM_DOORBELLS / 64,
+ GUC_NUM_DOORBELLS / 32,
+ GUC_NUM_DOORBELLS / 8,
+ GUC_NUM_DOORBELLS,
+};
+
+static void uint_param_get_desc(const unsigned int *p, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u", *p);
+}
+
+KUNIT_ARRAY_PARAM(guc_dbm, guc_dbm_params, uint_param_get_desc);
+
+static void test_size(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ unsigned int n;
+ int id;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0);
+ KUNIT_ASSERT_EQ(test, dbm->count, *p);
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++) {
+ KUNIT_EXPECT_GE(test, id = xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ KUNIT_EXPECT_LT(test, id, dbm->count);
+ }
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++)
+ xe_guc_db_mgr_release_id_locked(dbm, n);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+static void test_reuse(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0);
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++)
+ KUNIT_EXPECT_GE(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++) {
+ xe_guc_db_mgr_release_id_locked(dbm, n);
+ KUNIT_EXPECT_EQ(test, xe_guc_db_mgr_reserve_id_locked(dbm), n);
+ }
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++)
+ xe_guc_db_mgr_release_id_locked(dbm, n);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+static void test_range_overlap(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ int id1, id2, id3;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_LE(test, *p, dbm->count);
+
+ KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
+ for (n = 0; n < dbm->count - *p; n++) {
+ KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ KUNIT_ASSERT_NE(test, id2, id1);
+ KUNIT_ASSERT_NE_MSG(test, id2 < id1, id2 > id1 + *p - 1,
+ "id1=%d id2=%d", id1, id2);
+ }
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
+
+ if (*p >= 1) {
+ KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, *p - 1, 0), 0);
+ KUNIT_ASSERT_NE(test, id2, id1);
+ KUNIT_ASSERT_NE_MSG(test, id1 < id2, id1 > id2 + *p - 2,
+ "id1=%d id2=%d", id1, id2);
+ for (n = 0; n < dbm->count - *p; n++) {
+ KUNIT_ASSERT_GE(test, id3 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ KUNIT_ASSERT_NE(test, id3, id1);
+ KUNIT_ASSERT_NE(test, id3, id2);
+ KUNIT_ASSERT_NE_MSG(test, id3 < id2, id3 > id2 + *p - 2,
+ "id3=%d id2=%d", id3, id2);
+ }
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
+ }
+}
+
+static void test_range_compact(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_NE(test, *p, 0);
+ KUNIT_ASSERT_LE(test, *p, dbm->count);
+ if (dbm->count % *p)
+ kunit_skip(test, "must be divisible");
+
+ KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
+ for (n = 1; n < dbm->count / *p; n++)
+ KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
+}
+
+static void test_range_spare(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ int id;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_LE(test, *p, dbm->count);
+
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count), 0);
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p + 1), 0);
+ KUNIT_ASSERT_EQ(test, id = xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p), 0);
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, dbm->count - *p), 0);
+ xe_guc_db_mgr_release_range(dbm, id, *p);
+}
+
+static struct kunit_case guc_dbm_test_cases[] = {
+ KUNIT_CASE(test_empty),
+ KUNIT_CASE(test_default),
+ KUNIT_CASE_PARAM(test_size, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_reuse, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_range_overlap, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_range_compact, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_range_spare, guc_dbm_gen_params),
+ {}
+};
+
+static struct kunit_suite guc_dbm_suite = {
+ .name = "guc_dbm",
+ .test_cases = guc_dbm_test_cases,
+ .init = guc_dbm_test_init,
+};
+
+kunit_test_suites(&guc_dbm_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c b/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c
new file mode 100644
index 000000000000..13701451b923
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define TEST_RID 1234
+#define TEST_VFID 5
+#define TEST_LEN 6
+#define TEST_ACTION 0xa
+#define TEST_DATA(n) (0xd0 + (n))
+
+static int replacement_relay_get_totalvfs(struct xe_guc_relay *relay)
+{
+ return TEST_VFID;
+}
+
+static int relay_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_guc_relay *relay;
+ struct xe_device *xe;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ kunit_activate_static_stub(test, relay_get_totalvfs,
+ replacement_relay_get_totalvfs);
+
+ KUNIT_ASSERT_EQ(test, xe_guc_relay_init(relay), 0);
+ KUNIT_EXPECT_TRUE(test, relay_is_ready(relay));
+ relay->last_rid = TEST_RID - 1;
+
+ test->priv = relay;
+ return 0;
+}
+
+static const u32 TEST_MSG[TEST_LEN] = {
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, TEST_ACTION) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_DATA0, TEST_DATA(0)),
+ TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4),
+};
+
+static int replacement_xe_guc_ct_send_recv_always_fails(struct xe_guc_ct *ct,
+ const u32 *msg, u32 len,
+ u32 *response_buffer)
+{
+ struct kunit *test = kunit_get_current_test();
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
+ KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN);
+
+ return -ECOMM;
+}
+
+static int replacement_xe_guc_ct_send_recv_expects_pf2guc_relay(struct xe_guc_ct *ct,
+ const u32 *msg, u32 len,
+ u32 *response_buffer)
+{
+ struct kunit *test = kunit_get_current_test();
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
+ KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN);
+ KUNIT_ASSERT_EQ(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + TEST_LEN);
+ KUNIT_EXPECT_EQ(test, GUC_HXG_ORIGIN_HOST, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]));
+ KUNIT_EXPECT_EQ(test, GUC_HXG_TYPE_REQUEST, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]));
+ KUNIT_EXPECT_EQ(test, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF,
+ FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]));
+ KUNIT_EXPECT_EQ(test, TEST_VFID,
+ FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, msg[1]));
+ KUNIT_EXPECT_EQ(test, TEST_RID,
+ FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, msg[2]));
+ KUNIT_EXPECT_MEMEQ(test, TEST_MSG, msg + PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN,
+ sizeof(u32) * TEST_LEN);
+ return 0;
+}
+
+static const u32 test_guc2pf[GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN] = {
+ /* transport */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF),
+ FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, TEST_VFID),
+ FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, TEST_RID),
+ /* payload */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
+};
+
+static const u32 test_guc2vf[GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN] = {
+ /* transport */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF),
+ FIELD_PREP_CONST(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, TEST_RID),
+ /* payload */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
+};
+
+static void pf_rejects_guc2pf_too_short(struct kunit *test)
+{
+ const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN - 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2pf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void pf_rejects_guc2pf_too_long(struct kunit *test)
+{
+ const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN + 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2pf;
+
+ KUNIT_ASSERT_EQ(test, -EMSGSIZE, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void pf_rejects_guc2pf_no_payload(struct kunit *test)
+{
+ const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2pf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void pf_fails_no_payload(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ const u32 msg = 0;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, relay_process_msg(relay, TEST_VFID, TEST_RID, &msg, 0));
+}
+
+static void pf_fails_bad_origin(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ static const u32 msg[] = {
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
+ };
+ u32 len = ARRAY_SIZE(msg);
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len));
+}
+
+static void pf_fails_bad_type(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ const u32 msg[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, 4), /* only 4 is undefined */
+ };
+ u32 len = ARRAY_SIZE(msg);
+
+ KUNIT_ASSERT_EQ(test, -EBADRQC, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len));
+}
+
+static void pf_txn_reports_error(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ struct relay_transaction *txn;
+
+ txn = __relay_get_transaction(relay, false, TEST_VFID, TEST_RID,
+ TEST_MSG, TEST_LEN, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn);
+
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_always_fails);
+ KUNIT_EXPECT_EQ(test, -ECOMM, relay_send_transaction(relay, txn));
+
+ relay_release_transaction(relay, txn);
+}
+
+static void pf_txn_sends_pf2guc(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ struct relay_transaction *txn;
+
+ txn = __relay_get_transaction(relay, false, TEST_VFID, TEST_RID,
+ TEST_MSG, TEST_LEN, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn);
+
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_expects_pf2guc_relay);
+ KUNIT_ASSERT_EQ(test, 0, relay_send_transaction(relay, txn));
+
+ relay_release_transaction(relay, txn);
+}
+
+static void pf_sends_pf2guc(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_expects_pf2guc_relay);
+ KUNIT_ASSERT_EQ(test, 0,
+ xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ TEST_MSG, TEST_LEN, NULL, 0));
+}
+
+static int replacement_xe_guc_ct_send_recv_loopback_relay(struct xe_guc_ct *ct,
+ const u32 *msg, u32 len,
+ u32 *response_buffer)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_guc_relay *relay = test->priv;
+ u32 *reply = kunit_kzalloc(test, len * sizeof(u32), GFP_KERNEL);
+ int (*guc2relay)(struct xe_guc_relay *, const u32 *, u32);
+ u32 action;
+ int err;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
+ KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN);
+ KUNIT_ASSERT_EQ(test, GUC_HXG_TYPE_REQUEST,
+ FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]));
+ KUNIT_ASSERT_GE(test, len, GUC_HXG_REQUEST_MSG_MIN_LEN);
+ KUNIT_ASSERT_NOT_NULL(test, reply);
+
+ switch (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0])) {
+ case XE_GUC_ACTION_PF2GUC_RELAY_TO_VF:
+ KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN);
+ action = XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF;
+ guc2relay = xe_guc_relay_process_guc2pf;
+ break;
+ case XE_GUC_ACTION_VF2GUC_RELAY_TO_PF:
+ KUNIT_ASSERT_GE(test, len, VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN);
+ action = XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF;
+ guc2relay = xe_guc_relay_process_guc2vf;
+ break;
+ default:
+ KUNIT_FAIL(test, "bad RELAY action %#x", msg[0]);
+ return -EINVAL;
+ }
+
+ reply[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION, action);
+ memcpy(reply + 1, msg + 1, sizeof(u32) * (len - 1));
+
+ err = guc2relay(relay, reply, len);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ return err;
+}
+
+static void test_requires_relay_testloop(struct kunit *test)
+{
+ /*
+ * The debug relay action GUC_RELAY_ACTION_VFXPF_TESTLOOP is available
+ * only on builds with CONFIG_DRM_XE_DEBUG_SRIOV enabled.
+ * See "kunit.py --kconfig_add" option if it's missing.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV))
+ kunit_skip(test, "requires %s\n", __stringify(CONFIG_DRM_XE_DEBUG_SRIOV));
+}
+
+static void pf_loopback_nop(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_NOP),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]),
+ GUC_HXG_ORIGIN_HOST);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]),
+ GUC_HXG_TYPE_RESPONSE_SUCCESS);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]), 0);
+}
+
+static void pf_loopback_echo(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_ECHO),
+ TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4),
+ };
+ u32 response[ARRAY_SIZE(request)];
+ unsigned int n;
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(response));
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]),
+ GUC_HXG_ORIGIN_HOST);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]),
+ GUC_HXG_TYPE_RESPONSE_SUCCESS);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]),
+ ARRAY_SIZE(response));
+ for (n = GUC_HXG_RESPONSE_MSG_MIN_LEN; n < ret; n++)
+ KUNIT_EXPECT_EQ(test, request[n], response[n]);
+}
+
+static void pf_loopback_fail(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_FAIL),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, -EREMOTEIO);
+}
+
+static void pf_loopback_busy(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_BUSY),
+ TEST_DATA(0xb),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_testonly_nop, relay_process_incoming_action);
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
+}
+
+static void pf_loopback_retry(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_RETRY),
+ TEST_DATA(0xd), TEST_DATA(0xd),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
+}
+
+static struct kunit_case pf_relay_test_cases[] = {
+ KUNIT_CASE(pf_rejects_guc2pf_too_short),
+ KUNIT_CASE(pf_rejects_guc2pf_too_long),
+ KUNIT_CASE(pf_rejects_guc2pf_no_payload),
+ KUNIT_CASE(pf_fails_no_payload),
+ KUNIT_CASE(pf_fails_bad_origin),
+ KUNIT_CASE(pf_fails_bad_type),
+ KUNIT_CASE(pf_txn_reports_error),
+ KUNIT_CASE(pf_txn_sends_pf2guc),
+ KUNIT_CASE(pf_sends_pf2guc),
+ KUNIT_CASE(pf_loopback_nop),
+ KUNIT_CASE(pf_loopback_echo),
+ KUNIT_CASE(pf_loopback_fail),
+ KUNIT_CASE_SLOW(pf_loopback_busy),
+ KUNIT_CASE_SLOW(pf_loopback_retry),
+ {}
+};
+
+static struct kunit_suite pf_relay_suite = {
+ .name = "pf_relay",
+ .test_cases = pf_relay_test_cases,
+ .init = relay_test_init,
+};
+
+static void vf_rejects_guc2vf_too_short(struct kunit *test)
+{
+ const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN - 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2vf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static void vf_rejects_guc2vf_too_long(struct kunit *test)
+{
+ const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN + 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2vf;
+
+ KUNIT_ASSERT_EQ(test, -EMSGSIZE, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static void vf_rejects_guc2vf_no_payload(struct kunit *test)
+{
+ const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2vf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static struct kunit_case vf_relay_test_cases[] = {
+ KUNIT_CASE(vf_rejects_guc2vf_too_short),
+ KUNIT_CASE(vf_rejects_guc2vf_too_long),
+ KUNIT_CASE(vf_rejects_guc2vf_no_payload),
+ {}
+};
+
+static struct kunit_suite vf_relay_suite = {
+ .name = "vf_relay",
+ .test_cases = vf_relay_test_cases,
+ .init = relay_test_init,
+};
+
+static void xe_drops_guc2pf_if_not_ready(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ const u32 *msg = test_guc2pf;
+ u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN;
+
+ KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void xe_drops_guc2vf_if_not_ready(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ const u32 *msg = test_guc2vf;
+ u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN;
+
+ KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static void xe_rejects_send_if_not_ready(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ u32 msg[GUC_RELAY_MSG_MIN_LEN];
+ u32 len = ARRAY_SIZE(msg);
+
+ KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_send_to_pf(relay, msg, len, NULL, 0));
+ KUNIT_ASSERT_EQ(test, -ENODEV, relay_send_to(relay, TEST_VFID, msg, len, NULL, 0));
+}
+
+static struct kunit_case no_relay_test_cases[] = {
+ KUNIT_CASE(xe_drops_guc2pf_if_not_ready),
+ KUNIT_CASE(xe_drops_guc2vf_if_not_ready),
+ KUNIT_CASE(xe_rejects_send_if_not_ready),
+ {}
+};
+
+static struct kunit_suite no_relay_suite = {
+ .name = "no_relay",
+ .test_cases = no_relay_test_cases,
+ .init = xe_kunit_helper_xe_device_test_init,
+};
+
+kunit_test_suites(&no_relay_suite,
+ &pf_relay_suite,
+ &vf_relay_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
new file mode 100644
index 000000000000..fefe79b3b75a
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include <kunit/visibility.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "tests/xe_kunit_helpers.h"
+#include "tests/xe_pci_test.h"
+#include "xe_device_types.h"
+
+/**
+ * xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test.
+ * @test: the &kunit where this &xe_device will be used
+ * @dev: The parent device object
+ *
+ * This function allocates xe_device using drm_kunit_helper_alloc_drm_device().
+ * The xe_device allocation is managed by the test.
+ *
+ * @dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * This function uses KUNIT_ASSERT to detect any allocation failures.
+ *
+ * Return: A pointer to the new &xe_device.
+ */
+struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
+ struct device *dev)
+{
+ struct xe_device *xe;
+
+ xe = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct xe_device,
+ drm, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+ return xe;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_alloc_xe_device);
+
+static void kunit_action_restore_priv(void *priv)
+{
+ struct kunit *test = kunit_get_current_test();
+
+ test->priv = priv;
+}
+
+/**
+ * xe_kunit_helper_xe_device_test_init - Prepare a &xe_device for a KUnit test.
+ * @test: the &kunit where this fake &xe_device will be used
+ *
+ * This function allocates and initializes a fake &xe_device and stores its
+ * pointer as &kunit.priv to allow the test code to access it.
+ *
+ * This function can be directly used as custom implementation of
+ * &kunit_suite.init.
+ *
+ * It is possible to prepare a specific variant of the fake &xe_device by
+ * passing in &kunit.priv a pointer to a struct xe_pci_fake_data supplemented
+ * with the desired parameters prior to calling this function.
+ *
+ * This function uses KUNIT_ASSERT to detect any failures.
+ *
+ * Return: Always 0.
+ */
+int xe_kunit_helper_xe_device_test_init(struct kunit *test)
+{
+ struct xe_device *xe;
+ struct device *dev;
+ int err;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ xe = xe_kunit_helper_alloc_xe_device(test, dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+
+ err = xe_pci_fake_device_init(xe);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = kunit_add_action_or_reset(test, kunit_action_restore_priv, test->priv);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ test->priv = xe;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);
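As the kernel-doc above notes, a suite can either use this init directly or seed &kunit.priv with a &struct xe_pci_fake_data first to request a specific fake device variant. A hedged usage sketch (the suite, case, and init names are made up for illustration):

	static void sample_case(struct kunit *test)
	{
		struct xe_device *xe = test->priv;	/* set by the init helper */

		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, xe);
	}

	static int sample_pf_test_init(struct kunit *test)
	{
		static struct xe_pci_fake_data data = {
			.sriov_mode = XE_SRIOV_MODE_PF,	/* request a PF-mode fake */
		};

		test->priv = &data;
		return xe_kunit_helper_xe_device_test_init(test);
	}

	static struct kunit_case sample_cases[] = {
		KUNIT_CASE(sample_case),
		{}
	};

	static struct kunit_suite sample_suite = {
		.name = "sample",
		.test_cases = sample_cases,
		.init = sample_pf_test_init,
	};

	kunit_test_suite(sample_suite);
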
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
new file mode 100644
index 000000000000..067a1babf049
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_KUNIT_HELPERS_H_
+#define _XE_KUNIT_HELPERS_H_
+
+struct device;
+struct kunit;
+struct xe_device;
+
+struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
+ struct device *dev);
+int xe_kunit_helper_xe_device_test_init(struct kunit *test);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 7dd34f94e809..df5c36b70ab4 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -128,3 +128,39 @@ void xe_live_mocs_kernel_kunit(struct kunit *test)
xe_call_for_each_device(mocs_kernel_test_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
+
+static int mocs_reset_test_run_device(struct xe_device *xe)
+{
+ /* Check that the MOCS setup is retained over a GT reset */
+
+ struct live_mocs mocs;
+ struct xe_gt *gt;
+ unsigned int flags;
+ int id;
+ struct kunit *test = xe_cur_kunit();
+
+ for_each_gt(gt, xe, id) {
+ flags = live_mocs_init(&mocs, gt);
+ kunit_info(test, "mocs_reset_test before reset\n");
+ if (flags & HAS_GLOBAL_MOCS)
+ read_mocs_table(gt, &mocs.table);
+ if (flags & HAS_LNCF_MOCS)
+ read_l3cc_table(gt, &mocs.table);
+
+ xe_gt_reset_async(gt);
+ flush_work(&gt->reset.worker);
+
+ kunit_info(test, "mocs_reset_test after reset\n");
+ if (flags & HAS_GLOBAL_MOCS)
+ read_mocs_table(gt, &mocs.table);
+ if (flags & HAS_LNCF_MOCS)
+ read_l3cc_table(gt, &mocs.table);
+ }
+ return 0;
+}
+
+void xe_live_mocs_reset_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(mocs_reset_test_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index 421b819fd4ba..ee40f31e1e12 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -9,6 +9,7 @@
static struct kunit_case xe_mocs_tests[] = {
KUNIT_CASE(xe_live_mocs_kernel_kunit),
+ KUNIT_CASE(xe_live_mocs_reset_kunit),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
index 7faa3575e6c3..e7699d495411 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
@@ -9,5 +9,6 @@
struct kunit;
void xe_live_mocs_kernel_kunit(struct kunit *test);
+void xe_live_mocs_reset_kunit(struct kunit *test);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 602793644f61..f62809ca8b51 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -156,6 +156,9 @@ int xe_pci_fake_device_init(struct xe_device *xe)
return -ENODEV;
done:
+ xe->sriov.__mode = data && data->sriov_mode ?
+ data->sriov_mode : XE_SRIOV_MODE_NONE;
+
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
xe_info_init_early(xe, desc, subplatform_desc);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 171e4180f1aa..a6705a536391 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -64,8 +64,3 @@ static struct kunit_suite xe_pci_test_suite = {
};
kunit_test_suite(xe_pci_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_pci kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index 811ffe5bd9fd..f40dcec83992 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "xe_platform_types.h"
+#include "xe_sriov_types.h"
struct xe_device;
struct xe_graphics_desc;
@@ -23,6 +24,7 @@ void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
void xe_call_for_each_media_ip(xe_media_fn xe_fn);
struct xe_pci_fake_data {
+ enum xe_sriov_mode sriov_mode;
enum xe_platform platform;
enum xe_subplatform subplatform;
u32 graphics_verx100;
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 4a6972897675..06759d754783 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -15,6 +15,7 @@
#include "regs/xe_reg_defs.h"
#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
#include "xe_rtp.h"
@@ -276,9 +277,7 @@ static int xe_rtp_test_init(struct kunit *test)
dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
- xe = drm_kunit_helper_alloc_drm_device(test, dev,
- struct xe_device,
- drm, DRIVER_GEM);
+ xe = xe_kunit_helper_alloc_xe_device(test, dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
/* Initialize an empty device */
@@ -312,8 +311,3 @@ static struct kunit_suite xe_rtp_test_suite = {
};
kunit_test_suite(xe_rtp_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_rtp kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_test_mod.c b/drivers/gpu/drm/xe/tests/xe_test_mod.c
new file mode 100644
index 000000000000..875f3e6f965e
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_test_mod.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe kunit tests");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index b4715b78ef3b..44570d888355 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -9,6 +9,7 @@
#include <kunit/test.h>
#include "xe_device.h"
+#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
#include "xe_tuning.h"
@@ -65,14 +66,8 @@ static const struct platform_test_case cases[] = {
PLATFORM_CASE(ALDERLAKE_P, C0),
SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
- SUBPLATFORM_CASE(DG2, G10, A0),
- SUBPLATFORM_CASE(DG2, G10, A1),
- SUBPLATFORM_CASE(DG2, G10, B0),
SUBPLATFORM_CASE(DG2, G10, C0),
- SUBPLATFORM_CASE(DG2, G11, A0),
- SUBPLATFORM_CASE(DG2, G11, B0),
SUBPLATFORM_CASE(DG2, G11, B1),
- SUBPLATFORM_CASE(DG2, G12, A0),
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
@@ -105,9 +100,7 @@ static int xe_wa_test_init(struct kunit *test)
dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
- xe = drm_kunit_helper_alloc_drm_device(test, dev,
- struct xe_device,
- drm, DRIVER_GEM);
+ xe = xe_kunit_helper_alloc_xe_device(test, dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
test->priv = &data;
@@ -160,8 +153,3 @@ static struct kunit_suite xe_rtp_test_suite = {
};
kunit_test_suite(xe_rtp_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_wa kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 0b0e262e2166..76dfaf1cd200 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -28,6 +28,14 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
+ [XE_PL_SYSTEM] = "system",
+ [XE_PL_TT] = "gtt",
+ [XE_PL_VRAM0] = "vram0",
+ [XE_PL_VRAM1] = "vram1",
+ [XE_PL_STOLEN] = "stolen"
+};
+
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
@@ -38,22 +46,26 @@ static const struct ttm_place sys_placement_flags = {
static struct ttm_placement sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
};
-static const struct ttm_place tt_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = XE_PL_TT,
- .flags = 0,
+static const struct ttm_place tt_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = XE_PL_TT,
+ .flags = TTM_PL_FLAG_DESIRED,
+ },
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = XE_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_FALLBACK,
+ }
};
static struct ttm_placement tt_placement = {
- .num_placement = 1,
- .placement = &tt_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
+ .num_placement = 2,
+ .placement = tt_placement_flags,
};
bool mem_type_is_vram(u32 mem_type)
@@ -230,8 +242,6 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
bo->placement = (struct ttm_placement) {
.num_placement = c,
.placement = bo->placements,
- .num_busy_placement = c,
- .busy_placement = bo->placements,
};
return 0;
@@ -251,7 +261,6 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
/* Don't handle scatter gather BOs */
if (tbo->type == ttm_bo_type_sg) {
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
}
@@ -586,6 +595,8 @@ static int xe_bo_move_notify(struct xe_bo *bo,
{
struct ttm_buffer_object *ttm_bo = &bo->ttm;
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct ttm_resource *old_mem = ttm_bo->resource;
+ u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
int ret;
/*
@@ -605,6 +616,18 @@ static int xe_bo_move_notify(struct xe_bo *bo,
if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
dma_buf_move_notify(ttm_bo->base.dma_buf);
+ /*
+ * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
+ * so if we moved from VRAM make sure to unlink this from the userfault
+ * tracking.
+ */
+ if (mem_type_is_vram(old_mem_type)) {
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (!list_empty(&bo->vram_userfault_link))
+ list_del_init(&bo->vram_userfault_link);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+ }
+
return 0;
}
@@ -713,8 +736,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
migrate = xe->tiles[0].migrate;
xe_assert(xe, migrate);
-
- trace_xe_bo_move(bo);
+ trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type);
xe_device_mem_access_get(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
@@ -1027,7 +1049,7 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
}
}
-struct ttm_device_funcs xe_ttm_funcs = {
+const struct ttm_device_funcs xe_ttm_funcs = {
.ttm_tt_create = xe_ttm_tt_create,
.ttm_tt_populate = xe_ttm_tt_populate,
.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
@@ -1063,6 +1085,11 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (!list_empty(&bo->vram_userfault_link))
+ list_del(&bo->vram_userfault_link);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
kfree(bo);
}
@@ -1110,16 +1137,20 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
struct drm_device *ddev = tbo->base.dev;
+ struct xe_device *xe = to_xe_device(ddev);
+ struct xe_bo *bo = ttm_to_xe_bo(tbo);
+ bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
vm_fault_t ret;
int idx, r = 0;
+ if (needs_rpm)
+ xe_device_mem_access_get(xe);
+
ret = ttm_bo_vm_reserve(tbo, vmf);
if (ret)
- return ret;
+ goto out;
if (drm_dev_enter(ddev, &idx)) {
- struct xe_bo *bo = ttm_to_xe_bo(tbo);
-
trace_xe_bo_cpu_fault(bo);
if (should_migrate_to_system(bo)) {
@@ -1137,10 +1168,24 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
+
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
+ goto out;
+ /*
+ * ttm_bo_vm_reserve() already holds the dma_resv lock.
+ */
+ if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (list_empty(&bo->vram_userfault_link))
+ list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+ }
dma_resv_unlock(tbo->base.resv);
+out:
+ if (needs_rpm)
+ xe_device_mem_access_put(xe);
+
return ret;
}
@@ -1254,6 +1299,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
#ifdef CONFIG_PROC_FS
INIT_LIST_HEAD(&bo->client_link);
#endif
+ INIT_LIST_HEAD(&bo->vram_userfault_link);
drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
@@ -1353,8 +1399,6 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
bo->placement = (struct ttm_placement) {
.num_placement = 1,
.placement = place,
- .num_busy_placement = 1,
- .busy_placement = place,
};
return 0;
@@ -1568,6 +1612,38 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
return bo;
}
+/**
+ * xe_managed_bo_reinit_in_vram - Replace a managed system-memory BO with one in VRAM
+ * @xe: xe device
+ * @tile: Tile where the new buffer will be created
+ * @src: Managed buffer object allocated in system memory
+ *
+ * Replace a managed src buffer object allocated in system memory with a new
+ * one allocated in vram, copying the data between them.
+ * The buffer object in VRAM will not have the same GGTT address; the caller
+ * is responsible for making sure that any old references to it are updated.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
+{
+ struct xe_bo *bo;
+
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, !(*src)->vmap.is_iomem);
+
+ bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, (*src)->size,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src);
+ *src = bo;
+
+ return 0;
+}
+
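A sketch of the intended call pattern, assuming a caller that kept a managed BO in system memory until VRAM became usable (the uc_fw name is hypothetical here):

	/* promote a managed system-memory BO to VRAM once VRAM is usable */
	err = xe_managed_bo_reinit_in_vram(xe, tile, &uc_fw->bo);
	if (err)
		return err;

	/* the GGTT address has changed; refresh anything derived from it */
	ggtt_addr = xe_bo_ggtt_addr(uc_fw->bo);
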
/*
* XXX: This is in the VM bind data path, likely should calculate this once and
* store, with a recalculation if the BO is moved.
@@ -2112,9 +2188,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
xe_place_from_ttm_type(mem_type, &requested);
placement.num_placement = 1;
- placement.num_busy_placement = 1;
placement.placement = &requested;
- placement.busy_placement = &requested;
/*
* Stolen needs to be handled like below VRAM handling if we ever need
@@ -2264,6 +2338,16 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
return err;
}
+void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ struct ttm_device *bdev = tbo->bdev;
+
+ drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);
+
+ list_del_init(&bo->vram_userfault_link);
+}
+
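This helper pairs with the vram_userfault list maintained in xe_gem_fault() above; its consumer is expected to live on the runtime-suspend path (in xe_pm.c, outside this diff). A hedged sketch of that loop, using _safe iteration since the helper unlinks each entry:

	struct xe_bo *bo, *next;

	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, next, &xe->mem_access.vram_userfault.list,
				 vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);
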
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9b1279aca127..c59ad15961ce 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -44,6 +44,7 @@
#define XE_BO_FIXED_PLACEMENT_BIT BIT(11)
#define XE_BO_PAGETABLE BIT(12)
#define XE_BO_NEEDS_CPU_ACCESS BIT(13)
+#define XE_BO_NEEDS_UC BIT(14)
/* this one is trigger internally only */
#define XE_BO_INTERNAL_TEST BIT(30)
#define XE_BO_INTERNAL_64K BIT(31)
@@ -128,6 +129,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
size_t size, u32 flags);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags);
+int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags);
@@ -242,12 +244,15 @@ int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
-extern struct ttm_device_funcs xe_ttm_funcs;
+extern const struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo);
+
int xe_bo_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 64c2249a4e40..14ef13b7b421 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -88,6 +88,9 @@ struct xe_bo {
* objects.
*/
u16 cpu_caching;
+
+ /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
+ struct list_head vram_userfault_link;
};
#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index c56fd7d59f05..01db5b27bec5 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -55,6 +55,7 @@ static int info(struct seq_file *m, void *data)
drm_printf(&p, "force_execlist %s\n", str_yes_no(xe->info.force_execlist));
drm_printf(&p, "has_flat_ccs %s\n", str_yes_no(xe->info.has_flat_ccs));
drm_printf(&p, "has_usm %s\n", str_yes_no(xe->info.has_usm));
+ drm_printf(&p, "skip_guc_pc %s\n", str_yes_no(xe->info.skip_guc_pc));
for_each_gt(gt, xe, id) {
drm_printf(&p, "gt%d force wake %d\n", id,
xe_force_wake_ref(gt_to_fw(gt), XE_FW_GT));
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 68abc0b195be..68d3d623a05b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -16,6 +16,8 @@
#include "xe_guc_ct.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
+#include "xe_sched_job.h"
+#include "xe_vm.h"
/**
* DOC: Xe device coredump
@@ -58,11 +60,22 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
+static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+{
+ struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+
+ xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+ if (ss->vm)
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+}
+
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
- struct xe_devcoredump_snapshot *ss;
+ struct xe_device *xe = coredump_to_xe(coredump);
+ struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
struct drm_printer p;
struct drm_print_iterator iter;
struct timespec64 ts;
@@ -72,12 +85,14 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
if (!data || !coredump_to_xe(coredump))
return -ENODEV;
+ /* Ensure delayed work is captured before continuing */
+ flush_work(&ss->work);
+
iter.data = buffer;
iter.offset = 0;
iter.start = offset;
iter.remain = count;
- ss = &coredump->snapshot;
p = drm_coredump_printer(&iter);
drm_printf(&p, "**** Xe Device Coredump ****\n");
@@ -88,16 +103,24 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
ts = ktime_to_timespec64(ss->boot_time);
drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+ xe_device_snapshot_print(xe, &p);
drm_printf(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
+ drm_printf(&p, "\n**** Job ****\n");
+ xe_sched_job_snapshot_print(coredump->snapshot.job, &p);
+
drm_printf(&p, "\n**** HW Engines ****\n");
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
if (coredump->snapshot.hwe[i])
xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
&p);
+ if (coredump->snapshot.vm) {
+ drm_printf(&p, "\n**** VM state ****\n");
+ xe_vm_snapshot_print(coredump->snapshot.vm, &p);
+ }
return count - iter.remain;
}
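The dump produced by this read callback is exposed through sysfs at the path hinted by the drm_info() in xe_devcoredump() below. A minimal userspace reader sketch, assuming card index 0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/sys/class/drm/card0/device/devcoredump/data",
			      O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}
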
@@ -111,21 +134,28 @@ static void xe_devcoredump_free(void *data)
if (!data || !coredump_to_xe(coredump))
return;
+ cancel_work_sync(&coredump->snapshot.work);
+
xe_guc_ct_snapshot_free(coredump->snapshot.ct);
xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
+ xe_sched_job_snapshot_free(coredump->snapshot.job);
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
if (coredump->snapshot.hwe[i])
xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
+ xe_vm_snapshot_free(coredump->snapshot.vm);
+ /* To prevent stale data on next snapshot, clear everything */
+ memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
coredump->captured = false;
drm_info(&coredump_to_xe(coredump)->drm,
"Xe device coredump has been deleted.\n");
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
- struct xe_exec_queue *q)
+ struct xe_sched_job *job)
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
+ struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
@@ -137,6 +167,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
+ ss->gt = q->gt;
+ INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
+
cookie = dma_fence_begin_signalling();
for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
if (adj_logical_mask & BIT(i)) {
@@ -150,7 +183,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
- coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
+ coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(job);
+ coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
+ coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
for_each_hw_engine(hwe, q->gt, id) {
if (hwe->class != q->hwe->class ||
@@ -161,21 +196,24 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
}
+ if (ss->vm)
+ queue_work(system_unbound_wq, &ss->work);
+
xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
dma_fence_end_signalling(cookie);
}
/**
* xe_devcoredump - Take the required snapshots and initialize coredump device.
- * @q: The faulty xe_exec_queue, where the issue was detected.
+ * @job: The faulty xe_sched_job, where the issue was detected.
*
* This function should be called at the crash time within the serialized
* gt_reset. It is skipped if we still have the core dump device available
* with the information of the 'first' snapshot.
*/
-void xe_devcoredump(struct xe_exec_queue *q)
+void xe_devcoredump(struct xe_sched_job *job)
{
- struct xe_device *xe = gt_to_xe(q->gt);
+ struct xe_device *xe = gt_to_xe(job->q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
if (coredump->captured) {
@@ -184,7 +222,7 @@ void xe_devcoredump(struct xe_exec_queue *q)
}
coredump->captured = true;
- devcoredump_snapshot(coredump, q);
+ devcoredump_snapshot(coredump, job);
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
@@ -194,3 +232,4 @@ void xe_devcoredump(struct xe_exec_queue *q)
xe_devcoredump_read, xe_devcoredump_free);
}
#endif
+
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index 6ac218a5c194..df8671f0b5eb 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -7,12 +7,12 @@
#define _XE_DEVCOREDUMP_H_
struct xe_device;
-struct xe_exec_queue;
+struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_exec_queue *q);
+void xe_devcoredump(struct xe_sched_job *job);
#else
-static inline void xe_devcoredump(struct xe_exec_queue *q)
+static inline void xe_devcoredump(struct xe_sched_job *job)
{
}
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 7fdad9c3d3dd..6f654b63c7f1 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -12,6 +12,7 @@
#include "xe_hw_engine_types.h"
struct xe_device;
+struct xe_gt;
/**
* struct xe_devcoredump_snapshot - Crash snapshot
@@ -26,13 +27,23 @@ struct xe_devcoredump_snapshot {
/** @boot_time: Relative boot time so the uptime can be calculated. */
ktime_t boot_time;
+ /** @gt: Affected GT, used by forcewake for delayed capture */
+ struct xe_gt *gt;
+ /** @work: Workqueue for deferred capture outside of signaling context */
+ struct work_struct work;
+
/* GuC snapshots */
/** @ct: GuC CT snapshot */
struct xe_guc_ct_snapshot *ct;
/** @ge: Guc Engine snapshot */
struct xe_guc_submit_exec_queue_snapshot *ge;
+
/** @hwe: HW Engine snapshot array */
struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
+ /** @job: Snapshot of job state */
+ struct xe_sched_job_snapshot *job;
+ /** @vm: Snapshot of VM state */
+ struct xe_vm_snapshot *vm;
};
/**
@@ -44,8 +55,6 @@ struct xe_devcoredump_snapshot {
* for reading the information.
*/
struct xe_devcoredump {
- /** @xe: Xe device. */
- struct xe_device *xe;
/** @captured: The snapshot of the first hang has already been taken. */
bool captured;
/** @snapshot: Snapshot is captured at time of the first crash */
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 5176c27e4b6a..ca85e81fdb44 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -15,32 +15,35 @@
#include <drm/drm_print.h>
#include <drm/xe_drm.h>
+#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
-#include "xe_display.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
-#include "xe_exec_queue.h"
#include "xe_exec.h"
+#include "xe_exec_queue.h"
#include "xe_ggtt.h"
+#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
+#include "xe_hwmon.h"
#include "xe_irq.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
+#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_wait_user_fence.h"
-#include "xe_hwmon.h"
#ifdef CONFIG_LOCKDEP
struct lockdep_map xe_device_mem_access_lockdep_map = {
@@ -424,10 +427,15 @@ int xe_device_probe(struct xe_device *xe)
struct xe_tile *tile;
struct xe_gt *gt;
int err;
+ u8 last_gt;
u8 id;
xe_pat_init_early(xe);
+ err = xe_sriov_init(xe);
+ if (err)
+ return err;
+
xe->info.mem_region_mask = 1;
err = xe_display_init_nommio(xe);
if (err)
@@ -448,6 +456,17 @@ int xe_device_probe(struct xe_device *xe)
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
+ if (IS_SRIOV_VF(xe)) {
+ err = xe_memirq_init(&tile->sriov.vf.memirq);
+ if (err)
+ return err;
+ }
+ }
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_init_hwconfig(gt);
+ if (err)
+ return err;
}
err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
@@ -502,16 +521,18 @@ int xe_device_probe(struct xe_device *xe)
goto err_irq_shutdown;
for_each_gt(gt, xe, id) {
+ last_gt = id;
+
err = xe_gt_init(gt);
if (err)
- goto err_irq_shutdown;
+ goto err_fini_gt;
}
xe_heci_gsc_init(xe);
err = xe_display_init(xe);
if (err)
- goto err_irq_shutdown;
+ goto err_fini_gt;
err = drm_dev_register(&xe->drm, 0);
if (err)
@@ -532,6 +553,14 @@ int xe_device_probe(struct xe_device *xe)
err_fini_display:
xe_display_driver_remove(xe);
+err_fini_gt:
+ for_each_gt(gt, xe, id) {
+ if (id < last_gt)
+ xe_gt_remove(gt);
+ else
+ break;
+ }
+
err_irq_shutdown:
xe_irq_shutdown(xe);
err:
@@ -549,12 +578,18 @@ static void xe_device_remove_display(struct xe_device *xe)
void xe_device_remove(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ u8 id;
+
xe_device_remove_display(xe);
xe_display_fini(xe);
xe_heci_gsc_fini(xe);
+ for_each_gt(gt, xe, id)
+ xe_gt_remove(gt);
+
xe_irq_shutdown(xe);
}
@@ -659,3 +694,33 @@ void xe_device_mem_access_put(struct xe_device *xe)
xe_assert(xe, ref >= 0);
}
+
+void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
+{
+ struct xe_gt *gt;
+ u8 id;
+
+ drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
+ drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);
+
+ for_each_gt(gt, xe, id) {
+ drm_printf(p, "GT id: %u\n", id);
+ drm_printf(p, "\tType: %s\n",
+ gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
+ drm_printf(p, "\tIP ver: %u.%u.%u\n",
+ REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
+ REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
+ REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
+ drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
+ }
+}
+
+u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
+{
+ return sign_extend64(address, xe->info.va_bits - 1);
+}
+
+u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
+{
+ return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
+}
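A worked example of the two helpers, assuming xe->info.va_bits == 48 (an illustrative value): canonicalization replicates bit va_bits - 1 into all upper bits, and uncanonicalization masks those bits back off.

	u64 addr  = 0x0000800000000000ull;	/* bit 47 (va_bits - 1) set */
	u64 canon = xe_device_canonicalize_addr(xe, addr);
	u64 plain = xe_device_uncanonicalize_addr(xe, canon);

	/* canon == 0xffff800000000000, plain == 0x0000800000000000 */
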
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 08d8b72c7731..14be34d9f543 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -164,6 +164,16 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
return xe->info.has_sriov;
}
+static inline bool xe_device_has_memirq(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) >= 1250;
+}
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
+void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
+
+u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address);
+u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index e8491979a6f2..9785eef2e5a4 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -16,6 +16,7 @@
#include "xe_heci_gsc.h"
#include "xe_gt_types.h"
#include "xe_lmtt_types.h"
+#include "xe_memirq_types.h"
#include "xe_platform_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
@@ -142,10 +143,10 @@ struct xe_tile {
* * 8MB-16MB: global GTT
*/
struct {
- /** @size: size of tile's MMIO space */
+ /** @mmio.size: size of tile's MMIO space */
size_t size;
- /** @regs: pointer to tile's MMIO space (starting with registers) */
+ /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */
void __iomem *regs;
} mmio;
@@ -155,31 +156,31 @@ struct xe_tile {
* Each tile has its own additional 256MB (28-bit) MMIO-extension space.
*/
struct {
- /** @size: size of tile's additional MMIO-extension space */
+ /** @mmio_ext.size: size of tile's additional MMIO-extension space */
size_t size;
- /** @regs: pointer to tile's additional MMIO-extension space */
+ /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */
void __iomem *regs;
} mmio_ext;
/** @mem: memory management info for tile */
struct {
/**
- * @vram: VRAM info for tile.
+ * @mem.vram: VRAM info for tile.
*
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
struct xe_mem_region vram;
- /** @vram_mgr: VRAM TTM manager */
+ /** @mem.vram_mgr: VRAM TTM manager */
struct xe_ttm_vram_mgr *vram_mgr;
- /** @ggtt: Global graphics translation table */
+ /** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
/**
- * @kernel_bb_pool: Pool from which batchbuffers are allocated.
+ * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
*
* Media GT shares a pool with its primary GT.
*/
@@ -192,6 +193,10 @@ struct xe_tile {
/** @sriov.pf.lmtt: Local Memory Translation Table. */
struct xe_lmtt lmtt;
} pf;
+ struct {
+ /** @sriov.vf.memirq: Memory Based Interrupts. */
+ struct xe_memirq memirq;
+ } vf;
} sriov;
/** @migrate: Migration helper for vram blits and clearing */
@@ -213,68 +218,68 @@ struct xe_device {
/** @info: device info */
struct intel_device_info {
- /** @graphics_name: graphics IP name */
+ /** @info.graphics_name: graphics IP name */
const char *graphics_name;
- /** @media_name: media IP name */
+ /** @info.media_name: media IP name */
const char *media_name;
- /** @tile_mmio_ext_size: size of MMIO extension space, per-tile */
+ /** @info.tile_mmio_ext_size: size of MMIO extension space, per-tile */
u32 tile_mmio_ext_size;
- /** @graphics_verx100: graphics IP version */
+ /** @info.graphics_verx100: graphics IP version */
u32 graphics_verx100;
- /** @media_verx100: media IP version */
+ /** @info.media_verx100: media IP version */
u32 media_verx100;
- /** @mem_region_mask: mask of valid memory regions */
+ /** @info.mem_region_mask: mask of valid memory regions */
u32 mem_region_mask;
- /** @platform: XE platform enum */
+ /** @info.platform: XE platform enum */
enum xe_platform platform;
- /** @subplatform: XE subplatform enum */
+ /** @info.subplatform: XE subplatform enum */
enum xe_subplatform subplatform;
- /** @devid: device ID */
+ /** @info.devid: device ID */
u16 devid;
- /** @revid: device revision */
+ /** @info.revid: device revision */
u8 revid;
- /** @step: stepping information for each IP */
+ /** @info.step: stepping information for each IP */
struct xe_step_info step;
- /** @dma_mask_size: DMA address bits */
+ /** @info.dma_mask_size: DMA address bits */
u8 dma_mask_size;
- /** @vram_flags: Vram flags */
+ /** @info.vram_flags: Vram flags */
u8 vram_flags;
- /** @tile_count: Number of tiles */
+ /** @info.tile_count: Number of tiles */
u8 tile_count;
- /** @gt_count: Total number of GTs for entire device */
+ /** @info.gt_count: Total number of GTs for entire device */
u8 gt_count;
- /** @vm_max_level: Max VM level */
+ /** @info.vm_max_level: Max VM level */
u8 vm_max_level;
- /** @va_bits: Maximum bits of a virtual address */
+ /** @info.va_bits: Maximum bits of a virtual address */
u8 va_bits;
- /** @is_dgfx: is discrete device */
+ /** @info.is_dgfx: is discrete device */
u8 is_dgfx:1;
- /** @has_asid: Has address space ID */
+ /** @info.has_asid: Has address space ID */
u8 has_asid:1;
- /** @force_execlist: Forced execlist submission */
+ /** @info.force_execlist: Forced execlist submission */
u8 force_execlist:1;
- /** @has_flat_ccs: Whether flat CCS metadata is used */
+ /** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
- /** @has_llc: Device has a shared CPU+GPU last level cache */
+ /** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
- /** @has_mmio_ext: Device has extra MMIO address range */
+ /** @info.has_mmio_ext: Device has extra MMIO address range */
u8 has_mmio_ext:1;
- /** @has_range_tlb_invalidation: Has range based TLB invalidations */
+ /** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
u8 has_range_tlb_invalidation:1;
- /** @has_sriov: Supports SR-IOV */
+ /** @info.has_sriov: Supports SR-IOV */
u8 has_sriov:1;
- /** @has_usm: Device has unified shared memory support */
+ /** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
- /** @enable_display: display enabled */
+ /** @info.enable_display: display enabled */
u8 enable_display:1;
- /** @skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
+ /** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
- /** @skip_pcode: skip access to PCODE uC */
+ /** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
- /** @has_heci_gscfi: device has heci gscfi */
+ /** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
- /** @skip_guc_pc: Skip GuC based PM feature init */
+ /** @info.skip_guc_pc: Skip GuC based PM feature init */
u8 skip_guc_pc:1;
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -286,10 +291,10 @@ struct xe_device {
/** @irq: device interrupt state */
struct {
- /** @lock: lock for processing irq's on this device */
+ /** @irq.lock: lock for processing irq's on this device */
spinlock_t lock;
- /** @enabled: interrupts enabled on this device */
+ /** @irq.enabled: interrupts enabled on this device */
bool enabled;
} irq;
@@ -298,17 +303,17 @@ struct xe_device {
/** @mmio: mmio info for device */
struct {
- /** @size: size of MMIO space for device */
+ /** @mmio.size: size of MMIO space for device */
size_t size;
- /** @regs: pointer to MMIO space for device */
+ /** @mmio.regs: pointer to MMIO space for device */
void __iomem *regs;
} mmio;
/** @mem: memory info for device */
struct {
- /** @vram: VRAM info for device */
+ /** @mem.vram: VRAM info for device */
struct xe_mem_region vram;
- /** @sys_mgr: system TTM manager */
+ /** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
} mem;
@@ -316,40 +321,42 @@ struct xe_device {
struct {
/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
enum xe_sriov_mode __mode;
+ /** @sriov.wq: workqueue used by the virtualization workers */
+ struct workqueue_struct *wq;
} sriov;
/** @clients: drm clients info */
struct {
- /** @lock: Protects drm clients info */
+ /** @clients.lock: Protects drm clients info */
spinlock_t lock;
- /** @count: number of drm clients */
+ /** @clients.count: number of drm clients */
u64 count;
} clients;
/** @usm: unified memory state */
struct {
- /** @asid: convert a ASID to VM */
+ /** @usm.asid: convert an ASID to VM */
struct xarray asid_to_vm;
- /** @next_asid: next ASID, used to cyclical alloc asids */
+ /** @usm.next_asid: next ASID, used to cyclical alloc asids */
u32 next_asid;
- /** @num_vm_in_fault_mode: number of VM in fault mode */
+ /** @usm.num_vm_in_fault_mode: number of VM in fault mode */
u32 num_vm_in_fault_mode;
- /** @num_vm_in_non_fault_mode: number of VM in non-fault mode */
+ /** @usm.num_vm_in_non_fault_mode: number of VM in non-fault mode */
u32 num_vm_in_non_fault_mode;
- /** @lock: protects UM state */
+ /** @usm.lock: protects USM state */
struct mutex lock;
} usm;
/** @pinned: pinned BO state */
struct {
- /** @lock: protected pinned BO list state */
+ /** @pinned.lock: protects pinned BO list state */
spinlock_t lock;
- /** @evicted: pinned kernel BO that are present */
+ /** @pinned.kernel_bo_present: pinned kernel BOs that are present */
struct list_head kernel_bo_present;
- /** @evicted: pinned BO that have been evicted */
+ /** @pinned.evicted: pinned BOs that have been evicted */
struct list_head evicted;
- /** @external_vram: pinned external BO in vram*/
+ /** @pinned.external_vram: pinned external BO in vram */
struct list_head external_vram;
} pinned;
@@ -370,36 +377,57 @@ struct xe_device {
* triggering additional actions when they occur.
*/
struct {
- /** @ref: ref count of memory accesses */
+ /** @mem_access.ref: ref count of memory accesses */
atomic_t ref;
+
+ /**
+ * @mem_access.vram_userfault: Encapsulate vram_userfault
+ * related stuff
+ */
+ struct {
+ /**
+ * @mem_access.vram_userfault.lock: Protects access to
+ * @mem_access.vram_userfault.list. A mutex is used
+ * instead of a spinlock as the lock is held across
+ * entire list operations, which may sleep.
+ */
+ struct mutex lock;
+
+ /**
+ * @mem_access.vram_userfault.list: List of userfaulted
+ * VRAM BOs whose mmap mappings must be released on the
+ * runtime suspend path.
+ */
+ struct list_head list;
+ } vram_userfault;
} mem_access;
/**
* @pat: Encapsulate PAT related stuff
*/
struct {
- /** Internal operations to abstract platforms */
+ /** @pat.ops: Internal operations to abstract platforms */
const struct xe_pat_ops *ops;
- /** PAT table to program in the HW */
+ /** @pat.table: PAT table to program in the HW */
const struct xe_pat_table_entry *table;
- /** Number of PAT entries */
+ /** @pat.n_entries: Number of PAT entries */
int n_entries;
u32 idx[__XE_CACHE_LEVEL_COUNT];
} pat;
/** @d3cold: Encapsulate d3cold related stuff */
struct {
- /** capable: Indicates if root port is d3cold capable */
+ /** @d3cold.capable: Indicates if root port is d3cold capable */
bool capable;
- /** @allowed: Indicates if d3cold is a valid device state */
+ /** @d3cold.allowed: Indicates if d3cold is a valid device state */
bool allowed;
- /** @power_lost: Indicates if card has really lost power. */
+ /** @d3cold.power_lost: Indicates if card has really lost power. */
bool power_lost;
/**
- * @vram_threshold:
+ * @d3cold.vram_threshold:
*
* This represents the permissible threshold(in megabytes)
* for vram save/restore. d3cold will be disallowed,
@@ -408,7 +436,7 @@ struct xe_device {
* Default threshold value is 300mb.
*/
u32 vram_threshold;
- /** @lock: protect vram_threshold */
+ /** @d3cold.lock: protect vram_threshold */
struct mutex lock;
} d3cold;
@@ -516,17 +544,17 @@ struct xe_file {
/** @vm: VM state for file */
struct {
- /** @xe: xarray to store VMs */
+ /** @vm.xe: xarray to store VMs */
struct xarray xa;
- /** @lock: protects file VM state */
+ /** @vm.lock: protects file VM state */
struct mutex lock;
} vm;
/** @exec_queue: Submission exec queue state for file */
struct {
- /** @xe: xarray to store engines */
+ /** @exec_queue.xe: xarray to store engines */
struct xarray xa;
- /** @lock: protects file engine state */
+ /** @exec_queue.lock: protects file engine state */
struct mutex lock;
} exec_queue;
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 82d1305e831f..87c10bd7958b 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -113,7 +113,7 @@ static void bo_meminfo(struct xe_bo *bo,
else
mem_type = XE_PL_TT;
- if (bo->ttm.base.handle_count > 1)
+ if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))
stats[mem_type].shared += sz;
else
stats[mem_type].private += sz;
@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
- static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
- [XE_PL_SYSTEM] = "system",
- [XE_PL_TT] = "gtt",
- [XE_PL_VRAM0] = "vram0",
- [XE_PL_VRAM1] = "vram1",
- [4 ... 6] = NULL,
- [XE_PL_STOLEN] = "stolen"
- };
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
struct xe_file *xef = file->driver_priv;
struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
spin_unlock(&client->bos_lock);
for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
- if (!mem_type_to_name[mem_type])
+ if (!xe_mem_type_to_name[mem_type])
continue;
man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
- mem_type_to_name[mem_type]);
+ xe_mem_type_to_name[mem_type]);
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 17f26952e665..952496c6260d 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -96,7 +96,46 @@
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
- return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+ struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int num_fences;
+ int ret;
+
+ ret = drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+ if (ret)
+ return ret;
+
+ /*
+ * 1 fence slot for the final submit, 1 more per tile for the GPU binds,
+ * and 1 extra for the CPU bind. Note that there are potentially many
+ * VMAs per object/dma-resv, however the fence slot will just be re-used,
+ * since they are largely on the same timeline and the seqnos should be
+ * in order. In the case of CPU bind there is a single dummy fence used
+ * for all CPU binds, so no need to have a per-tile slot for that.
+ */
+ num_fences = 1 + 1 + vm->xe->info.tile_count;
+
+ /*
+ * We don't know upfront exactly how many fence slots we will need at
+ * the start of the exec, since the TTM bo_validate above can consume
+ * numerous fence slots. Also, due to how dma_resv_reserve_fences()
+ * works, it only ensures that at least that many fence slots are
+ * available, i.e. if there are already 10 slots available and we
+ * reserve two more, it can just noop without reserving anything.
+ * With this it is quite possible that TTM steals some of the fence
+ * slots and then when it comes time to do the VMA binding and final
+ * exec stage we are lacking enough fence slots, leading to some nasty
+ * BUG_ON() when adding the fences. Hence just reserve our own slots
+ * here, after the validate stage.
+ */
+ drm_exec_for_each_locked_object(&vm_exec->exec, index, obj) {
+ ret = dma_resv_reserve_fences(obj->resv, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -197,7 +236,6 @@ retry:
}
vm_exec.vm = &vm->gpuvm;
- vm_exec.num_fences = 1 + vm->xe->info.tile_count;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
drm_exec_init(exec, vm_exec.flags, 0);
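The reworked xe_exec_fn() above reserves fence slots only after drm_gpuvm_validate(), for the reason spelled out in its comment. The underlying dma_resv contract, as a minimal sketch with hypothetical fences on an already-locked object:

	/* ensure at least two free slots exist (may no-op if they already do) */
	ret = dma_resv_reserve_fences(obj->resv, 2);
	if (ret)
		return ret;

	/* the adds below can then never hit the no-slot BUG_ON() */
	dma_resv_add_fence(obj->resv, bind_fence, DMA_RESV_USAGE_KERNEL);
	dma_resv_add_fence(obj->resv, job_fence, DMA_RESV_USAGE_BOOKKEEP);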
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 3acfd4f07666..4bb8f897bf15 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -30,21 +30,23 @@ enum xe_exec_queue_sched_prop {
XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};
-static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
- struct xe_vm *vm,
- u32 logical_mask,
- u16 width, struct xe_hw_engine *hwe,
- u32 flags)
+static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
+ u64 extensions, int ext_number, bool create);
+
+static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
+ struct xe_vm *vm,
+ u32 logical_mask,
+ u16 width, struct xe_hw_engine *hwe,
+ u32 flags, u64 extensions)
{
struct xe_exec_queue *q;
struct xe_gt *gt = hwe->gt;
int err;
- int i;
/* only kernel queues can be permanent */
XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
- q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
+ q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
if (!q)
return ERR_PTR(-ENOMEM);
@@ -52,8 +54,6 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
q->flags = flags;
q->hwe = hwe;
q->gt = gt;
- if (vm)
- q->vm = xe_vm_get(vm);
q->class = hwe->class;
q->width = width;
q->logical_mask = logical_mask;
@@ -66,23 +66,51 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
q->sched_props.preempt_timeout_us =
hwe->eclass->sched_props.preempt_timeout_us;
+ q->sched_props.job_timeout_ms =
+ hwe->eclass->sched_props.job_timeout_ms;
if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
else
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+ if (extensions) {
+ /*
+ * User extensions may set q->usm, so this must come before
+ * xe_lrc_init(), and may overwrite q->sched_props, so it must
+ * come before q->ops->init().
+ */
+ err = exec_queue_user_extensions(xe, q, extensions, 0, true);
+ if (err) {
+ kfree(q);
+ return ERR_PTR(err);
+ }
+ }
+
+ if (vm)
+ q->vm = xe_vm_get(vm);
+
if (xe_exec_queue_is_parallel(q)) {
q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
}
- if (q->flags & EXEC_QUEUE_FLAG_VM) {
- q->bind.fence_ctx = dma_fence_context_alloc(1);
- q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
- }
- for (i = 0; i < width; ++i) {
- err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
+ return q;
+}
+
+static void __xe_exec_queue_free(struct xe_exec_queue *q)
+{
+ if (q->vm)
+ xe_vm_put(q->vm);
+ kfree(q);
+}
+
+static int __xe_exec_queue_init(struct xe_exec_queue *q)
+{
+ struct xe_device *xe = gt_to_xe(q->gt);
+ int i, err;
+
+ for (i = 0; i < q->width; ++i) {
+ err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
if (err)
goto err_lrc;
}
@@ -99,35 +127,47 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
* can perform GuC CT actions when needed. Caller is expected to have
* already grabbed the rpm ref outside any sensitive locks.
*/
- if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
- return q;
+ return 0;
err_lrc:
for (i = i - 1; i >= 0; --i)
xe_lrc_finish(q->lrc + i);
- kfree(q);
- return ERR_PTR(err);
+ return err;
}
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
- struct xe_hw_engine *hwe, u32 flags)
+ struct xe_hw_engine *hwe, u32 flags,
+ u64 extensions)
{
struct xe_exec_queue *q;
int err;
+ q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
+ extensions);
+ if (IS_ERR(q))
+ return q;
+
if (vm) {
err = xe_vm_lock(vm, true);
if (err)
- return ERR_PTR(err);
+ goto err_post_alloc;
}
- q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
+
+ err = __xe_exec_queue_init(q);
if (vm)
xe_vm_unlock(vm);
+ if (err)
+ goto err_post_alloc;
return q;
+
+err_post_alloc:
+ __xe_exec_queue_free(q);
+ return ERR_PTR(err);
}
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
@@ -152,7 +192,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
if (!logical_mask)
return ERR_PTR(-ENODEV);
- return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
+ return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}
void xe_exec_queue_destroy(struct kref *ref)
@@ -178,10 +218,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
xe_lrc_finish(q->lrc + i);
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
xe_device_mem_access_put(gt_to_xe(q->gt));
- if (q->vm)
- xe_vm_put(q->vm);
-
- kfree(q);
+ __xe_exec_queue_free(q);
}
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
@@ -239,7 +276,11 @@ static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q
if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
return -EPERM;
- return q->ops->set_priority(q, value);
+ if (!create)
+ return q->ops->set_priority(q, value);
+
+ q->sched_props.priority = value;
+ return 0;
}
static bool xe_exec_queue_enforce_schedule_limit(void)
@@ -306,7 +347,11 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
- return q->ops->set_timeslice(q, value);
+ if (!create)
+ return q->ops->set_timeslice(q, value);
+
+ q->sched_props.timeslice_us = value;
+ return 0;
}
static int exec_queue_set_preemption_timeout(struct xe_device *xe,
@@ -322,7 +367,11 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe,
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
- return q->ops->set_preempt_timeout(q, value);
+ if (!create)
+ return q->ops->set_preempt_timeout(q, value);
+
+ q->sched_props.preempt_timeout_us = value;
+ return 0;
}
static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
@@ -340,7 +389,9 @@ static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
- return q->ops->set_job_timeout(q, value);
+ q->sched_props.job_timeout_ms = value;
+
+ return 0;
}
static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
@@ -617,6 +668,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
for_each_gt(gt, xe, id) {
struct xe_exec_queue *new;
+ u32 flags;
if (xe_gt_is_media_type(gt))
continue;
@@ -635,14 +687,12 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
/* The migration vm doesn't hold rpm ref */
xe_device_mem_access_get(xe);
+ flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
+
migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
- args->width, hwe,
- EXEC_QUEUE_FLAG_PERSISTENT |
- EXEC_QUEUE_FLAG_VM |
- (id ?
- EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
- 0));
+ args->width, hwe, flags,
+ args->extensions);
xe_device_mem_access_put(xe); /* now held by engine */
@@ -688,7 +738,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe, 0);
+ args->width, hwe, 0,
+ args->extensions);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(q))
@@ -704,12 +755,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
}
- if (args->extensions) {
- err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
- if (XE_IOCTL_DBG(xe, err))
- goto kill_exec_queue;
- }
-
mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock);
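Two patterns in this file are worth noting. First, struct_size() replaces the open-coded flexible-array allocation with an overflow-checked equivalent; a generic sketch:

	struct example {
		u16 width;
		struct xe_lrc lrc[];	/* flexible array member */
	};

	/* overflow-checked form of
	 * kzalloc(sizeof(*e) + sizeof(struct xe_lrc) * width, GFP_KERNEL) */
	e = kzalloc(struct_size(e, lrc, width), GFP_KERNEL);

Second, creation is now split into __xe_exec_queue_alloc() / __xe_exec_queue_init() with a matching __xe_exec_queue_free(), so user extensions can be parsed after allocation but before the LRCs are initialized, and every error path unwinds through the same free helper.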
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index d959cc4a1a82..02ce8d204622 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -16,7 +16,8 @@ struct xe_file;
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
- struct xe_hw_engine *hw_engine, u32 flags);
+ struct xe_hw_engine *hw_engine, u32 flags,
+ u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
enum xe_engine_class class, u32 flags);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 947bbc4b285d..c40240e88068 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -105,58 +105,49 @@ struct xe_exec_queue {
struct xe_guc_exec_queue *guc;
};
- union {
- /**
- * @parallel: parallel submission state
- */
- struct {
- /** @composite_fence_ctx: context composite fence */
- u64 composite_fence_ctx;
- /** @composite_fence_seqno: seqno for composite fence */
- u32 composite_fence_seqno;
- } parallel;
- /**
- * @bind: bind submission state
- */
- struct {
- /** @fence_ctx: context bind fence */
- u64 fence_ctx;
- /** @fence_seqno: seqno for bind fence */
- u32 fence_seqno;
- } bind;
- };
+ /**
+ * @parallel: parallel submission state
+ */
+ struct {
+ /** @parallel.composite_fence_ctx: context composite fence */
+ u64 composite_fence_ctx;
+ /** @parallel.composite_fence_seqno: seqno for composite fence */
+ u32 composite_fence_seqno;
+ } parallel;
/** @sched_props: scheduling properties */
struct {
- /** @timeslice_us: timeslice period in micro-seconds */
+ /** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
- /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
- /** @priority: priority of this exec queue */
+ /** @sched_props.job_timeout_ms: job timeout in milliseconds */
+ u32 job_timeout_ms;
+ /** @sched_props.priority: priority of this exec queue */
enum xe_exec_queue_priority priority;
} sched_props;
/** @compute: compute exec queue state */
struct {
- /** @pfence: preemption fence */
+ /** @compute.pfence: preemption fence */
struct dma_fence *pfence;
- /** @context: preemption fence context */
+ /** @compute.context: preemption fence context */
u64 context;
- /** @seqno: preemption fence seqno */
+ /** @compute.seqno: preemption fence seqno */
u32 seqno;
- /** @link: link into VM's list of exec queues */
+ /** @compute.link: link into VM's list of exec queues */
struct list_head link;
- /** @lock: preemption fences lock */
+ /** @compute.lock: preemption fences lock */
spinlock_t lock;
} compute;
/** @usm: unified shared memory state */
struct {
- /** @acc_trigger: access counter trigger */
+ /** @usm.acc_trigger: access counter trigger */
u32 acc_trigger;
- /** @acc_notify: access counter notify */
+ /** @usm.acc_notify: access counter notify */
u32 acc_notify;
- /** @acc_granularity: access counter granularity */
+ /** @usm.acc_granularity: access counter granularity */
u32 acc_granularity;
} usm;
@@ -188,8 +179,6 @@ struct xe_exec_queue_ops {
int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
/** @set_preempt_timeout: Set preemption timeout for exec queue */
int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
- /** @set_job_timeout: Set job timeout for exec queue */
- int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
/**
* @suspend: Suspend exec queue from executing, allowed to be called
* multiple times in a row before resume with the caveat that
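Most of the xe_exec_queue_types.h hunk is mechanical: kernel-doc requires members of nested structs to be documented with their full path, hence the @parallel.*, @sched_props.*, @compute.* and @usm.* prefixes. The convention, in general:

	/** @sched_props: scheduling properties */
	struct {
		/** @sched_props.timeslice_us: timeslice period in micro-seconds */
		u32 timeslice_us;
	} sched_props;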
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 42d01bbbf7d0..1788e78caf5c 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -416,13 +416,6 @@ static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
return 0;
}
-static int execlist_exec_queue_set_job_timeout(struct xe_exec_queue *q,
- u32 job_timeout_ms)
-{
- /* NIY */
- return 0;
-}
-
static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
{
/* NIY */
@@ -453,7 +446,6 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
- .set_job_timeout = execlist_exec_queue_set_job_timeout,
.suspend = execlist_exec_queue_suspend,
.suspend_wait = execlist_exec_queue_suspend_wait,
.resume = execlist_exec_queue_resume,
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 3efd2d066bf7..ab96edb058d6 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -11,12 +11,16 @@
#include <drm/i915_drm.h>
#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
#include "xe_wopcm.h"
#define XELPG_GGTT_PTE_PAT0 BIT_ULL(52)
@@ -141,7 +145,11 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
unsigned int gsm_size;
- gsm_size = probe_gsm_size(pdev);
+ if (IS_SRIOV_VF(xe))
+ gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
+ else
+ gsm_size = probe_gsm_size(pdev);
+
if (gsm_size == 0) {
drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
return -ENOMEM;
@@ -312,6 +320,74 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
}
}
+static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
+ const struct drm_mm_node *node, const char *description)
+{
+ char buf[10];
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
+ node->start, node->start + node->size, buf, description);
+ }
+}
+
+/**
+ * xe_ggtt_balloon - prevent allocation of specified GGTT addresses
+ * @ggtt: the &xe_ggtt where we want to make reservation
+ * @start: the starting GGTT address of the reserved region
+ * @end: the end GGTT address of the reserved region
+ * @node: the &drm_mm_node to hold reserved GGTT node
+ *
+ * Use xe_ggtt_deballoon() to release a reserved GGTT node.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node)
+{
+ int err;
+
+ xe_tile_assert(ggtt->tile, start < end);
+ xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
+ xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
+ xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(node));
+
+ node->color = 0;
+ node->start = start;
+ node->size = end - start;
+
+ mutex_lock(&ggtt->lock);
+ err = drm_mm_reserve_node(&ggtt->mm, node);
+ mutex_unlock(&ggtt->lock);
+
+ if (xe_gt_WARN(ggtt->tile->primary_gt, err,
+ "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
+ node->start, node->start + node->size, ERR_PTR(err)))
+ return err;
+
+ xe_ggtt_dump_node(ggtt, node, "balloon");
+ return 0;
+}
+
+/**
+ * xe_ggtt_deballoon - release a reserved GGTT region
+ * @ggtt: the &xe_ggtt where reserved node belongs
+ * @node: the &drm_mm_node with reserved GGTT region
+ *
+ * See xe_ggtt_balloon() for details.
+ */
+void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+{
+ if (!drm_mm_node_allocated(node))
+ return;
+
+ xe_ggtt_dump_node(ggtt, node, "deballoon");
+
+ mutex_lock(&ggtt->lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&ggtt->lock);
+}
+
int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct drm_mm_node *node,
u32 size, u32 align, u32 mm_flags)
{
@@ -334,7 +410,8 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
+ u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
u64 start = bo->ggtt_node.start;
u64 offset, pte;
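A usage sketch for the new ballooning helpers, assuming a hypothetical VF flow where a GGTT range assigned by the PF must be kept away from the allocator:

	static struct drm_mm_node balloon;	/* hypothetical storage */

	/* keep [start, end) out of the GGTT allocator */
	err = xe_ggtt_balloon(ggtt, start, end, &balloon);
	if (err)
		return err;

	/* ... and on teardown, hand the range back */
	xe_ggtt_deballoon(ggtt, &balloon);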
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index a09c166dff70..42705e1338e1 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -16,6 +16,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt);
int xe_ggtt_init(struct xe_ggtt *ggtt);
void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
+int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node);
+void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node);
+
int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
u32 size, u32 align);
int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index a8a895cf4b44..a61994292c43 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -7,12 +7,14 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
+
#include "abi/gsc_mkhi_commands_abi.h"
-#include "generated/xe_wa_oob.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
+#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
@@ -242,8 +244,31 @@ static int gsc_upload(struct xe_gsc *gsc)
if (err)
return err;
+ return 0;
+}
+
+static int gsc_upload_and_init(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int ret;
+
+ ret = gsc_upload(gsc);
+ if (ret)
+ return ret;
+
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
xe_gt_dbg(gt, "GSC FW async load completed\n");
+ /* HuC auth failure is not fatal */
+ if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
+ xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);
+
+ ret = xe_gsc_proxy_start(gsc);
+ if (ret)
+ return ret;
+
+ xe_gt_dbg(gt, "GSC proxy init completed\n");
+
return 0;
}
@@ -252,24 +277,28 @@ static void gsc_work(struct work_struct *work)
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
+ u32 actions;
int ret;
+ spin_lock_irq(&gsc->lock);
+ actions = gsc->work_actions;
+ gsc->work_actions = 0;
+ spin_unlock_irq(&gsc->lock);
+
xe_device_mem_access_get(xe);
xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- ret = gsc_upload(gsc);
- if (ret && ret != -EEXIST) {
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
- goto out;
+ if (actions & GSC_ACTION_FW_LOAD) {
+ ret = gsc_upload_and_init(gsc);
+ if (ret && ret != -EEXIST)
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
+ else
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
}
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
-
- /* HuC auth failure is not fatal */
- if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
- xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);
+ if (actions & GSC_ACTION_SW_PROXY)
+ xe_gsc_proxy_request_handler(gsc);
-out:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
xe_device_mem_access_put(xe);
}
@@ -282,6 +311,7 @@ int xe_gsc_init(struct xe_gsc *gsc)
gsc->fw.type = XE_UC_FW_TYPE_GSC;
INIT_WORK(&gsc->work, gsc_work);
+ spin_lock_init(&gsc->lock);
/* The GSC uC is only available on the media GT */
if (tile->media_gt && (gt != tile->media_gt)) {
@@ -302,6 +332,10 @@ int xe_gsc_init(struct xe_gsc *gsc)
else if (ret)
goto out;
+ ret = xe_gsc_proxy_init(gsc);
+ if (ret && ret != -ENODEV)
+ goto out;
+
return 0;
out:
@@ -356,7 +390,7 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
q = xe_exec_queue_create(xe, NULL,
BIT(hwe->logical_instance), 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT);
+ EXEC_QUEUE_FLAG_PERMANENT, 0);
if (IS_ERR(q)) {
xe_gt_err(gt, "Failed to create queue for GSC submission\n");
err = PTR_ERR(q);
@@ -401,6 +435,10 @@ void xe_gsc_load_start(struct xe_gsc *gsc)
return;
}
+ spin_lock_irq(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_FW_LOAD;
+ spin_unlock_irq(&gsc->lock);
+
queue_work(gsc->wq, &gsc->work);
}
@@ -410,6 +448,15 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
+/**
+ * xe_gsc_remove() - Clean up the GSC structures before driver removal
+ * @gsc: the GSC uC
+ */
+void xe_gsc_remove(struct xe_gsc *gsc)
+{
+ xe_gsc_proxy_remove(gsc);
+}
+
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then
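The gsc_work() rework follows a standard accumulate-and-drain pattern: producers OR an action bit into a mask under the spinlock and queue the worker; the worker snapshots and clears the mask before acting, so requests raised while it runs are not lost. Schematically (handle_proxy() is a stand-in for the real handlers):

	/* producer side */
	spin_lock_irq(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_SW_PROXY;
	spin_unlock_irq(&gsc->lock);
	queue_work(gsc->wq, &gsc->work);

	/* worker side */
	spin_lock_irq(&gsc->lock);
	actions = gsc->work_actions;
	gsc->work_actions = 0;
	spin_unlock_irq(&gsc->lock);
	if (actions & GSC_ACTION_SW_PROXY)
		handle_proxy();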
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index bc1ef7f31ea2..c6fb32e3fd79 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -14,6 +14,7 @@ int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
+void xe_gsc_remove(struct xe_gsc *gsc);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
new file mode 100644
index 000000000000..309ef80e3b95
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gsc_proxy.h"
+
+#include <linux/component.h>
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+#include <drm/i915_component.h>
+#include <drm/i915_gsc_proxy_mei_interface.h>
+
+#include "abi/gsc_proxy_commands_abi.h"
+#include "regs/xe_gsc_regs.h"
+#include "xe_bo.h"
+#include "xe_gsc.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_pm.h"
+
+/*
+ * GSC proxy:
+ * The GSC uC needs to communicate with the CSME to perform certain operations.
+ * Since the GSC can't perform this communication directly on platforms where it
+ * is integrated in GT, the graphics driver needs to transfer the messages from
+ * GSC to CSME and back. The proxy flow must be manually started after the GSC
+ * is loaded to signal to GSC that we're ready to handle its messages and allow
+ * it to query its init data from CSME; GSC will then trigger an HECI2 interrupt
+ * if it needs to send messages to CSME again.
+ * The proxy flow is as follows:
+ * 1 - Xe submits a request to GSC asking for the message to CSME
+ * 2 - GSC replies with the proxy header + payload for CSME
+ * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
+ * 4 - CSME replies with the proxy header + payload for GSC
+ * 5 - Xe submits a request to GSC with the reply from CSME
+ * 6 - GSC replies either with a new header + payload (same as step 2, so we
+ * restart from there) or with an end message.
+ */
+
+/*
+ * The component should load quite quickly in most cases, but it could take
+ * a bit. Using a very big timeout just to cover the worst case scenario
+ */
+#define GSC_PROXY_INIT_TIMEOUT_MS 20000
+
+/* shorthand define for code compactness */
+#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header))
+
+/* the protocol supports up to 32K in each direction */
+#define GSC_PROXY_BUFFER_SIZE SZ_32K
+#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)
+
+static struct xe_gt *
+gsc_to_gt(struct xe_gsc *gsc)
+{
+ return container_of(gsc, struct xe_gt, uc.gsc);
+}
+
+static inline struct xe_device *kdev_to_xe(struct device *kdev)
+{
+ return dev_get_drvdata(kdev);
+}
+
+static bool gsc_proxy_init_done(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
+
+ return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
+ HECI1_FWSTS1_PROXY_STATE_NORMAL;
+}
+
+static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* make sure we never accidentally write the RST bit */
+ clr |= HECI_H_CSR_RST;
+
+ xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
+}
+
+static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
+{
+ /* The status bit is cleared by writing to it */
+ __gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
+}
+
+static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
+{
+ u32 set = enabled ? HECI_H_CSR_IE : 0;
+ u32 clr = enabled ? 0 : HECI_H_CSR_IE;
+
+ __gsc_proxy_irq_rmw(gsc, clr, set);
+}
+
+static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct i915_gsc_proxy_component *comp = gsc->proxy.component;
+ int ret;
+
+ ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size);
+ if (ret < 0) {
+ xe_gt_err(gt, "Failed to send CSME proxy message\n");
+ return ret;
+ }
+
+ ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE);
+ if (ret < 0) {
+ xe_gt_err(gt, "Failed to receive CSME proxy message\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo);
+ u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
+ int err;
+
+ /* the message, headers included, must fit in the proxy send buffer */
+ if (size > GSC_PROXY_BUFFER_SIZE) {
+ xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
+ return -EINVAL;
+ }
+
+ err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size,
+ addr_out, GSC_PROXY_BUFFER_SIZE);
+ if (err) {
+ xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ return 0;
+}
+
+static int validate_proxy_header(struct xe_gsc_proxy_header *header,
+ u32 source, u32 dest, u32 max_size)
+{
+ u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
+ u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
+
+ if (header->destination != dest || header->source != source)
+ return -ENOEXEC;
+
+ if (length + PROXY_HDR_SIZE > max_size)
+ return -E2BIG;
+
+ switch (type) {
+ case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
+ if (length > 0)
+ break;
+ fallthrough;
+ case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
+ return -EIO;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
+ xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_)
+
+#define proxy_header_rd(xe_, map_, offset_, field_) \
+ xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_)
+
+static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
+{
+ xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);
+
+ proxy_header_wr(xe, map, offset, hdr,
+ FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
+ FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0));
+
+ proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
+ proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
+ proxy_header_wr(xe, map, offset, status, 0);
+
+ return offset + PROXY_HDR_SIZE;
+}
+
+static int proxy_query(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme;
+ void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE;
+ u32 wr_offset;
+ u32 reply_offset;
+ u32 size;
+ int ret;
+
+ wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
+ HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE);
+ wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset);
+
+ size = wr_offset;
+
+ while (1) {
+ /*
+ * Poison the GSC response header space to make sure we don't
+ * read a stale reply.
+ */
+ xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0);
+
+ /* send proxy message to GSC */
+ ret = proxy_send_to_gsc(gsc, size);
+ if (ret)
+ goto proxy_error;
+
+ /* check the reply from GSC */
+ ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0,
+ PROXY_HDR_SIZE, &reply_offset);
+ if (ret) {
+ xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n",
+ ERR_PTR(ret));
+ goto proxy_error;
+ }
+
+ /* copy the proxy header reply from GSC */
+ xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
+ reply_offset, PROXY_HDR_SIZE);
+
+ /* stop if this was the last message */
+ if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
+ break;
+
+ /* make sure the GSC-to-CSME proxy header is sane */
+ ret = validate_proxy_header(to_csme_hdr,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_ADDRESSING_CSME,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
+ if (ret) {
+ xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n",
+ ERR_PTR(ret));
+ goto proxy_error;
+ }
+
+ /* copy the rest of the message */
+ size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr);
+ xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc,
+ reply_offset + PROXY_HDR_SIZE, size);
+
+ /* send the GSC message to the CSME */
+ ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE);
+ if (ret < 0)
+ goto proxy_error;
+
+ /* reply size from CSME, including the proxy header */
+ size = ret;
+ if (size < PROXY_HDR_SIZE) {
+ xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size);
+ ret = -EPROTO;
+ goto proxy_error;
+ }
+
+ /* make sure the CSME-to-GSC proxy header is sane */
+ ret = validate_proxy_header(gsc->proxy.from_csme,
+ GSC_PROXY_ADDRESSING_CSME,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
+ if (ret) {
+ xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
+ goto proxy_error;
+ }
+
+ /* Emit a new header for sending the reply to the GSC */
+ wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
+ HECI_MEADDRESS_PROXY, 0, size);
+
+ /* copy the CSME reply and update the total msg size to include the GSC header */
+ xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size);
+
+ size += wr_offset;
+ }
+
+proxy_error:
+ return ret < 0 ? ret : 0;
+}
+
+int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int slept;
+ int err;
+
+ if (!gsc->proxy.component_added)
+ return -ENODEV;
+
+ /* GSC load can queue this work before the mei component is bound; wait for the bind */
+ for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) {
+ if (gsc->proxy.component)
+ break;
+
+ msleep(100);
+ }
+
+ mutex_lock(&gsc->proxy.mutex);
+ if (!gsc->proxy.component) {
+ xe_gt_err(gt, "GSC proxy component not bound!\n");
+ err = -EIO;
+ } else {
+ /*
+ * clear the pending interrupt and allow new proxy requests to
+ * be generated while we handle the current one
+ */
+ gsc_proxy_irq_clear(gsc);
+ err = proxy_query(gsc);
+ }
+ mutex_unlock(&gsc->proxy.mutex);
+ return err;
+}
+
+void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ if (unlikely(!iir))
+ return;
+
+ if (!gsc->proxy.component) {
+ xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
+ return;
+ }
+
+ spin_lock(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_SW_PROXY;
+ spin_unlock(&gsc->lock);
+
+ queue_work(gsc->wq, &gsc->work);
+}
+
+static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_gt *gt = xe->tiles[0].media_gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+
+ mutex_lock(&gsc->proxy.mutex);
+ gsc->proxy.component = data;
+ gsc->proxy.component->mei_dev = mei_kdev;
+ mutex_unlock(&gsc->proxy.mutex);
+
+ return 0;
+}
+
+static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_gt *gt = xe->tiles[0].media_gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+
+ xe_gsc_wait_for_worker_completion(gsc);
+
+ mutex_lock(&gsc->proxy.mutex);
+ gsc->proxy.component = NULL;
+ mutex_unlock(&gsc->proxy.mutex);
+}
+
+static const struct component_ops xe_gsc_proxy_component_ops = {
+ .bind = xe_gsc_proxy_component_bind,
+ .unbind = xe_gsc_proxy_component_unbind,
+};
+
+static void proxy_channel_free(struct drm_device *drm, void *arg)
+{
+ struct xe_gsc *gsc = arg;
+
+ if (!gsc->proxy.bo)
+ return;
+
+ if (gsc->proxy.to_csme) {
+ kfree(gsc->proxy.to_csme);
+ gsc->proxy.to_csme = NULL;
+ gsc->proxy.from_csme = NULL;
+ }
+
+ if (gsc->proxy.bo) {
+ iosys_map_clear(&gsc->proxy.to_gsc);
+ iosys_map_clear(&gsc->proxy.from_gsc);
+ xe_bo_unpin_map_no_vm(gsc->proxy.bo);
+ gsc->proxy.bo = NULL;
+ }
+}
+
+static int proxy_channel_alloc(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_bo *bo;
+ void *csme;
+ int err;
+
+ csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
+ if (!csme)
+ return -ENOMEM;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo)) {
+ kfree(csme);
+ return PTR_ERR(bo);
+ }
+
+ gsc->proxy.bo = bo;
+ gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
+ gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);
+ gsc->proxy.to_csme = csme;
+ gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;
+
+ err = drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
+ * @gsc: the GSC uC
+ *
+ * Return: 0 if the initialization was successful, a negative errno otherwise.
+ */
+int xe_gsc_proxy_init(struct xe_gsc *gsc)
+{
+ int err;
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+
+ mutex_init(&gsc->proxy.mutex);
+
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
+ xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n");
+ return -ENODEV;
+ }
+
+ /* no multi-tile devices with this feature yet */
+ if (tile->id > 0) {
+ xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
+ return -EINVAL;
+ }
+
+ err = proxy_channel_alloc(gsc);
+ if (err)
+ return err;
+
+ err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops,
+ I915_COMPONENT_GSC_PROXY);
+ if (err < 0) {
+ xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ gsc->proxy.component_added = true;
+
+ /* the component must be removed before unload, so can't use drmm for cleanup */
+
+ return 0;
+}
+
+/**
+ * xe_gsc_proxy_remove() - remove the GSC proxy MEI component
+ * @gsc: the GSC uC
+ */
+void xe_gsc_proxy_remove(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ int err = 0;
+
+ if (!gsc->proxy.component_added)
+ return;
+
+ /* disable HECI2 IRQs */
+ xe_pm_runtime_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (err)
+ xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
+
+ /* try to disable the IRQ even if forcewake failed */
+ gsc_proxy_irq_toggle(gsc, false);
+
+ if (!err)
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_pm_runtime_put(xe);
+
+ xe_gsc_wait_for_worker_completion(gsc);
+
+ component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
+ gsc->proxy.component_added = false;
+}
+
+/**
+ * xe_gsc_proxy_start() - start the proxy by submitting the first request
+ * @gsc: the GSC uC
+ *
+ * Return: 0 if the proxy is now enabled, a negative errno otherwise.
+ */
+int xe_gsc_proxy_start(struct xe_gsc *gsc)
+{
+ int err;
+
+ /* enable the proxy interrupt in the GSC shim layer */
+ gsc_proxy_irq_toggle(gsc, true);
+
+ /*
+ * The handling of the first proxy request must be manually triggered to
+ * notify the GSC that we're ready to support the proxy flow.
+ */
+ err = xe_gsc_proxy_request_handler(gsc);
+ if (err)
+ return err;
+
+ if (!gsc_proxy_init_done(gsc)) {
+ xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
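proxy_query() implements the six-step flow described in the header comment of this file; stripped of buffer and header management it reduces to the following loop (all helpers hypothetical):

	send_query_to_gsc();					/* step 1 */
	for (;;) {
		reply = read_reply_from_gsc();			/* step 2 */
		if (type_of(reply) == GSC_PROXY_MSG_TYPE_PROXY_END)
			break;					/* step 6, end message */
		csme_reply = forward_to_csme(reply);		/* steps 3-4 */
		send_to_gsc(csme_reply);			/* step 5, back to step 2 */
	}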
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
new file mode 100644
index 000000000000..908f9441f093
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GSC_PROXY_H_
+#define _XE_GSC_PROXY_H_
+
+#include <linux/types.h>
+
+struct xe_gsc;
+
+int xe_gsc_proxy_init(struct xe_gsc *gsc);
+void xe_gsc_proxy_remove(struct xe_gsc *gsc);
+int xe_gsc_proxy_start(struct xe_gsc *gsc);
+
+int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
+void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.c b/drivers/gpu/drm/xe/xe_gsc_submit.c
index 8c5381e5913f..348994b271be 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.c
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.c
@@ -5,6 +5,8 @@
#include "xe_gsc_submit.h"
+#include <linux/poison.h>
+
#include "abi/gsc_command_header_abi.h"
#include "xe_bb.h"
#include "xe_exec_queue.h"
@@ -69,6 +71,17 @@ u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
};
/**
+ * xe_gsc_poison_header - poison the MTL GSC header in memory
+ * @xe: the Xe device
+ * @map: the iosys map to write to
+ * @offset: offset from the start of the map at which the header resides
+ */
+void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
+{
+ xe_map_memset(xe, map, offset, POISON_FREE, GSC_HDR_SIZE);
+};
+
+/**
* xe_gsc_check_and_update_pending - check the pending bit and update the input
* header with the retry handle from the output header
* @xe: the Xe device
@@ -112,11 +125,18 @@ int xe_gsc_read_out_header(struct xe_device *xe,
{
u32 marker = mtl_gsc_header_rd(xe, map, offset, validity_marker);
u32 size = mtl_gsc_header_rd(xe, map, offset, message_size);
+ u32 status = mtl_gsc_header_rd(xe, map, offset, status);
u32 payload_size = size - GSC_HDR_SIZE;
if (marker != GSC_HECI_VALIDITY_MARKER)
return -EPROTO;
+ if (status != 0) {
+ drm_err(&xe->drm, "GSC header readout indicates error: %d\n",
+ status);
+ return -EINVAL;
+ }
+
if (size < GSC_HDR_SIZE || payload_size < min_payload_size)
return -ENODATA;
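xe_gsc_poison_header() enables a detect-stale-reply pattern: the reply area is pre-filled with POISON_FREE bytes before submission, so a header that still fails validation afterwards is known to be unwritten rather than merely malformed. Roughly (the submission call is a stand-in):

	xe_gsc_poison_header(xe, &out_map, 0);	/* pre-fill reply area */
	err = submit_to_gsc();			/* hypothetical submission */
	if (!err)
		err = xe_gsc_read_out_header(xe, &out_map, 0, min_size, &rd_offset);
	/* -EPROTO here now implies the GSC never wrote a reply */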
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.h b/drivers/gpu/drm/xe/xe_gsc_submit.h
index 0801da5d446a..1939855031a6 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.h
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.h
@@ -14,6 +14,7 @@ struct xe_gsc;
u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
u8 heci_client_id, u64 host_session_id, u32 payload_size);
+void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset);
bool xe_gsc_check_and_update_pending(struct xe_device *xe,
struct iosys_map *in, u32 offset_in,
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
index 57fefd66a7ea..138d8cc0f19c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_types.h
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -6,12 +6,17 @@
#ifndef _XE_GSC_TYPES_H_
#define _XE_GSC_TYPES_H_
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
struct xe_bo;
struct xe_exec_queue;
+struct i915_gsc_proxy_component;
/**
* struct xe_gsc - GSC
@@ -34,6 +39,34 @@ struct xe_gsc {
/** @work: delayed load and proxy handling work */
struct work_struct work;
+
+ /** @lock: protects access to the work_actions mask */
+ spinlock_t lock;
+
+ /** @work_actions: mask of actions to be performed in the work */
+ u32 work_actions;
+#define GSC_ACTION_FW_LOAD BIT(0)
+#define GSC_ACTION_SW_PROXY BIT(1)
+
+ /** @proxy: sub-structure containing the SW proxy-related variables */
+ struct {
+ /** @proxy.component: struct for communication with mei component */
+ struct i915_gsc_proxy_component *component;
+ /** @proxy.mutex: protects the component binding and usage */
+ struct mutex mutex;
+ /** @proxy.component_added: whether the component has been added */
+ bool component_added;
+ /** @proxy.bo: object to store message to and from the GSC */
+ struct xe_bo *bo;
+ /** @proxy.to_gsc: map of the memory used to send messages to the GSC */
+ struct iosys_map to_gsc;
+ /** @proxy.from_gsc: map of the memory used to recv messages from the GSC */
+ struct iosys_map from_gsc;
+ /** @proxy.to_csme: pointer to the memory used to send messages to CSME */
+ void *to_csme;
+ /** @proxy.from_csme: pointer to the memory used to recv messages from CSME */
+ void *from_csme;
+ } proxy;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 35474ddbaf97..b75f0bf0a9a1 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -78,6 +78,19 @@ void xe_gt_sanitize(struct xe_gt *gt)
gt->uc.guc.submission_state.enabled = false;
}
+/**
+ * xe_gt_remove() - Clean up the GT structures before driver removal
+ * @gt: the GT object
+ *
+ * This function should only act on objects/structures that must be cleaned
+ * before the driver removal callback is complete and therefore can't be
+ * deferred to a drmm action.
+ */
+void xe_gt_remove(struct xe_gt *gt)
+{
+ xe_uc_remove(&gt->uc);
+}
+
static void gt_fini(struct drm_device *drm, void *arg)
{
struct xe_gt *gt = arg;
@@ -235,7 +248,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
return -ENOMEM;
q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
- hwe, EXEC_QUEUE_FLAG_KERNEL);
+ hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
if (IS_ERR(q)) {
err = PTR_ERR(q);
xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
@@ -252,7 +265,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
}
nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
- 1, hwe, EXEC_QUEUE_FLAG_KERNEL);
+ 1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
if (IS_ERR(nop_q)) {
err = PTR_ERR(nop_q);
xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
@@ -302,7 +315,6 @@ int xe_gt_init_early(struct xe_gt *gt)
return err;
xe_gt_topology_init(gt);
- xe_gt_mcr_init(gt);
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
if (err)
@@ -327,7 +339,7 @@ static void dump_pat_on_error(struct xe_gt *gt)
char prefix[32];
snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
- p = drm_debug_printer(prefix);
+ p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);
xe_pat_dump(gt, &p);
}
@@ -341,8 +353,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
if (err)
goto err_hw_fence_irq;
- xe_pat_init(gt);
-
if (!xe_gt_is_media_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
if (err)
@@ -351,22 +361,8 @@ static int gt_fw_domain_init(struct xe_gt *gt)
xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
}
- err = xe_uc_init(&gt->uc);
- if (err)
- goto err_force_wake;
-
- /* Raise GT freq to speed up HuC/GuC load */
- xe_guc_pc_init_early(&gt->uc.guc.pc);
-
- err = xe_uc_init_hwconfig(&gt->uc);
- if (err)
- goto err_force_wake;
-
xe_gt_idle_sysfs_init(&gt->gtidle);
- /* XXX: Fake that we pull the engine mask from hwconfig blob */
- gt->info.engine_mask = gt->info.__engine_mask;
-
/* Enable per hw engine IRQs */
xe_irq_enable_hwe(gt);
@@ -386,6 +382,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
/* Initialize CCS mode sysfs after early initialization of HW engines */
xe_gt_ccs_mode_sysfs_init(gt);
+ /*
+ * Stash hardware-reported version. Since this register does not exist
+ * on pre-MTL platforms, reading it there will (correctly) return 0.
+ */
+ gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);
+
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
XE_WARN_ON(err);
xe_device_mem_access_put(gt_to_xe(gt));
@@ -428,10 +430,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (err)
goto err_force_wake;
- err = xe_uc_init_post_hwconfig(&gt->uc);
- if (err)
- goto err_force_wake;
-
if (!xe_gt_is_media_type(gt)) {
/*
* USM has its only SA pool to non-block behind user operations
@@ -458,6 +456,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
+ err = xe_uc_init_post_hwconfig(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
err = xe_uc_init_hw(&gt->uc);
if (err)
goto err_force_wake;
@@ -487,6 +489,41 @@ err_hw_fence_irq:
return err;
}
+/*
+ * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
+ * enable CTB communication.
+ */
+int xe_gt_init_hwconfig(struct xe_gt *gt)
+{
+ int err;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto out;
+
+ xe_gt_mcr_init(gt);
+ xe_pat_init(gt);
+
+ err = xe_uc_init(&gt->uc);
+ if (err)
+ goto out_fw;
+
+ err = xe_uc_init_hwconfig(&gt->uc);
+ if (err)
+ goto out_fw;
+
+ /* XXX: Fake that we pull the engine mask from hwconfig blob */
+ gt->info.engine_mask = gt->info.__engine_mask;
+
+out_fw:
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+out:
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return err;
+}
+
int xe_gt_init(struct xe_gt *gt)
{
int err;
@@ -622,12 +659,12 @@ static int gt_reset(struct xe_gt *gt)
if (err)
goto err_out;
+ xe_gt_tlb_invalidation_reset(gt);
+
err = do_gt_reset(gt);
if (err)
goto err_out;
- xe_gt_tlb_invalidation_reset(gt);
-
err = do_gt_restart(gt);
if (err)
goto err_out;
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 4486e083f5ef..ed6ea8057e35 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -33,6 +33,7 @@ static inline bool xe_fault_inject_gt_reset(void)
#endif
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
+int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
@@ -41,6 +42,7 @@ int xe_gt_suspend(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
+void xe_gt_remove(struct xe_gt *gt);
/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 8546cd3cc50d..a7ab9ba645f9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -10,6 +10,7 @@
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
/**
* DOC: GT Multicast/Replicated (MCR) Register Support
@@ -38,6 +39,8 @@
* ``init_steering_*()`` functions is to apply the platform-specific rules for
* each MCR register type to identify a steering target that will select a
* non-terminated instance.
+ *
+ * MCR registers are not available on Virtual Function (VF).
*/
#define STEER_SEMAPHORE XE_REG(0xFD0)
@@ -352,6 +355,9 @@ void xe_gt_mcr_init(struct xe_gt *gt)
BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);
+ if (IS_SRIOV_VF(xe))
+ return;
+
spin_lock_init(&gt->mcr_lock);
if (gt->info.type == XE_GT_TYPE_MEDIA) {
@@ -405,6 +411,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ if (IS_SRIOV_VF(xe))
+ return;
+
if (xe->info.platform == XE_DG2) {
u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
@@ -588,6 +597,8 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
u32 val;
bool steer;
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr,
&group, &instance);
@@ -619,6 +630,8 @@ u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
{
u32 val;
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
mcr_lock(gt);
val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
mcr_unlock(gt);
@@ -640,6 +653,8 @@ u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
u32 value, int group, int instance)
{
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
mcr_lock(gt);
rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value);
mcr_unlock(gt);
@@ -658,6 +673,8 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
{
struct xe_reg reg = to_xe_reg(reg_mcr);
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
/*
* Synchronize with any unicast operations. Once we have exclusive
* access, the MULTICAST bit should already be set, so there's no need
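Since MCR registers are not available to a VF, this file now guards its entry points in two ways: setup paths silently return, while the unicast/multicast accessors assert, catching callers that should never run on a VF:

	/* setup path: nothing to do on a VF */
	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	/* access path: reaching here on a VF is a driver bug */
	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));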
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 73f08f1924df..c26e4fcca01e 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -285,9 +285,9 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
bool ret = false;
spin_lock_irq(&pf_queue->lock);
- if (pf_queue->head != pf_queue->tail) {
+ if (pf_queue->tail != pf_queue->head) {
desc = (const struct xe_guc_pagefault_desc *)
- (pf_queue->data + pf_queue->head);
+ (pf_queue->data + pf_queue->tail);
pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
@@ -305,7 +305,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
PFD_VIRTUAL_ADDR_LO_SHIFT;
- pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) %
+ pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
PF_QUEUE_NUM_DW;
ret = true;
}
@@ -318,7 +318,7 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
{
lockdep_assert_held(&pf_queue->lock);
- return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
PF_MSG_LEN_DW;
}
@@ -331,6 +331,11 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
u32 asid;
bool full;
+ /*
+ * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
+ */
+ BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);
+
if (unlikely(len != PF_MSG_LEN_DW))
return -EPROTO;
@@ -340,8 +345,8 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
if (!full) {
- memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32));
- pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
+ memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
+ pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
@@ -387,7 +392,7 @@ static void pf_queue_work_func(struct work_struct *w)
send_pagefault_reply(&gt->uc.guc, &reply);
if (time_after(jiffies, threshold) &&
- pf_queue->head != pf_queue->tail) {
+ pf_queue->tail != pf_queue->head) {
queue_work(gt->usm.pf_wq, w);
break;
}
@@ -562,9 +567,9 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
bool ret = false;
spin_lock(&acc_queue->lock);
- if (acc_queue->head != acc_queue->tail) {
+ if (acc_queue->tail != acc_queue->head) {
desc = (const struct xe_guc_acc_desc *)
- (acc_queue->data + acc_queue->head);
+ (acc_queue->data + acc_queue->tail);
acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
@@ -577,7 +582,7 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);
- acc_queue->head = (acc_queue->head + ACC_MSG_LEN_DW) %
+ acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
ACC_QUEUE_NUM_DW;
ret = true;
}
@@ -605,7 +610,7 @@ static void acc_queue_work_func(struct work_struct *w)
}
if (time_after(jiffies, threshold) &&
- acc_queue->head != acc_queue->tail) {
+ acc_queue->tail != acc_queue->head) {
queue_work(gt->usm.acc_wq, w);
break;
}
@@ -616,7 +621,7 @@ static bool acc_queue_full(struct acc_queue *acc_queue)
{
lockdep_assert_held(&acc_queue->lock);
- return CIRC_SPACE(acc_queue->tail, acc_queue->head, ACC_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
ACC_MSG_LEN_DW;
}
@@ -627,6 +632,11 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
u32 asid;
bool full;
+ /*
+ * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
+ */
+ BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);
+
if (unlikely(len != ACC_MSG_LEN_DW))
return -EPROTO;
@@ -636,9 +646,9 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
spin_lock(&acc_queue->lock);
full = acc_queue_full(acc_queue);
if (!full) {
- memcpy(acc_queue->data + acc_queue->tail, msg,
+ memcpy(acc_queue->data + acc_queue->head, msg,
len * sizeof(u32));
- acc_queue->tail = (acc_queue->tail + len) % ACC_QUEUE_NUM_DW;
+ acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
queue_work(gt->usm.acc_wq, &acc_queue->worker);
} else {
drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
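The head/tail swap throughout this file aligns the fault and access-counter queues with the kernel's circular-buffer convention (Documentation/core-api/circular-buffers.rst): the producer writes at head, the consumer reads at tail, and CIRC_SPACE(head, tail, size) returns the free space. The producer side, schematically (names are stand-ins):

	if (CIRC_SPACE(q->head, q->tail, QUEUE_NUM_DW) >= MSG_LEN_DW) {
		memcpy(q->data + q->head, msg, MSG_LEN_DW * sizeof(u32));
		q->head = (q->head + MSG_LEN_DW) % QUEUE_NUM_DW;
	}

The new BUILD_BUG_ON()s guarantee the modulo wrap can never split a message across the end of the buffer.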
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index 5991bcadd47e..c2b004d3f48e 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -43,4 +43,48 @@
#define xe_gt_WARN_ON_ONCE(_gt, _condition) \
xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition))
+static inline void __xe_gt_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+
+ xe_gt_err(gt, "%pV", vaf);
+}
+
+static inline void __xe_gt_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+
+ xe_gt_info(gt, "%pV", vaf);
+}
+
+/**
+ * xe_gt_err_printer - Construct a &drm_printer that outputs to xe_gt_err()
+ * @gt: the &xe_gt pointer to use in xe_gt_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_err_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_err,
+ .arg = gt,
+ };
+ return p;
+}
+
+/**
+ * xe_gt_info_printer - Construct a &drm_printer that outputs to xe_gt_info()
+ * @gt: the &xe_gt pointer to use in xe_gt_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_info_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_info,
+ .arg = gt,
+ };
+ return p;
+}
+
#endif
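A usage sketch for the new printers: any dump function that takes a struct drm_printer can be routed to the GT-prefixed error log, as the TLB-invalidation hunk further below does:

	struct drm_printer p = xe_gt_err_printer(gt);

	xe_guc_ct_print(&guc->ct, &p, true);	/* every line lands in xe_gt_err() */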
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
new file mode 100644
index 000000000000..17624b16300a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PRINTK_H_
+#define _XE_GT_SRIOV_PRINTK_H_
+
+#include "xe_gt_printk.h"
+#include "xe_sriov_printk.h"
+
+#define __xe_gt_sriov_printk(gt, _level, fmt, ...) \
+ xe_gt_printk((gt), _level, "%s" fmt, xe_sriov_printk_prefix(gt_to_xe(gt)), ##__VA_ARGS__)
+
+#define xe_gt_sriov_err(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, err, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_sriov_notice(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, notice, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_sriov_info(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, info, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_sriov_dbg(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, dbg, _fmt, ##__VA_ARGS__)
+
+/* for low level noisy debug messages */
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+#define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) xe_gt_sriov_dbg(_gt, _fmt, ##__VA_ARGS__)
+#else
+#define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) typecheck(struct xe_gt *, (_gt))
+#endif
+
+#endif
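These wrappers prepend the SR-IOV mode prefix from xe_sriov_printk_prefix() to the corresponding xe_gt_* message, e.g. (exact prefix strings are an assumption):

	xe_gt_sriov_dbg(gt, "GGTT ballooned %#llx-%#llx\n", start, end);
	/* -> something like "GT0: VF: GGTT ballooned 0x..." */

With CONFIG_DRM_XE_DEBUG_SRIOV disabled, xe_gt_sriov_dbg_verbose() compiles down to a typecheck() of the GT pointer, so call sites stay valid without emitting anything.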
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 7eef23a00d77..e3a4131ebb58 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -8,6 +8,7 @@
#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_trace.h"
@@ -30,8 +31,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
break;
trace_xe_gt_tlb_invalidation_fence_timeout(fence);
- drm_err(&gt_to_xe(gt)->drm, "gt%d: TLB invalidation fence timeout, seqno=%d recv=%d",
- gt->info.id, fence->seqno, gt->tlb_invalidation.seqno_recv);
+ xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
+ fence->seqno, gt->tlb_invalidation.seqno_recv);
list_del(&fence->link);
fence->base.error = -ETIME;
@@ -312,9 +313,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
*/
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
{
- struct xe_device *xe = gt_to_xe(gt);
struct xe_guc *guc = &gt->uc.guc;
- struct drm_printer p = drm_err_printer(__func__);
int ret;
/*
@@ -325,8 +324,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
tlb_invalidation_seqno_past(gt, seqno),
TLB_TIMEOUT);
if (!ret) {
- drm_err(&xe->drm, "gt%d: TLB invalidation time'd out, seqno=%d, recv=%d\n",
- gt->info.id, seqno, gt->tlb_invalidation.seqno_recv);
+ struct drm_printer p = xe_gt_err_printer(gt);
+
+ xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
+ seqno, gt->tlb_invalidation.seqno_recv);
xe_guc_ct_print(&guc->ct, &p, true);
return -ETIME;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index a8d7f272c30a..5dc62fe1be49 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -84,7 +84,7 @@ void
xe_gt_topology_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- struct drm_printer p = drm_debug_printer("GT topology");
+ struct drm_printer p;
int num_geometry_regs, num_compute_regs;
get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
@@ -107,6 +107,8 @@ xe_gt_topology_init(struct xe_gt *gt)
XE2_GT_COMPUTE_DSS_2);
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+ p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
+
xe_gt_topology_dump(gt, &p);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index f74684660475..70c615dd1498 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -103,20 +103,22 @@ struct xe_gt {
/** @info: GT info */
struct {
- /** @type: type of GT */
+ /** @info.type: type of GT */
enum xe_gt_type type;
- /** @id: Unique ID of this GT within the PCI Device */
+ /** @info.id: Unique ID of this GT within the PCI Device */
u8 id;
- /** @reference_clock: clock frequency */
+ /** @info.reference_clock: clock frequency */
u32 reference_clock;
- /** @engine_mask: mask of engines present on GT */
+ /** @info.engine_mask: mask of engines present on GT */
u64 engine_mask;
/**
- * @__engine_mask: mask of engines present on GT read from
+ * @info.__engine_mask: mask of engines present on GT read from
* xe_pci.c, used to fake reading the engine_mask from the
* hwconfig blob.
*/
u64 __engine_mask;
+ /** @info.gmdid: raw GMD_ID value from hardware */
+ u32 gmdid;
} info;
/**
@@ -125,14 +127,14 @@ struct xe_gt {
* specific offset, as well as their own forcewake handling.
*/
struct {
- /** @fw: force wake for GT */
+ /** @mmio.fw: force wake for GT */
struct xe_force_wake fw;
/**
- * @adj_limit: adjust MMIO address if address is below this
+ * @mmio.adj_limit: adjust MMIO address if address is below this
* value
*/
u32 adj_limit;
- /** @adj_offset: offect to add to MMIO address when adjusting */
+ /** @mmio.adj_offset: offset to add to MMIO address when adjusting */
u32 adj_offset;
} mmio;
@@ -144,7 +146,7 @@ struct xe_gt {
/** @reset: state for GT resets */
struct {
/**
- * @worker: work so GT resets can done async allowing to reset
+ * @reset.worker: work so GT resets can be done async, allowing the reset
* code to safely flush all code paths
*/
struct work_struct worker;
@@ -152,36 +154,37 @@ struct xe_gt {
/** @tlb_invalidation: TLB invalidation state */
struct {
- /** @seqno: TLB invalidation seqno, protected by CT lock */
+ /** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
#define TLB_INVALIDATION_SEQNO_MAX 0x100000
int seqno;
/**
- * @seqno_recv: last received TLB invalidation seqno, protected by CT lock
+ * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
+ * protected by CT lock
*/
int seqno_recv;
/**
- * @pending_fences: list of pending fences waiting TLB
+ * @tlb_invalidation.pending_fences: list of pending fences waiting on TLB
* invalidations, protected by CT lock
*/
struct list_head pending_fences;
/**
- * @pending_lock: protects @pending_fences and updating
- * @seqno_recv.
+ * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
+ * and updating @tlb_invalidation.seqno_recv.
*/
spinlock_t pending_lock;
/**
- * @fence_tdr: schedules a delayed call to
+ * @tlb_invalidation.fence_tdr: schedules a delayed call to
* xe_gt_tlb_fence_timeout after the timeout interval is over.
*/
struct delayed_work fence_tdr;
- /** @fence_context: context for TLB invalidation fences */
+ /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
u64 fence_context;
/**
- * @fence_seqno: seqno to TLB invalidation fences, protected by
+ * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
* tlb_invalidation.lock
*/
u32 fence_seqno;
- /** @lock: protects TLB invalidation fences */
+ /** @tlb_invalidation.lock: protects TLB invalidation fences */
spinlock_t lock;
} tlb_invalidation;
@@ -196,7 +199,7 @@ struct xe_gt {
/** @usm: unified shared memory state */
struct {
/**
- * @bb_pool: Pool from which batchbuffers, for USM operations
+ * @usm.bb_pool: Pool from which batchbuffers, for USM operations
* (e.g. migrations, fixing page tables), are allocated.
* A dedicated pool is needed so USM operations do not get blocked
* behind any user operations which may have resulted in a
@@ -204,66 +207,67 @@ struct xe_gt {
*/
struct xe_sa_manager *bb_pool;
/**
- * @reserved_bcs_instance: reserved BCS instance used for USM
+ * @usm.reserved_bcs_instance: reserved BCS instance used for USM
+ * operations (e.g. migrations, fixing page tables)
*/
u16 reserved_bcs_instance;
- /** @pf_wq: page fault work queue, unbound, high priority */
+ /** @usm.pf_wq: page fault work queue, unbound, high priority */
struct workqueue_struct *pf_wq;
- /** @acc_wq: access counter work queue, unbound, high priority */
+ /** @usm.acc_wq: access counter work queue, unbound, high priority */
struct workqueue_struct *acc_wq;
/**
- * @pf_queue: Page fault queue used to sync faults so faults can
+ * @usm.pf_queue: Page fault queue used to sync faults so faults can
* be processed outside the GuC CT lock. The queue is sized so
* it can sync all possible faults (1 per physical engine).
* Multiple queues exist so page faults from different VMs can
* be processed in parallel.
*/
struct pf_queue {
- /** @gt: back pointer to GT */
+ /** @usm.pf_queue.gt: back pointer to GT */
struct xe_gt *gt;
#define PF_QUEUE_NUM_DW 128
- /** @data: data in the page fault queue */
+ /** @usm.pf_queue.data: data in the page fault queue */
u32 data[PF_QUEUE_NUM_DW];
/**
- * @head: head pointer in DWs for page fault queue,
- * moved by worker which processes faults.
+ * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
+ * moved by worker which processes faults (consumer).
*/
- u16 head;
+ u16 tail;
/**
- * @tail: tail pointer in DWs for page fault queue,
- * moved by G2H handler.
+ * @usm.pf_queue.head: head pointer in DWs for page fault queue,
+ * moved by G2H handler (producer).
*/
- u16 tail;
- /** @lock: protects page fault queue */
+ u16 head;
+ /** @usm.pf_queue.lock: protects page fault queue */
spinlock_t lock;
- /** @worker: to process page faults */
+ /** @usm.pf_queue.worker: to process page faults */
struct work_struct worker;
#define NUM_PF_QUEUE 4
} pf_queue[NUM_PF_QUEUE];
/**
- * @acc_queue: Same as page fault queue, cannot process access
+ * @usm.acc_queue: Same as page fault queue, cannot process access
* counters under CT lock.
*/
struct acc_queue {
- /** @gt: back pointer to GT */
+ /** @usm.acc_queue.gt: back pointer to GT */
struct xe_gt *gt;
#define ACC_QUEUE_NUM_DW 128
- /** @data: data in the page fault queue */
+ /** @usm.acc_queue.data: data in the page fault queue */
u32 data[ACC_QUEUE_NUM_DW];
/**
- * @head: head pointer in DWs for page fault queue,
- * moved by worker which processes faults.
+ * @usm.acc_queue.tail: tail pointer in DWs for access counter queue,
+ * moved by worker which processes counters
+ * (consumer).
*/
- u16 head;
+ u16 tail;
/**
- * @tail: tail pointer in DWs for page fault queue,
- * moved by G2H handler.
+ * @usm.acc_queue.head: head pointer in DWs for access counter queue,
+ * moved by G2H handler (producer).
*/
- u16 tail;
- /** @lock: protects page fault queue */
+ u16 head;
+ /** @usm.acc_queue.lock: protects page fault queue */
spinlock_t lock;
- /** @worker: to process access counters */
+ /** @usm.acc_queue.worker: to process access counters */
struct work_struct worker;
#define NUM_ACC_QUEUE 4
} acc_queue[NUM_ACC_QUEUE];
@@ -300,7 +304,7 @@ struct xe_gt {
/** @pcode: GT's PCODE */
struct {
- /** @lock: protecting GT's PCODE mailbox data */
+ /** @pcode.lock: protecting GT's PCODE mailbox data */
struct mutex lock;
} pcode;
@@ -312,32 +316,32 @@ struct xe_gt {
/** @mocs: info */
struct {
- /** @uc_index: UC index */
+ /** @mocs.uc_index: UC index */
u8 uc_index;
- /** @wb_index: WB index, only used on L3_CCS platforms */
+ /** @mocs.wb_index: WB index, only used on L3_CCS platforms */
u8 wb_index;
} mocs;
/** @fuse_topo: GT topology reported by fuse registers */
struct {
- /** @g_dss_mask: dual-subslices usable by geometry */
+ /** @fuse_topo.g_dss_mask: dual-subslices usable by geometry */
xe_dss_mask_t g_dss_mask;
- /** @c_dss_mask: dual-subslices usable by compute */
+ /** @fuse_topo.c_dss_mask: dual-subslices usable by compute */
xe_dss_mask_t c_dss_mask;
- /** @eu_mask_per_dss: EU mask per DSS*/
+ /** @fuse_topo.eu_mask_per_dss: EU mask per DSS */
xe_eu_mask_t eu_mask_per_dss;
} fuse_topo;
/** @steering: register steering for individual HW units */
struct {
- /* @ranges: register ranges used for this steering type */
+ /** @steering.ranges: register ranges used for this steering type */
const struct xe_mmio_range *ranges;
- /** @group_target: target to steer accesses to */
+ /** @steering.group_target: target to steer accesses to */
u16 group_target;
- /** @instance_target: instance to steer accesses to */
+ /** @steering.instance_target: instance to steer accesses to */
u16 instance_target;
} steering[NUM_STEERING_TYPES];
@@ -349,13 +353,13 @@ struct xe_gt {
/** @wa_active: keep track of active workarounds */
struct {
- /** @gt: bitmap with active GT workarounds */
+ /** @wa_active.gt: bitmap with active GT workarounds */
unsigned long *gt;
- /** @engine: bitmap with active engine workarounds */
+ /** @wa_active.engine: bitmap with active engine workarounds */
unsigned long *engine;
- /** @lrc: bitmap with active LRC workarounds */
+ /** @wa_active.lrc: bitmap with active LRC workarounds */
unsigned long *lrc;
- /** @oob: bitmap with active OOB workaroudns */
+ /** @wa_active.oob: bitmap with active OOB workarounds */
unsigned long *oob;
} wa_active;
};
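
The head/tail swap above settles the producer/consumer roles: the G2H handler (producer) advances head, the worker (consumer) advances tail, both under the queue lock. A minimal sketch of the producer side, assuming the CIRC_SPACE() semantics from linux/circ_buf.h (pf_queue_push is hypothetical, not driver code; PF_QUEUE_NUM_DW is a power of two as CIRC_SPACE requires):

static bool pf_queue_push(struct pf_queue *q, const u32 *msg, u32 len)
{
	lockdep_assert_held(&q->lock);

	if (CIRC_SPACE(q->head, q->tail, PF_QUEUE_NUM_DW) < len)
		return false;	/* queue full: caller reports overflow */

	while (len--) {
		q->data[q->head] = *msg++;
		q->head = (q->head + 1) % PF_QUEUE_NUM_DW; /* producer */
	}
	return true;
}
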
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 0a61390c64a7..0d2a2dd13f11 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -7,9 +7,10 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
+
#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
-#include "generated/xe_wa_oob.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
@@ -21,9 +22,12 @@
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
+#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_sriov.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
@@ -129,22 +133,24 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
+#define GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
+
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_uc_fw *uc_fw = &guc->fw;
+ struct xe_uc_fw_version *version = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
+
u32 flags = 0;
if (XE_WA(gt, 22012773006))
flags |= GUC_WA_POLLCS;
- if (XE_WA(gt, 16011759253))
- flags |= GUC_WA_GAM_CREDITS;
-
if (XE_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
- if (XE_WA(gt, 22011391025) || XE_WA(gt, 14012197797))
+ if (XE_WA(gt, 22011391025))
flags |= GUC_WA_DUAL_QUEUE;
/*
@@ -155,9 +161,6 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (GRAPHICS_VERx100(xe) < 1270)
flags |= GUC_WA_PRE_PARSER;
- if (XE_WA(gt, 16011777198))
- flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
-
if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
@@ -168,6 +171,14 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
+ if (XE_WA(gt, 14018913170)) {
+ if (GUC_VER(version->major, version->minor, version->patch) >= GUC_VER(70, 7, 0))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
+ else
+ drm_dbg(&xe->drm, "Skip WA 14018913170: GUC version expected >= 70.7.0, found %u.%u.%u\n",
+ version->major, version->minor, version->patch);
+ }
+
return flags;
}
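
GUC_VER() packs major/minor/patch into one integer whose natural ordering matches release ordering, so the gate above reduces to a single unsigned comparison. For illustration (assuming each component fits its 8-bit field):

	/*
	 * GUC_VER(70, 7, 0)  == 0x00460700
	 * GUC_VER(70, 6, 15) == 0x0046060F	(< GUC_VER(70, 7, 0))
	 */
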
@@ -241,11 +252,54 @@ static void guc_fini(struct drm_device *drm, void *arg)
struct xe_guc *guc = arg;
xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
- xe_guc_pc_fini(&guc->pc);
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
}
+/**
+ * xe_guc_comm_init_early - early initialization of GuC communication
+ * @guc: the &xe_guc to initialize
+ *
+ * Must be called prior to first MMIO communication with GuC firmware.
+ */
+void xe_guc_comm_init_early(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (xe_gt_is_media_type(gt))
+ guc->notify_reg = MED_GUC_HOST_INTERRUPT;
+ else
+ guc->notify_reg = GUC_HOST_INTERRUPT;
+}
+
+static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
+{
+ struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
+ struct xe_device *xe = guc_to_xe(guc);
+ int ret;
+
+ if (!IS_DGFX(guc_to_xe(guc)))
+ return 0;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
+ if (ret)
+ return ret;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
+ if (ret)
+ return ret;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
+ if (ret)
+ return ret;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
int xe_guc_init(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -272,7 +326,7 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
- ret = xe_guc_pc_init(&guc->pc);
+ ret = xe_guc_relay_init(&guc->relay);
if (ret)
goto out;
@@ -282,10 +336,7 @@ int xe_guc_init(struct xe_guc *guc)
guc_init_params(guc);
- if (xe_gt_is_media_type(gt))
- guc->notify_reg = MED_GUC_HOST_INTERRUPT;
- else
- guc->notify_reg = GUC_HOST_INTERRUPT;
+ xe_guc_comm_init_early(guc);
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
@@ -304,8 +355,18 @@ out:
*/
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
+ int ret;
+
+ ret = xe_guc_realloc_post_hwconfig(guc);
+ if (ret)
+ return ret;
+
guc_init_params_post_hwconfig(guc);
+ ret = xe_guc_pc_init(&guc->pc);
+ if (ret)
+ return ret;
+
return xe_guc_ads_init_post_hwconfig(&guc->ads);
}
@@ -429,7 +490,6 @@ static int guc_wait_ucode(struct xe_guc *guc)
if (ret) {
struct drm_device *drm = &xe->drm;
- struct drm_printer p = drm_info_printer(drm->dev);
drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
@@ -451,8 +511,6 @@ static int guc_wait_ucode(struct xe_guc *guc)
SOFT_SCRATCH(13)));
ret = -ENXIO;
}
-
- xe_guc_log_print(&guc->log, &p);
} else {
drm_dbg(&xe->drm, "GuC successfully loaded");
}
@@ -516,6 +574,9 @@ int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
xe_guc_ads_populate_minimal(&guc->ads);
+ /* Raise GT freq to speed up HuC/GuC load */
+ xe_guc_pc_init_early(&guc->pc);
+
ret = __xe_guc_upload(guc);
if (ret)
return ret;
@@ -579,10 +640,20 @@ static void guc_enable_irq(struct xe_guc *guc)
int xe_guc_enable_communication(struct xe_guc *guc)
{
+ struct xe_device *xe = guc_to_xe(guc);
int err;
guc_enable_irq(guc);
+ if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
+ if (err)
+ return err;
+ }
+
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
ARAT_EXPIRED_INTRMSK, 0);
@@ -650,7 +721,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
- xe_assert(xe, !guc->ct.enabled);
+ xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
xe_assert(xe, len);
xe_assert(xe, len <= VF_SW_FLAG_COUNT);
xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
@@ -707,8 +778,12 @@ timeout:
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
GUC_HXG_ORIGIN_GUC))
goto proto;
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
+ GUC_HXG_TYPE_NO_RESPONSE_BUSY)
+ goto proto;
goto timeout;
+ }
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
@@ -832,7 +907,7 @@ int xe_guc_stop(struct xe_guc *guc)
{
int ret;
- xe_guc_ct_disable(&guc->ct);
+ xe_guc_ct_stop(&guc->ct);
ret = xe_guc_submit_stop(guc);
if (ret)
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index d3e49e7fd7c3..94f2dc5f6f90 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -13,6 +13,7 @@
struct drm_printer;
+void xe_guc_comm_init_early(struct xe_guc *guc);
int xe_guc_init(struct xe_guc *guc);
int xe_guc_init_post_hwconfig(struct xe_guc *guc);
int xe_guc_post_load_init(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 390e6f1bf4e1..6ad4c1a90a78 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -273,7 +273,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
ads->regset_size = calculate_regset_size(gt);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 24a33fa36496..355edd4d758a 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -9,16 +9,21 @@
#include <linux/circ_buf.h>
#include <linux/delay.h>
+#include <kunit/static_stub.h>
+
#include <drm/drm_managed.h>
#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
+#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
@@ -28,6 +33,7 @@
struct g2h_fence {
u32 *response_buffer;
u32 seqno;
+ u32 response_data;
u16 response_len;
u16 error;
u16 hint;
@@ -40,6 +46,7 @@ struct g2h_fence {
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
g2h_fence->response_buffer = response_buffer;
+ g2h_fence->response_data = 0;
g2h_fence->response_len = 0;
g2h_fence->fail = false;
g2h_fence->retry = false;
@@ -148,7 +155,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
primelockdep(ct);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -159,6 +166,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
if (err)
return err;
+ xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
@@ -278,12 +287,35 @@ static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
return ret > 0 ? -EPROTO : ret;
}
+static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
+ enum xe_guc_ct_state state)
+{
+ mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
+ spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
+
+ xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
+ state == XE_GUC_CT_STATE_STOPPED);
+
+ ct->g2h_outstanding = 0;
+ ct->state = state;
+
+ spin_unlock_irq(&ct->fast_lock);
+
+ /*
+ * Lockdep doesn't like this under the fast lock, and the destroy only
+ * needs to be serialized with the send path, which the CT lock provides.
+ */
+ xa_destroy(&ct->fence_lookup);
+
+ mutex_unlock(&ct->lock);
+}
+
int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
int err;
- xe_assert(xe, !ct->enabled);
+ xe_assert(xe, !xe_guc_ct_enabled(ct));
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -300,12 +332,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
if (err)
goto err_out;
- mutex_lock(&ct->lock);
- spin_lock_irq(&ct->fast_lock);
- ct->g2h_outstanding = 0;
- ct->enabled = true;
- spin_unlock_irq(&ct->fast_lock);
- mutex_unlock(&ct->lock);
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
smp_mb();
wake_up_all(&ct->wq);
@@ -319,15 +346,34 @@ err_out:
return err;
}
+static void stop_g2h_handler(struct xe_guc_ct *ct)
+{
+ cancel_work_sync(&ct->g2h_worker);
+}
+
+/**
+ * xe_guc_ct_disable - Set GuC to disabled state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to the disabled state and stop the g2h handler. No outstanding
+ * G2H messages are expected in this transition.
+ */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
- mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
- spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
- ct->enabled = false; /* Finally disable CT communication */
- spin_unlock_irq(&ct->fast_lock);
- mutex_unlock(&ct->lock);
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
+ stop_g2h_handler(ct);
+}
- xa_destroy(&ct->fence_lookup);
+/**
+ * xe_guc_ct_stop - Set GuC to stopped state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to the stopped state, stop the g2h handler, and clear any
+ * outstanding G2H messages.
+ */
+void xe_guc_ct_stop(struct xe_guc_ct *ct)
+{
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+ stop_g2h_handler(ct);
}
static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
@@ -448,7 +494,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
} else {
cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
}
@@ -475,13 +521,34 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
return 0;
}
+/*
+ * The CT protocol accepts a 16-bit fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED BIT(15)
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+ u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+ if (!is_g2h_fence)
+ seqno |= CT_SEQNO_UNTRACKED;
+
+ return seqno;
+}
+
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
struct xe_device *xe = ct_to_xe(ct);
+ u16 seqno;
int ret;
+ xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
xe_assert(xe, !g2h_len || !g2h_fence);
xe_assert(xe, !num_g2h || !g2h_fence);
xe_assert(xe, !g2h_len || num_g2h);
@@ -493,11 +560,18 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
- if (unlikely(!ct->enabled)) {
+ if (ct->state == XE_GUC_CT_STATE_DISABLED) {
ret = -ENODEV;
goto out;
}
+ if (ct->state == XE_GUC_CT_STATE_STOPPED) {
+ ret = -ECANCELED;
+ goto out;
+ }
+
+ xe_assert(xe, xe_guc_ct_enabled(ct));
+
if (g2h_fence) {
g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
num_g2h = 1;
@@ -505,7 +579,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
if (g2h_fence_needs_alloc(g2h_fence)) {
void *ptr;
- g2h_fence->seqno = (ct->fence_seqno++ & 0xffff);
+ g2h_fence->seqno = next_ct_seqno(ct, true);
ptr = xa_store(&ct->fence_lookup,
g2h_fence->seqno,
g2h_fence, GFP_ATOMIC);
@@ -514,6 +588,10 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
}
+
+ seqno = g2h_fence->seqno;
+ } else {
+ seqno = next_ct_seqno(ct, false);
}
if (g2h_len)
@@ -523,8 +601,7 @@ retry:
if (unlikely(ret))
goto out_unlock;
- ret = h2g_write(ct, action, len, g2h_fence ? g2h_fence->seqno : 0,
- !!g2h_fence);
+ ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
if (unlikely(ret)) {
if (ret == -EAGAIN)
goto retry;
@@ -682,7 +759,8 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
return false;
#define ct_alive(ct) \
- (ct->enabled && !ct->ctbs.h2g.info.broken && !ct->ctbs.g2h.info.broken)
+ (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
+ !ct->ctbs.g2h.info.broken)
if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
return false;
#undef ct_alive
@@ -752,12 +830,31 @@ retry_same_fence:
ret = -EIO;
}
- return ret > 0 ? 0 : ret;
+ return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
}
+/**
+ * xe_guc_ct_send_recv - Send and receive HXG to the GuC
+ * @ct: the &xe_guc_ct
+ * @action: the dword array with `HXG Request`_ message (can't be NULL)
+ * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
+ * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
+ *
+ * Send a `HXG Request`_ message to the GuC over the CT communication channel
+ * and block until the GuC replies with a `HXG Response`_ message.
+ *
+ * For non-blocking communication with GuC use xe_guc_ct_send().
+ *
+ * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
+ *
+ * Return: response length (in dwords) if &response_buffer was not NULL, or
+ * DATA0 from `HXG Response`_ if &response_buffer was NULL, or
+ * a negative error code on failure.
+ */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buffer)
{
+ KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
@@ -767,9 +864,20 @@ int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
return guc_ct_send_recv(ct, action, len, response_buffer, true);
}
+static u32 *msg_to_hxg(u32 *msg)
+{
+ return msg + GUC_CTB_MSG_MIN_LEN;
+}
+
+static u32 msg_len_to_hxg_len(u32 len)
+{
+ return len - GUC_CTB_MSG_MIN_LEN;
+}
+
static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
lockdep_assert_held(&ct->lock);
@@ -786,18 +894,41 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
- u32 response_len = len - GUC_CTB_MSG_MIN_LEN;
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 hxg_len = msg_len_to_hxg_len(len);
u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
- u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]);
+ u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
struct g2h_fence *g2h_fence;
lockdep_assert_held(&ct->lock);
+ /*
+ * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
+ * Those messages should never fail, so if we do get an error back it
+ * means we're likely doing an illegal operation and the GuC is
+ * rejecting it. We have no way to inform the code that submitted the
+ * H2G that the message was rejected, so we need to escalate the
+ * failure to trigger a reset.
+ */
+ if (fence & CT_SEQNO_UNTRACKED) {
+ if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
+ xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
+ fence,
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
+ else
+ xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
+ type, fence);
+
+ return -EPROTO;
+ }
+
g2h_fence = xa_erase(&ct->fence_lookup, fence);
if (unlikely(!g2h_fence)) {
/* Don't tear down channel, as send could've timed out */
- drm_warn(&xe->drm, "G2H fence (%u) not found!\n", fence);
+ xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
return 0;
}
@@ -806,18 +937,16 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
g2h_fence->fail = true;
- g2h_fence->error =
- FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[1]);
- g2h_fence->hint =
- FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[1]);
+ g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
+ g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
g2h_fence->retry = true;
- g2h_fence->reason =
- FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, msg[1]);
+ g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
} else if (g2h_fence->response_buffer) {
- g2h_fence->response_len = response_len;
- memcpy(g2h_fence->response_buffer, msg + GUC_CTB_MSG_MIN_LEN,
- response_len * sizeof(u32));
+ g2h_fence->response_len = hxg_len;
+ memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
+ } else {
+ g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
}
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
@@ -833,14 +962,13 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_device *xe = ct_to_xe(ct);
- u32 hxg, origin, type;
+ u32 *hxg = msg_to_hxg(msg);
+ u32 origin, type;
int ret;
lockdep_assert_held(&ct->lock);
- hxg = msg[1];
-
- origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg);
+ origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
drm_err(&xe->drm,
"G2H channel broken on read, origin=%d, reset required\n",
@@ -850,7 +978,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
return -EPROTO;
}
- type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg);
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
switch (type) {
case GUC_HXG_TYPE_EVENT:
ret = parse_g2h_event(ct, msg, len);
@@ -876,14 +1004,19 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
- u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
- u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
- u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+ u32 hxg_len = msg_len_to_hxg_len(len);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action, adj_len;
+ u32 *payload;
int ret = 0;
- if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
return 0;
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
+ adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
+
switch (action) {
case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
ret = xe_guc_sched_done_handler(guc, payload, adj_len);
@@ -920,6 +1053,12 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = xe_guc_access_counter_notify_handler(guc, payload,
adj_len);
break;
+ case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+ break;
default:
drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
}
@@ -938,15 +1077,22 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
u32 tail, head, len;
s32 avail;
u32 action;
+ u32 *hxg;
+ xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
lockdep_assert_held(&ct->fast_lock);
- if (!ct->enabled)
+ if (ct->state == XE_GUC_CT_STATE_DISABLED)
return -ENODEV;
+ if (ct->state == XE_GUC_CT_STATE_STOPPED)
+ return -ECANCELED;
+
if (g2h->info.broken)
return -EPIPE;
+ xe_assert(xe, xe_guc_ct_enabled(ct));
+
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
avail = tail - g2h->info.head;
@@ -988,10 +1134,11 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
avail * sizeof(u32));
}
- action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+ hxg = msg_to_hxg(msg);
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
if (fast_path) {
- if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
return 0;
switch (action) {
@@ -1017,9 +1164,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
- u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
- u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
- u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+ u32 hxg_len = msg_len_to_hxg_len(len);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
+ u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
int ret = 0;
switch (action) {
@@ -1245,7 +1394,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
return NULL;
}
- if (ct->enabled) {
+ if (xe_guc_ct_enabled(ct)) {
snapshot->ct_enabled = true;
snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
@@ -1271,7 +1420,7 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
return;
if (snapshot->ct_enabled) {
- drm_puts(p, "\nH2G CTB (all sizes in DW):\n");
+ drm_puts(p, "H2G CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->h2g, p);
drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
@@ -1280,7 +1429,7 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
} else {
- drm_puts(p, "\nCT disabled\n");
+ drm_puts(p, "CT disabled\n");
}
}
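
To recap the seqno scheme introduced above: REQUEST messages get a tracked fence stored in ct->fence_lookup, while FAST_REQUEST messages reuse the same 15-bit rolling counter with CT_SEQNO_UNTRACKED set, so replies can be classified without a lookup. A hedged sketch (ct_seqno_demo is illustrative only, not driver code):

static void ct_seqno_demo(struct xe_guc_ct *ct, u32 fence)
{
	u16 tracked = next_ct_seqno(ct, true);	/* e.g. 0x0001, REQUEST */
	u16 fast = next_ct_seqno(ct, false);	/* e.g. 0x8002, FAST_REQUEST */

	if (fence & CT_SEQNO_UNTRACKED) {
		/* FAST_REQUEST reply: no waiter exists; only failures are
		 * expected here and parse_g2h_response() escalates them */
	} else {
		/* REQUEST reply: find the g2h_fence in ct->fence_lookup */
	}
	(void)tracked;
	(void)fast;
}
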
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index f15f8a4857e0..5083e099064f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -13,6 +13,7 @@ struct drm_printer;
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
+void xe_guc_ct_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
struct xe_guc_ct_snapshot *
@@ -22,11 +23,18 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
+static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
+{
+ return ct->state == XE_GUC_CT_STATE_ENABLED;
+}
+
static inline void xe_guc_ct_irq_handler(struct xe_guc_ct *ct)
{
+ if (!xe_guc_ct_enabled(ct))
+ return;
+
wake_up_all(&ct->wq);
- if (ct->enabled)
- queue_work(system_unbound_wq, &ct->g2h_worker);
+ queue_work(system_unbound_wq, &ct->g2h_worker);
xe_guc_ct_fast_path(ct);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index d814d4ee3fc6..d29144c9f20b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -73,6 +73,20 @@ struct xe_guc_ct_snapshot {
};
/**
+ * enum xe_guc_ct_state - CT state
+ * @XE_GUC_CT_STATE_NOT_INITIALIZED: CT not initialized, messages not expected in this state
+ * @XE_GUC_CT_STATE_DISABLED: CT disabled, messages not expected in this state
+ * @XE_GUC_CT_STATE_STOPPED: CT stopped, drop messages without errors
+ * @XE_GUC_CT_STATE_ENABLED: CT enabled, messages sent / received in this state
+ */
+enum xe_guc_ct_state {
+ XE_GUC_CT_STATE_NOT_INITIALIZED = 0,
+ XE_GUC_CT_STATE_DISABLED,
+ XE_GUC_CT_STATE_STOPPED,
+ XE_GUC_CT_STATE_ENABLED,
+};
+
+/**
* struct xe_guc_ct - GuC command transport (CT) layer
*
* Includes a pair of CT buffers for bi-directional communication and tracking
@@ -87,17 +101,17 @@ struct xe_guc_ct {
spinlock_t fast_lock;
/** @ctbs: buffers for sending and receiving commands */
struct {
- /** @send: Host to GuC (H2G, send) channel */
+ /** @ctbs.send: Host to GuC (H2G, send) channel */
struct guc_ctb h2g;
- /** @recv: GuC to Host (G2H, receive) channel */
+ /** @ctbs.recv: GuC to Host (G2H, receive) channel */
struct guc_ctb g2h;
} ctbs;
/** @g2h_outstanding: number of outstanding G2H */
u32 g2h_outstanding;
/** @g2h_worker: worker to process G2H messages */
struct work_struct g2h_worker;
- /** @enabled: CT enabled */
- bool enabled;
+ /** @state: CT state */
+ enum xe_guc_ct_state state;
/** @fence_seqno: G2H fence seqno - 16 bits used by CT */
u32 fence_seqno;
/** @fence_lookup: G2H fence lookup */
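
The state machine replaces the old boolean so that a GT reset (STOPPED) can be told apart from a clean shutdown (DISABLED). For illustration, the send paths above map states to errors roughly as follows (helper is hypothetical; NOT_INITIALIZED is asserted against rather than returned):

static int ct_state_to_errno(enum xe_guc_ct_state state)
{
	switch (state) {
	case XE_GUC_CT_STATE_DISABLED:
		return -ENODEV;		/* CT down: hard error */
	case XE_GUC_CT_STATE_STOPPED:
		return -ECANCELED;	/* reset in flight: drop quietly */
	case XE_GUC_CT_STATE_ENABLED:
		return 0;
	default:
		return -EINVAL;		/* NOT_INITIALIZED: asserted */
	}
}
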
diff --git a/drivers/gpu/drm/xe/xe_guc_db_mgr.c b/drivers/gpu/drm/xe/xe_guc_db_mgr.c
new file mode 100644
index 000000000000..8d9a0287df6b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_db_mgr.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_guc_regs.h"
+
+#include "xe_assert.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_db_mgr.h"
+#include "xe_guc_types.h"
+
+/**
+ * DOC: GuC Doorbells
+ *
+ * The GFX doorbell solution provides a mechanism for submission of workloads
+ * to the graphics hardware by a ring3 application without the penalty of a
+ * ring transition for each workload submission.
+ *
+ * In SR-IOV mode, the doorbells are treated as a shared resource and the PF
+ * must be able to provision exclusive ranges of IDs across the VFs that may
+ * want to use this feature.
+ */
+
+static struct xe_guc *dbm_to_guc(struct xe_guc_db_mgr *dbm)
+{
+ return container_of(dbm, struct xe_guc, dbm);
+}
+
+static struct xe_gt *dbm_to_gt(struct xe_guc_db_mgr *dbm)
+{
+ return guc_to_gt(dbm_to_guc(dbm));
+}
+
+static struct xe_device *dbm_to_xe(struct xe_guc_db_mgr *dbm)
+{
+ return gt_to_xe(dbm_to_gt(dbm));
+}
+
+#define dbm_assert(_dbm, _cond) xe_gt_assert(dbm_to_gt(_dbm), _cond)
+#define dbm_mutex(_dbm) (&dbm_to_guc(_dbm)->submission_state.lock)
+
+static void dbm_print_locked(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent);
+
+static void __fini_dbm(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_db_mgr *dbm = arg;
+ unsigned int weight;
+
+ mutex_lock(dbm_mutex(dbm));
+
+ weight = bitmap_weight(dbm->bitmap, dbm->count);
+ if (weight) {
+ struct drm_printer p = xe_gt_info_printer(dbm_to_gt(dbm));
+
+ xe_gt_err(dbm_to_gt(dbm), "GuC doorbells manager unclean (%u/%u)\n",
+ weight, dbm->count);
+ dbm_print_locked(dbm, &p, 1);
+ }
+
+ bitmap_free(dbm->bitmap);
+ dbm->bitmap = NULL;
+ dbm->count = 0;
+
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+/**
+ * xe_guc_db_mgr_init() - Initialize GuC Doorbells Manager.
+ * @dbm: the &xe_guc_db_mgr to initialize
+ * @count: number of doorbells to manage
+ *
+ * The bare-metal or PF driver can pass ~0 as &count to indicate that all
+ * doorbells supported by the hardware are available for use.
+ *
+ * Only VF drivers have to provide the explicit number of doorbell IDs
+ * that they can use.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_db_mgr_init(struct xe_guc_db_mgr *dbm, unsigned int count)
+{
+ int ret;
+
+ if (count == ~0)
+ count = GUC_NUM_DOORBELLS;
+
+ dbm_assert(dbm, !dbm->bitmap);
+ dbm_assert(dbm, count <= GUC_NUM_DOORBELLS);
+
+ if (!count)
+ goto done;
+
+ dbm->bitmap = bitmap_zalloc(count, GFP_KERNEL);
+ if (!dbm->bitmap)
+ return -ENOMEM;
+ dbm->count = count;
+
+ ret = drmm_add_action_or_reset(&dbm_to_xe(dbm)->drm, __fini_dbm, dbm);
+ if (ret)
+ return ret;
+done:
+ xe_gt_dbg(dbm_to_gt(dbm), "using %u doorbell(s)\n", dbm->count);
+ return 0;
+}
+
+static int dbm_reserve_chunk_locked(struct xe_guc_db_mgr *dbm,
+ unsigned int count, unsigned int spare)
+{
+ unsigned int used;
+ int index;
+
+ dbm_assert(dbm, count);
+ dbm_assert(dbm, count <= GUC_NUM_DOORBELLS);
+ dbm_assert(dbm, dbm->count <= GUC_NUM_DOORBELLS);
+ lockdep_assert_held(dbm_mutex(dbm));
+
+ if (!dbm->count)
+ return -ENODATA;
+
+ if (spare) {
+ used = bitmap_weight(dbm->bitmap, dbm->count);
+ if (used + count + spare > dbm->count)
+ return -EDQUOT;
+ }
+
+ index = bitmap_find_next_zero_area(dbm->bitmap, dbm->count, 0, count, 0);
+ if (index >= dbm->count)
+ return -ENOSPC;
+
+ bitmap_set(dbm->bitmap, index, count);
+
+ return index;
+}
+
+static void dbm_release_chunk_locked(struct xe_guc_db_mgr *dbm,
+ unsigned int start, unsigned int count)
+{
+ dbm_assert(dbm, count);
+ dbm_assert(dbm, count <= GUC_NUM_DOORBELLS);
+ dbm_assert(dbm, dbm->count);
+ dbm_assert(dbm, dbm->count <= GUC_NUM_DOORBELLS);
+ lockdep_assert_held(dbm_mutex(dbm));
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ unsigned int n;
+
+ for (n = 0; n < count; n++)
+ dbm_assert(dbm, test_bit(start + n, dbm->bitmap));
+ }
+ bitmap_clear(dbm->bitmap, start, count);
+}
+
+/**
+ * xe_guc_db_mgr_reserve_id_locked() - Reserve a single GuC Doorbell ID.
+ * @dbm: the &xe_guc_db_mgr
+ *
+ * This function expects that submission lock is already taken.
+ *
+ * Return: ID of the allocated GuC doorbell or a negative error code on failure.
+ */
+int xe_guc_db_mgr_reserve_id_locked(struct xe_guc_db_mgr *dbm)
+{
+ return dbm_reserve_chunk_locked(dbm, 1, 0);
+}
+
+/**
+ * xe_guc_db_mgr_release_id_locked() - Release a single GuC Doorbell ID.
+ * @dbm: the &xe_guc_db_mgr
+ * @id: the GuC Doorbell ID to release
+ *
+ * This function expects that submission lock is already taken.
+ */
+void xe_guc_db_mgr_release_id_locked(struct xe_guc_db_mgr *dbm, unsigned int id)
+{
+ return dbm_release_chunk_locked(dbm, id, 1);
+}
+
+/**
+ * xe_guc_db_mgr_reserve_range() - Reserve a range of GuC Doorbell IDs.
+ * @dbm: the &xe_guc_db_mgr
+ * @count: number of GuC doorbell IDs to reserve
+ * @spare: number of GuC doorbell IDs to keep available
+ *
+ * This function is dedicated for use by the PF, which expects that the
+ * allocated range for the VF will be contiguous and that there will be at
+ * least &spare IDs still available for PF use after this reservation.
+ *
+ * Return: starting ID of the allocated GuC doorbell ID range or
+ * a negative error code on failure.
+ */
+int xe_guc_db_mgr_reserve_range(struct xe_guc_db_mgr *dbm,
+ unsigned int count, unsigned int spare)
+{
+ int ret;
+
+ mutex_lock(dbm_mutex(dbm));
+ ret = dbm_reserve_chunk_locked(dbm, count, spare);
+ mutex_unlock(dbm_mutex(dbm));
+
+ return ret;
+}
+
+/**
+ * xe_guc_db_mgr_release_range() - Release a range of Doorbell IDs.
+ * @dbm: the &xe_guc_db_mgr
+ * @start: the starting ID of GuC doorbell ID range to release
+ * @count: number of GuC doorbell IDs to release
+ */
+void xe_guc_db_mgr_release_range(struct xe_guc_db_mgr *dbm,
+ unsigned int start, unsigned int count)
+{
+ mutex_lock(dbm_mutex(dbm));
+ dbm_release_chunk_locked(dbm, start, count);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+static void dbm_print_locked(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent)
+{
+ unsigned int rs, re;
+ unsigned int total;
+
+ drm_printf_indent(p, indent, "count: %u\n", dbm->count);
+ if (!dbm->bitmap)
+ return;
+
+ total = 0;
+ for_each_clear_bitrange(rs, re, dbm->bitmap, dbm->count) {
+ drm_printf_indent(p, indent, "available range: %u..%u (%u)\n",
+ rs, re - 1, re - rs);
+ total += re - rs;
+ }
+ drm_printf_indent(p, indent, "available total: %u\n", total);
+
+ total = 0;
+ for_each_set_bitrange(rs, re, dbm->bitmap, dbm->count) {
+ drm_printf_indent(p, indent, "reserved range: %u..%u (%u)\n",
+ rs, re - 1, re - rs);
+ total += re - rs;
+ }
+ drm_printf_indent(p, indent, "reserved total: %u\n", total);
+}
+
+/**
+ * xe_guc_db_mgr_print() - Print status of GuC Doorbells Manager.
+ * @dbm: the &xe_guc_db_mgr to print
+ * @p: the &drm_printer to print to
+ * @indent: tab indentation level
+ */
+void xe_guc_db_mgr_print(struct xe_guc_db_mgr *dbm,
+ struct drm_printer *p, int indent)
+{
+ mutex_lock(dbm_mutex(dbm));
+ dbm_print_locked(dbm, p, indent);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_db_mgr_test.c"
+#endif
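
A hedged usage sketch for the new manager: a hypothetical PF provisioning path reserving a contiguous doorbell block for a VF while keeping some IDs spare for the PF itself (the function name and the spare count of 16 are made up):

static int pf_provision_vf_doorbells(struct xe_guc *guc, unsigned int num)
{
	int start;

	/* keep 16 doorbells available for the PF (arbitrary choice) */
	start = xe_guc_db_mgr_reserve_range(&guc->dbm, num, 16);
	if (start < 0)
		return start;	/* -ENOSPC, -EDQUOT, -ENODATA, ... */

	/* ... hand IDs [start, start + num) over to the VF ... */

	return start;
}

On unprovisioning, xe_guc_db_mgr_release_range(&guc->dbm, start, num) returns the block to the bitmap.
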
diff --git a/drivers/gpu/drm/xe/xe_guc_db_mgr.h b/drivers/gpu/drm/xe/xe_guc_db_mgr.h
new file mode 100644
index 000000000000..c250fa0ca9d6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_db_mgr.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_DB_MGR_H_
+#define _XE_GUC_DB_MGR_H_
+
+struct drm_printer;
+struct xe_guc_db_mgr;
+
+int xe_guc_db_mgr_init(struct xe_guc_db_mgr *dbm, unsigned int count);
+
+int xe_guc_db_mgr_reserve_id_locked(struct xe_guc_db_mgr *dbm);
+void xe_guc_db_mgr_release_id_locked(struct xe_guc_db_mgr *dbm, unsigned int id);
+
+int xe_guc_db_mgr_reserve_range(struct xe_guc_db_mgr *dbm, unsigned int count, unsigned int spare);
+void xe_guc_db_mgr_release_range(struct xe_guc_db_mgr *dbm, unsigned int start, unsigned int count);
+
+void xe_guc_db_mgr_print(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 4dd5a88a7826..c281fdbfd2d6 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -97,6 +97,7 @@ struct guc_update_exec_queue_policy {
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RENDER_RST_RC6_EXIT BIT(19)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
+#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
index 2a13a00917f8..ea49f3885c10 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
@@ -78,7 +78,7 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
return -EINVAL;
bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h b/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h
new file mode 100644
index 000000000000..aeeb573c6842
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_HXG_HELPERS_H_
+#define _XE_GUC_HXG_HELPERS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "abi/guc_messages_abi.h"
+
+/**
+ * hxg_sizeof - Queries size of the object or type (in HXG units).
+ * @T: the object or type
+ *
+ * Force a compilation error if actual size is not aligned to HXG unit (u32).
+ *
+ * Return: size in dwords (u32).
+ */
+#define hxg_sizeof(T) (sizeof(T) / sizeof(u32) + BUILD_BUG_ON_ZERO(sizeof(T) % sizeof(u32)))
+
+static inline const char *guc_hxg_type_to_string(unsigned int type)
+{
+ switch (type) {
+ case GUC_HXG_TYPE_REQUEST:
+ return "request";
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ return "fast-request";
+ case GUC_HXG_TYPE_EVENT:
+ return "event";
+ case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
+ return "busy";
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ return "retry";
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ return "failure";
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ return "response";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool guc_hxg_type_is_action(unsigned int type)
+{
+ switch (type) {
+ case GUC_HXG_TYPE_REQUEST:
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ case GUC_HXG_TYPE_EVENT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool guc_hxg_type_is_reply(unsigned int type)
+{
+ switch (type) {
+ case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline u32 guc_hxg_msg_encode_success(u32 *msg, u32 data0)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
+ FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, data0);
+
+ return GUC_HXG_RESPONSE_MSG_MIN_LEN;
+}
+
+static inline u32 guc_hxg_msg_encode_failure(u32 *msg, u32 error, u32 hint)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error);
+
+ return GUC_HXG_FAILURE_MSG_LEN;
+}
+
+static inline u32 guc_hxg_msg_encode_busy(u32 *msg, u32 counter)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_NO_RESPONSE_BUSY) |
+ FIELD_PREP(GUC_HXG_BUSY_MSG_0_COUNTER, counter);
+
+ return GUC_HXG_BUSY_MSG_LEN;
+}
+
+static inline u32 guc_hxg_msg_encode_retry(u32 *msg, u32 reason)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_NO_RESPONSE_RETRY) |
+ FIELD_PREP(GUC_HXG_RETRY_MSG_0_REASON, reason);
+
+ return GUC_HXG_RETRY_MSG_LEN;
+}
+
+#endif
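
For illustration, building a minimal success reply with the helpers above; hxg_sizeof() additionally fails the build for any struct that does not pack to whole dwords (struct vf_cfg_reply and make_reply are hypothetical):

struct vf_cfg_reply {
	u32 data[2];
};

static u32 make_reply(u32 *msg, u32 data0)
{
	BUILD_BUG_ON(hxg_sizeof(struct vf_cfg_reply) != 2);

	/* returns GUC_HXG_RESPONSE_MSG_MIN_LEN (1 dword) */
	return guc_hxg_msg_encode_success(msg, data0);
}
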
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index bcd2f4d34081..45135c3520e5 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -84,7 +84,7 @@ int xe_guc_log_init(struct xe_guc_log *log)
struct xe_bo *bo;
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index d91702592520..2839d685631b 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -956,10 +956,12 @@ out:
/**
* xe_guc_pc_fini - Finalize GuC's Power Conservation component
- * @pc: Xe_GuC_PC instance
+ * @drm: DRM device
+ * @arg: opaque pointer that should point to Xe_GuC_PC instance
*/
-void xe_guc_pc_fini(struct xe_guc_pc *pc)
+static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
+ struct xe_guc_pc *pc = arg;
struct xe_device *xe = pc_to_xe(pc);
if (xe->info.skip_guc_pc) {
@@ -969,9 +971,10 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc)
return;
}
+ xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
XE_WARN_ON(xe_guc_pc_stop(pc));
- mutex_destroy(&pc->freq_lock);
+ xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}
/**
@@ -985,11 +988,14 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ int err;
if (xe->info.skip_guc_pc)
return 0;
- mutex_init(&pc->freq_lock);
+ err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
+ if (err)
+ return err;
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -998,5 +1004,10 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
return PTR_ERR(bo);
pc->bo = bo;
+
+ err = drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
+ if (err)
+ return err;
+
return 0;
}
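
This hunk converts the PC teardown to the drmm_ pattern: instead of an exported _fini() that callers must remember to invoke, init registers a managed action that runs automatically on DRM device teardown, in reverse registration order. A generic sketch of the pattern (my_component is hypothetical):

static void my_component_fini(struct drm_device *drm, void *arg)
{
	struct my_component *c = arg;

	/* undo whatever init set up; runs on device teardown */
}

static int my_component_init(struct xe_device *xe, struct my_component *c)
{
	/* on failure the action runs immediately, keeping init balanced */
	return drmm_add_action_or_reset(&xe->drm, my_component_fini, c);
}
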
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index cecad8e9300b..d3680d89490e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -9,7 +9,6 @@
#include "xe_guc_pc_types.h"
int xe_guc_pc_init(struct xe_guc_pc *pc);
-void xe_guc_pc_fini(struct xe_guc_pc *pc);
int xe_guc_pc_start(struct xe_guc_pc *pc);
int xe_guc_pc_stop(struct xe_guc_pc *pc);
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
new file mode 100644
index 000000000000..c0a2d8d5d3b3
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -0,0 +1,941 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
+#include <kunit/static_stub.h>
+#include <kunit/test-bug.h>
+
+#include "abi/guc_actions_sriov_abi.h"
+#include "abi/guc_relay_actions_abi.h"
+#include "abi/guc_relay_communication_abi.h"
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_hxg_helpers.h"
+#include "xe_guc_relay.h"
+#include "xe_guc_relay_types.h"
+#include "xe_sriov.h"
+
+/*
+ * How long should we wait for the response?
+ * XXX this value is subject for the profiling.
+ */
+#define RELAY_TIMEOUT_MSEC (2500)
+
+static void relays_worker_fn(struct work_struct *w);
+
+static struct xe_guc *relay_to_guc(struct xe_guc_relay *relay)
+{
+ return container_of(relay, struct xe_guc, relay);
+}
+
+static struct xe_guc_ct *relay_to_ct(struct xe_guc_relay *relay)
+{
+ return &relay_to_guc(relay)->ct;
+}
+
+static struct xe_gt *relay_to_gt(struct xe_guc_relay *relay)
+{
+ return guc_to_gt(relay_to_guc(relay));
+}
+
+static struct xe_device *relay_to_xe(struct xe_guc_relay *relay)
+{
+ return gt_to_xe(relay_to_gt(relay));
+}
+
+#define relay_assert(relay, condition) xe_gt_assert(relay_to_gt(relay), condition)
+#define relay_notice(relay, msg...) xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg)
+#define relay_debug(relay, msg...) xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg)
+
+static int relay_get_totalvfs(struct xe_guc_relay *relay)
+{
+ struct xe_device *xe = relay_to_xe(relay);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ KUNIT_STATIC_STUB_REDIRECT(relay_get_totalvfs, relay);
+ return IS_SRIOV_VF(xe) ? 0 : pci_sriov_get_totalvfs(pdev);
+}
+
+static bool relay_is_ready(struct xe_guc_relay *relay)
+{
+ return mempool_initialized(&relay->pool);
+}
+
+static u32 relay_get_next_rid(struct xe_guc_relay *relay)
+{
+ u32 rid;
+
+ spin_lock(&relay->lock);
+ rid = ++relay->last_rid;
+ spin_unlock(&relay->lock);
+
+ return rid;
+}
+
+/**
+ * struct relay_transaction - internal data used to handle transactions
+ *
+ * Relation between struct relay_transaction members::
+ *
+ * <-------------------- GUC_CTB_MAX_DWORDS -------------->
+ * <-------- GUC_RELAY_MSG_MAX_LEN --->
+ * <--- offset ---> <--- request_len ------->
+ * +----------------+-------------------------+----------+--+
+ * | | | | |
+ * +----------------+-------------------------+----------+--+
+ * ^ ^
+ * / /
+ * request_buf request
+ *
+ * <-------------------- GUC_CTB_MAX_DWORDS -------------->
+ * <-------- GUC_RELAY_MSG_MAX_LEN --->
+ * <--- offset ---> <--- response_len --->
+ * +----------------+----------------------+-------------+--+
+ * | | | | |
+ * +----------------+----------------------+-------------+--+
+ * ^ ^
+ * / /
+ * response_buf response
+ */
+struct relay_transaction {
+ /**
+ * @incoming: indicates whether this transaction represents an incoming
+ * request from the remote VF/PF or this transaction
+ * represents outgoing request to the remote VF/PF.
+ */
+ bool incoming;
+
+ /**
+ * @remote: PF/VF identifier of the origin (or target) of the relay
+ * request message.
+ */
+ u32 remote;
+
+ /** @rid: identifier of the VF/PF relay message. */
+ u32 rid;
+
+ /**
+ * @request: points to the inner VF/PF request message, copied to the
+ * #response_buf starting at #offset.
+ */
+ u32 *request;
+
+ /** @request_len: length of the inner VF/PF request message. */
+ u32 request_len;
+
+ /**
+ * @response: points to the placeholder buffer where inner VF/PF
+ * response will be located, for outgoing transaction
+ * this could be caller's buffer (if provided) otherwise
+ * it points to the #response_buf starting at #offset.
+ */
+ u32 *response;
+
+ /**
+ * @response_len: length of the inner VF/PF response message (only
+ * if #status is 0), initially set to the size of the
+ * placeholder buffer where response message will be
+ * copied.
+ */
+ u32 response_len;
+
+ /**
+ * @offset: offset to the start of the inner VF/PF relay message inside
+ * buffers; this offset is equal the length of the outer GuC
+ * relay header message.
+ */
+ u32 offset;
+
+ /**
+ * @request_buf: buffer with VF/PF request message including outer
+ * transport message.
+ */
+ u32 request_buf[GUC_CTB_MAX_DWORDS];
+
+ /**
+ * @response_buf: buffer with VF/PF response message including outer
+ * transport message.
+ */
+ u32 response_buf[GUC_CTB_MAX_DWORDS];
+
+ /**
+ * @reply: status of the reply, 0 means that data pointed by the
+ * #response is valid.
+ */
+ int reply;
+
+ /** @done: completion of the outgoing transaction. */
+ struct completion done;
+
+ /** @link: transaction list link */
+ struct list_head link;
+};
+
+static u32 prepare_pf2guc(u32 *msg, u32 target, u32 rid)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF);
+ msg[1] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, target);
+ msg[2] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, rid);
+
+ return PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN;
+}
+
+static u32 prepare_vf2guc(u32 *msg, u32 rid)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_VF2GUC_RELAY_TO_PF);
+ msg[1] = FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, rid);
+
+ return VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN;
+}
+
+static struct relay_transaction *
+__relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u32 rid,
+ const u32 *action, u32 action_len, u32 *resp, u32 resp_size)
+{
+ struct relay_transaction *txn;
+
+ relay_assert(relay, action_len >= GUC_RELAY_MSG_MIN_LEN);
+ relay_assert(relay, action_len <= GUC_RELAY_MSG_MAX_LEN);
+ relay_assert(relay, !(!!resp ^ !!resp_size));
+ relay_assert(relay, resp_size <= GUC_RELAY_MSG_MAX_LEN);
+ relay_assert(relay, resp_size == 0 || resp_size >= GUC_RELAY_MSG_MIN_LEN);
+
+ if (unlikely(!relay_is_ready(relay)))
+ return ERR_PTR(-ENODEV);
+
+ /*
+	 * For incoming requests we can't use GFP_KERNEL, as those are delivered
+	 * with the CTB lock held, which is marked as used in the reclaim path.
+	 * That is also one of the reasons why we use a mempool here.
+ */
+ txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
+ if (!txn)
+ return ERR_PTR(-ENOMEM);
+
+ txn->incoming = incoming;
+ txn->remote = remote;
+ txn->rid = rid;
+ txn->offset = remote ?
+ prepare_pf2guc(incoming ? txn->response_buf : txn->request_buf, remote, rid) :
+ prepare_vf2guc(incoming ? txn->response_buf : txn->request_buf, rid);
+
+ relay_assert(relay, txn->offset);
+ relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->request_buf));
+ relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->response_buf));
+
+ txn->request = txn->request_buf + txn->offset;
+ memcpy(&txn->request_buf[txn->offset], action, sizeof(u32) * action_len);
+ txn->request_len = action_len;
+
+ txn->response = resp ?: txn->response_buf + txn->offset;
+ txn->response_len = resp_size ?: GUC_RELAY_MSG_MAX_LEN;
+ txn->reply = -ENOMSG;
+ INIT_LIST_HEAD(&txn->link);
+ init_completion(&txn->done);
+
+ return txn;
+}
+
+static struct relay_transaction *
+relay_new_transaction(struct xe_guc_relay *relay, u32 target, const u32 *action, u32 len,
+ u32 *resp, u32 resp_size)
+{
+ u32 rid = relay_get_next_rid(relay);
+
+ return __relay_get_transaction(relay, false, target, rid, action, len, resp, resp_size);
+}
+
+static struct relay_transaction *
+relay_new_incoming_transaction(struct xe_guc_relay *relay, u32 origin, u32 rid,
+ const u32 *action, u32 len)
+{
+ return __relay_get_transaction(relay, true, origin, rid, action, len, NULL, 0);
+}
+
+static void relay_release_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
+{
+ relay_assert(relay, list_empty(&txn->link));
+
+ txn->offset = 0;
+ txn->response = NULL;
+ txn->reply = -ESTALE;
+ mempool_free(txn, &relay->pool);
+}
+
+static int relay_send_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
+{
+ u32 len = txn->incoming ? txn->response_len : txn->request_len;
+ u32 *buf = txn->incoming ? txn->response_buf : txn->request_buf;
+ u32 *msg = buf + txn->offset;
+ int ret;
+
+ relay_assert(relay, txn->offset);
+ relay_assert(relay, txn->offset + len <= GUC_CTB_MAX_DWORDS);
+ relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
+ relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);
+
+ relay_debug(relay, "sending %s.%u to %u = %*ph\n",
+ guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
+ txn->rid, txn->remote, (int)sizeof(u32) * len, msg);
+
+ ret = xe_guc_ct_send_block(relay_to_ct(relay), buf, len + txn->offset);
+
+ if (unlikely(ret > 0)) {
+ relay_notice(relay, "Unexpected data=%d from GuC, wrong ABI?\n", ret);
+ ret = -EPROTO;
+ }
+ if (unlikely(ret < 0)) {
+ relay_notice(relay, "Failed to send %s.%x to GuC (%pe) %*ph ...\n",
+ guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, buf[0])),
+ FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, buf[0]),
+ ERR_PTR(ret), (int)sizeof(u32) * txn->offset, buf);
+ relay_notice(relay, "Failed to send %s.%u to %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
+ txn->rid, txn->remote, ERR_PTR(ret), (int)sizeof(u32) * len, msg);
+ }
+
+ return ret;
+}
+
+static void __fini_relay(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_relay *relay = arg;
+
+ mempool_exit(&relay->pool);
+}
+
+/**
+ * xe_guc_relay_init - Initialize a &xe_guc_relay
+ * @relay: the &xe_guc_relay to initialize
+ *
+ * Initialize remaining members of &xe_guc_relay that may depend
+ * on the SR-IOV mode.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_relay_init(struct xe_guc_relay *relay)
+{
+ const int XE_RELAY_MEMPOOL_MIN_NUM = 1;
+ struct xe_device *xe = relay_to_xe(relay);
+ int err;
+
+ relay_assert(relay, !relay_is_ready(relay));
+
+ if (!IS_SRIOV(xe))
+ return 0;
+
+ spin_lock_init(&relay->lock);
+ INIT_WORK(&relay->worker, relays_worker_fn);
+ INIT_LIST_HEAD(&relay->pending_relays);
+ INIT_LIST_HEAD(&relay->incoming_actions);
+
+ err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM +
+ relay_get_totalvfs(relay),
+ sizeof(struct relay_transaction));
+ if (err)
+ return err;
+
+ relay_debug(relay, "using mempool with %d elements\n", relay->pool.min_nr);
+
+ return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay);
+}
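+
+/*
+ * Typical init-time usage (an illustrative sketch; the actual call site in
+ * the GuC init path may differ):
+ *
+ *	err = xe_guc_relay_init(&guc->relay);
+ *	if (err)
+ *		return err;
+ */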
+
+static u32 to_relay_error(int err)
+{
+ /* XXX: assume that relay errors match errno codes */
+ return err < 0 ? -err : GUC_RELAY_ERROR_UNDISCLOSED;
+}
+
+static int from_relay_error(u32 error)
+{
+ /* XXX: assume that relay errors match errno codes */
+ return error ? -error : -ENODATA;
+}
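+
+/*
+ * Example of the mapping above (illustrative, relying on the XXX assumption
+ * that relay error codes match errno codes): a local -EPROTO is sent on the
+ * wire as relay error EPROTO, an incoming relay error EPROTO maps back to
+ * -EPROTO, and an explicit error of 0 in a failure reply yields -ENODATA.
+ */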
+
+static u32 sanitize_relay_error(u32 error)
+{
+ /* XXX TBD if generic error codes will be allowed */
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ error = GUC_RELAY_ERROR_UNDISCLOSED;
+ return error;
+}
+
+static u32 sanitize_relay_error_hint(u32 hint)
+{
+ /* XXX TBD if generic error codes will be allowed */
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ hint = 0;
+ return hint;
+}
+
+static u32 prepare_error_reply(u32 *msg, u32 error, u32 hint)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error);
+
+ XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_ERROR, error));
+ XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_HINT, hint));
+
+ return GUC_HXG_FAILURE_MSG_LEN;
+}
+
+static void relay_testonly_nop(struct xe_guc_relay *relay)
+{
+ KUNIT_STATIC_STUB_REDIRECT(relay_testonly_nop, relay);
+}
+
+static int relay_send_message_and_wait(struct xe_guc_relay *relay,
+ struct relay_transaction *txn,
+ u32 *buf, u32 buf_size)
+{
+ unsigned long timeout = msecs_to_jiffies(RELAY_TIMEOUT_MSEC);
+ u32 *msg = &txn->request_buf[txn->offset];
+ u32 len = txn->request_len;
+ u32 type, action, data0;
+ int ret;
+ long n;
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+ action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+ data0 = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
+
+ relay_debug(relay, "%s.%u to %u action %#x:%u\n",
+ guc_hxg_type_to_string(type),
+ txn->rid, txn->remote, action, data0);
+
+ /* list ordering does not need to match RID ordering */
+ spin_lock(&relay->lock);
+ list_add_tail(&txn->link, &relay->pending_relays);
+ spin_unlock(&relay->lock);
+
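+	/*
+	 * The reply is matched by (remote, rid) in relay_handle_reply(): a
+	 * BUSY reply keeps us waiting, a RETRY reply triggers a resend, and
+	 * any other reply completes the transaction.
+	 */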
+resend:
+ ret = relay_send_transaction(relay, txn);
+ if (unlikely(ret < 0))
+ goto unlink;
+
+wait:
+ n = wait_for_completion_timeout(&txn->done, timeout);
+ if (unlikely(n == 0 && txn->reply)) {
+ ret = -ETIME;
+ goto unlink;
+ }
+
+ relay_debug(relay, "%u.%u reply %d after %u msec\n",
+ txn->remote, txn->rid, txn->reply, jiffies_to_msecs(timeout - n));
+ if (unlikely(txn->reply)) {
+ reinit_completion(&txn->done);
+ if (txn->reply == -EAGAIN)
+ goto resend;
+ if (txn->reply == -EBUSY) {
+ relay_testonly_nop(relay);
+ goto wait;
+ }
+ if (txn->reply > 0)
+ ret = from_relay_error(txn->reply);
+ else
+ ret = txn->reply;
+ goto unlink;
+ }
+
+ relay_debug(relay, "%u.%u response %*ph\n", txn->remote, txn->rid,
+ (int)sizeof(u32) * txn->response_len, txn->response);
+ relay_assert(relay, txn->response_len >= GUC_RELAY_MSG_MIN_LEN);
+ ret = txn->response_len;
+
+unlink:
+ spin_lock(&relay->lock);
+ list_del_init(&txn->link);
+ spin_unlock(&relay->lock);
+
+ if (unlikely(ret < 0)) {
+ relay_notice(relay, "Unsuccessful %s.%u %#x:%u to %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(type), txn->rid,
+ action, data0, txn->remote, ERR_PTR(ret),
+ (int)sizeof(u32) * len, msg);
+ }
+
+ return ret;
+}
+
+static int relay_send_to(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ struct relay_transaction *txn;
+ int ret;
+
+ relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
+ relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_HOST);
+ relay_assert(relay, guc_hxg_type_is_action(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])));
+
+ if (unlikely(!relay_is_ready(relay)))
+ return -ENODEV;
+
+ txn = relay_new_transaction(relay, target, msg, len, buf, buf_size);
+ if (IS_ERR(txn))
+ return PTR_ERR(txn);
+
+ switch (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])) {
+ case GUC_HXG_TYPE_REQUEST:
+ ret = relay_send_message_and_wait(relay, txn, buf, buf_size);
+ break;
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ relay_assert(relay, !GUC_HXG_TYPE_FAST_REQUEST);
+ fallthrough;
+ case GUC_HXG_TYPE_EVENT:
+ ret = relay_send_transaction(relay, txn);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ relay_release_transaction(relay, txn);
+ return ret;
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * xe_guc_relay_send_to_vf - Send a message to the VF.
+ * @relay: the &xe_guc_relay which will send the message
+ * @target: target VF number
+ * @msg: request message to be sent
+ * @len: length of the request message (in dwords, can't be 0)
+ * @buf: placeholder for the response message
+ * @buf_size: size of the response message placeholder (in dwords)
+ *
+ * This function can only be used by a driver running in SR-IOV PF mode.
+ *
+ * Return: Non-negative response length (in dwords) or
+ * a negative error code on failure.
+ */
+int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ relay_assert(relay, IS_SRIOV_PF(relay_to_xe(relay)));
+
+ return relay_send_to(relay, target, msg, len, buf, buf_size);
+}
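+
+/*
+ * Illustrative PF-side usage (a sketch; assumes the caller builds a minimal
+ * GUC_RELAY_ACTION_VFXPF_TESTLOOP request, with vfid being a placeholder):
+ *
+ *	u32 request[GUC_RELAY_MSG_MIN_LEN] = {
+ *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ *			   GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0,
+ *			   VFXPF_TESTLOOP_OPCODE_NOP),
+ *	};
+ *	u32 response[GUC_RELAY_MSG_MAX_LEN];
+ *	int ret = xe_guc_relay_send_to_vf(relay, vfid,
+ *					  request, ARRAY_SIZE(request),
+ *					  response, ARRAY_SIZE(response));
+ */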
+#endif
+
+/**
+ * xe_guc_relay_send_to_pf - Send a message to the PF.
+ * @relay: the &xe_guc_relay which will send the message
+ * @msg: request message to be sent
+ * @len: length of the message (in dwords, can't be 0)
+ * @buf: placeholder for the response message
+ * @buf_size: size of the response message placeholder (in dwords)
+ *
+ * This function can only be used by a driver running in SR-IOV VF mode.
+ *
+ * Return: Non-negative response length (in dwords) or
+ * a negative error code on failure.
+ */
+int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ relay_assert(relay, IS_SRIOV_VF(relay_to_xe(relay)));
+
+ return relay_send_to(relay, PFID, msg, len, buf, buf_size);
+}
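+
+/*
+ * The VF-side variant mirrors the PF example above; a sketch (with request
+ * and response buffers built as before):
+ *
+ *	ret = xe_guc_relay_send_to_pf(relay, request, ARRAY_SIZE(request),
+ *				      response, ARRAY_SIZE(response));
+ */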
+
+static int relay_handle_reply(struct xe_guc_relay *relay, u32 origin,
+ u32 rid, int reply, const u32 *msg, u32 len)
+{
+ struct relay_transaction *pending;
+ int err = -ESRCH;
+
+ spin_lock(&relay->lock);
+ list_for_each_entry(pending, &relay->pending_relays, link) {
+ if (pending->remote != origin || pending->rid != rid) {
+ relay_debug(relay, "%u.%u still awaits response\n",
+ pending->remote, pending->rid);
+ continue;
+ }
+ err = 0; /* found! */
+ if (reply == 0) {
+ if (len > pending->response_len) {
+ reply = -ENOBUFS;
+ err = -ENOBUFS;
+ } else {
+ memcpy(pending->response, msg, 4 * len);
+ pending->response_len = len;
+ }
+ }
+ pending->reply = reply;
+ complete_all(&pending->done);
+ break;
+ }
+ spin_unlock(&relay->lock);
+
+ return err;
+}
+
+static int relay_handle_failure(struct xe_guc_relay *relay, u32 origin,
+ u32 rid, const u32 *msg, u32 len)
+{
+ int error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[0]);
+ u32 hint __maybe_unused = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[0]);
+
+ relay_assert(relay, len);
+ relay_debug(relay, "%u.%u error %#x (%pe) hint %u debug %*ph\n",
+ origin, rid, error, ERR_PTR(-error), hint, 4 * (len - 1), msg + 1);
+
+ return relay_handle_reply(relay, origin, rid, error ?: -EREMOTEIO, NULL, 0);
+}
+
+static int relay_testloop_action_handler(struct xe_guc_relay *relay, u32 origin,
+ const u32 *msg, u32 len, u32 *response, u32 size)
+{
+ static ktime_t last_reply = 0;
+ u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+ u32 action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+ u32 opcode = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
+ ktime_t now = ktime_get();
+ bool busy;
+ int ret;
+
+ relay_assert(relay, guc_hxg_type_is_action(type));
+ relay_assert(relay, action == GUC_RELAY_ACTION_VFXPF_TESTLOOP);
+
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV))
+ return -ECONNREFUSED;
+
+ if (!last_reply)
+ last_reply = now;
+ busy = ktime_before(now, ktime_add_ms(last_reply, 2 * RELAY_TIMEOUT_MSEC));
+ if (!busy)
+ last_reply = now;
+
+ switch (opcode) {
+ case VFXPF_TESTLOOP_OPCODE_NOP:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return 0;
+ return guc_hxg_msg_encode_success(response, 0);
+ case VFXPF_TESTLOOP_OPCODE_BUSY:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return -EPROTO;
+ msleep(RELAY_TIMEOUT_MSEC / 8);
+ if (busy)
+ return -EINPROGRESS;
+ return guc_hxg_msg_encode_success(response, 0);
+ case VFXPF_TESTLOOP_OPCODE_RETRY:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return -EPROTO;
+ msleep(RELAY_TIMEOUT_MSEC / 8);
+ if (busy)
+ return guc_hxg_msg_encode_retry(response, 0);
+ return guc_hxg_msg_encode_success(response, 0);
+ case VFXPF_TESTLOOP_OPCODE_ECHO:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return -EPROTO;
+ if (size < len)
+ return -ENOBUFS;
+ ret = guc_hxg_msg_encode_success(response, len);
+ memcpy(response + ret, msg + ret, (len - ret) * sizeof(u32));
+ return len;
+ case VFXPF_TESTLOOP_OPCODE_FAIL:
+ return -EHWPOISON;
+ default:
+ break;
+ }
+
+ relay_notice(relay, "Unexpected action %#x opcode %#x\n", action, opcode);
+ return -EBADRQC;
+}
+
+static int relay_action_handler(struct xe_guc_relay *relay, u32 origin,
+ const u32 *msg, u32 len, u32 *response, u32 size)
+{
+ u32 type;
+ int ret;
+
+ relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);
+
+ if (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]) == GUC_RELAY_ACTION_VFXPF_TESTLOOP)
+ return relay_testloop_action_handler(relay, origin, msg, len, response, size);
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+
+ /* XXX: PF services will be added later */
+ ret = -EOPNOTSUPP;
+
+ if (type == GUC_HXG_TYPE_EVENT)
+ relay_assert(relay, ret <= 0);
+
+ return ret;
+}
+
+static struct relay_transaction *relay_dequeue_transaction(struct xe_guc_relay *relay)
+{
+ struct relay_transaction *txn;
+
+ spin_lock(&relay->lock);
+ txn = list_first_entry_or_null(&relay->incoming_actions, struct relay_transaction, link);
+ if (txn)
+ list_del_init(&txn->link);
+ spin_unlock(&relay->lock);
+
+ return txn;
+}
+
+static void relay_process_incoming_action(struct xe_guc_relay *relay)
+{
+ struct relay_transaction *txn;
+ bool again = false;
+ u32 type;
+ int ret;
+
+ txn = relay_dequeue_transaction(relay);
+ if (!txn)
+ return;
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, txn->request_buf[txn->offset]);
+
+ ret = relay_action_handler(relay, txn->remote,
+ txn->request_buf + txn->offset, txn->request_len,
+ txn->response_buf + txn->offset,
+ ARRAY_SIZE(txn->response_buf) - txn->offset);
+
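+	/*
+	 * -EINPROGRESS from the handler means "reply BUSY now and requeue the
+	 * action for another pass"; any other negative value below is turned
+	 * into a failure reply.
+	 */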
+ if (ret == -EINPROGRESS) {
+ again = true;
+ ret = guc_hxg_msg_encode_busy(txn->response_buf + txn->offset, 0);
+ }
+
+ if (ret > 0) {
+ txn->response_len = ret;
+ ret = relay_send_transaction(relay, txn);
+ }
+
+ if (ret < 0) {
+ u32 error = to_relay_error(ret);
+
+ relay_notice(relay, "Failed to handle %s.%u from %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(type), txn->rid, txn->remote,
+ ERR_PTR(ret), 4 * txn->request_len, txn->request_buf + txn->offset);
+
+ txn->response_len = prepare_error_reply(txn->response_buf + txn->offset,
+ txn->remote ?
+ sanitize_relay_error(error) : error,
+ txn->remote ?
+ sanitize_relay_error_hint(-ret) : -ret);
+ ret = relay_send_transaction(relay, txn);
+ again = false;
+ }
+
+ if (again) {
+ spin_lock(&relay->lock);
+ list_add(&txn->link, &relay->incoming_actions);
+ spin_unlock(&relay->lock);
+ return;
+ }
+
+ if (unlikely(ret < 0))
+ relay_notice(relay, "Failed to process action.%u (%pe) %*ph\n",
+ txn->rid, ERR_PTR(ret), 4 * txn->request_len,
+ txn->request_buf + txn->offset);
+
+ relay_release_transaction(relay, txn);
+}
+
+static bool relay_needs_worker(struct xe_guc_relay *relay)
+{
+ return !list_empty(&relay->incoming_actions);
+}
+
+static void relay_kick_worker(struct xe_guc_relay *relay)
+{
+ KUNIT_STATIC_STUB_REDIRECT(relay_kick_worker, relay);
+ queue_work(relay_to_xe(relay)->sriov.wq, &relay->worker);
+}
+
+static void relays_worker_fn(struct work_struct *w)
+{
+ struct xe_guc_relay *relay = container_of(w, struct xe_guc_relay, worker);
+
+ relay_process_incoming_action(relay);
+
+ if (relay_needs_worker(relay))
+ relay_kick_worker(relay);
+}
+
+static int relay_queue_action_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
+ const u32 *msg, u32 len)
+{
+ struct relay_transaction *txn;
+
+ txn = relay_new_incoming_transaction(relay, origin, rid, msg, len);
+ if (IS_ERR(txn))
+ return PTR_ERR(txn);
+
+ spin_lock(&relay->lock);
+ list_add_tail(&txn->link, &relay->incoming_actions);
+ spin_unlock(&relay->lock);
+
+ relay_kick_worker(relay);
+ return 0;
+}
+
+static int relay_process_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
+ const u32 *msg, u32 len)
+{
+ u32 type;
+ int err;
+
+ if (unlikely(len < GUC_HXG_MSG_MIN_LEN))
+ return -EPROTO;
+
+ if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST)
+ return -EPROTO;
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+ relay_debug(relay, "received %s.%u from %u = %*ph\n",
+ guc_hxg_type_to_string(type), rid, origin, 4 * len, msg);
+
+ switch (type) {
+ case GUC_HXG_TYPE_REQUEST:
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ case GUC_HXG_TYPE_EVENT:
+ err = relay_queue_action_msg(relay, origin, rid, msg, len);
+ break;
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ err = relay_handle_reply(relay, origin, rid, 0, msg, len);
+ break;
+ case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
+ err = relay_handle_reply(relay, origin, rid, -EBUSY, NULL, 0);
+ break;
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ err = relay_handle_reply(relay, origin, rid, -EAGAIN, NULL, 0);
+ break;
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ err = relay_handle_failure(relay, origin, rid, msg, len);
+ break;
+ default:
+ err = -EBADRQC;
+ }
+
+ if (unlikely(err))
+ relay_notice(relay, "Failed to process %s.%u from %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(type), rid, origin,
+ ERR_PTR(err), 4 * len, msg);
+
+ return err;
+}
+
+/**
+ * xe_guc_relay_process_guc2vf - Handle relay notification message from the GuC.
+ * @relay: the &xe_guc_relay which will handle the message
+ * @msg: message to be handled
+ * @len: length of the message (in dwords)
+ *
+ * This function will handle relay messages received from the GuC.
+ *
+ * This function can only be used if the driver is running in SR-IOV VF mode.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
+{
+ u32 rid;
+
+ relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
+ relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
+ XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF);
+
+ if (unlikely(!IS_SRIOV_VF(relay_to_xe(relay)) && !kunit_get_current_test()))
+ return -EPERM;
+
+ if (unlikely(!relay_is_ready(relay)))
+ return -ENODEV;
+
+ if (unlikely(len < GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN))
+ return -EPROTO;
+
+ if (unlikely(len > GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN))
+ return -EMSGSIZE;
+
+ if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
+ return -EPFNOSUPPORT;
+
+ rid = FIELD_GET(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, msg[1]);
+
+ return relay_process_msg(relay, PFID, rid,
+ msg + GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN,
+ len - GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN);
+}
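+
+/*
+ * Illustrative wire layout of the event handled above (assuming
+ * GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN == 2, as implied by the RID being
+ * read from msg[1]):
+ *
+ *	msg[0]  outer HXG header (ORIGIN=GUC, TYPE=EVENT)
+ *	msg[1]  RELAY_ID
+ *	msg[2]+ inner VF/PF relay message (at least GUC_RELAY_MSG_MIN_LEN dwords)
+ */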
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * xe_guc_relay_process_guc2pf - Handle relay notification message from the GuC.
+ * @relay: the &xe_guc_relay which will handle the message
+ * @msg: message to be handled
+ * @len: length of the message (in dwords)
+ *
+ * This function will handle relay messages received from the GuC.
+ *
+ * This function can only be used if the driver is running in SR-IOV PF mode.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
+{
+ u32 origin, rid;
+ int err;
+
+ relay_assert(relay, len >= GUC_HXG_EVENT_MSG_MIN_LEN);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
+ relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
+ XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF);
+
+ if (unlikely(!IS_SRIOV_PF(relay_to_xe(relay)) && !kunit_get_current_test()))
+ return -EPERM;
+
+ if (unlikely(!relay_is_ready(relay)))
+ return -ENODEV;
+
+ if (unlikely(len < GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN))
+ return -EPROTO;
+
+ if (unlikely(len > GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN))
+ return -EMSGSIZE;
+
+ if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
+ return -EPFNOSUPPORT;
+
+ origin = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, msg[1]);
+ rid = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, msg[2]);
+
+ if (unlikely(origin > relay_get_totalvfs(relay)))
+ return -ENOENT;
+
+ err = relay_process_msg(relay, origin, rid,
+ msg + GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN,
+ len - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN);
+
+ return err;
+}
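+
+/*
+ * The PF-side event carries the VFID before the RID; a sketch of the layout
+ * (assuming GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN == 3, as implied by the
+ * fields read from msg[1] and msg[2]):
+ *
+ *	msg[0]  outer HXG header
+ *	msg[1]  VFID of the origin
+ *	msg[2]  RELAY_ID
+ *	msg[3]+ inner VF/PF relay message
+ */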
+#endif
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_relay_test.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.h b/drivers/gpu/drm/xe/xe_guc_relay.h
new file mode 100644
index 000000000000..385429aa188a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_relay.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_RELAY_H_
+#define _XE_GUC_RELAY_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct xe_guc_relay;
+
+int xe_guc_relay_init(struct xe_guc_relay *relay);
+
+int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size);
+
+int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len);
+
+#ifdef CONFIG_PCI_IOV
+int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size);
+int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len);
+#else
+static inline int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ return -ENODEV;
+}
+static inline int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_relay_types.h b/drivers/gpu/drm/xe/xe_guc_relay_types.h
new file mode 100644
index 000000000000..5999fcb77e96
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_relay_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_RELAY_TYPES_H_
+#define _XE_GUC_RELAY_TYPES_H_
+
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct xe_guc_relay - Data used by the VF-PF Relay Communication over GuC.
+ */
+struct xe_guc_relay {
+	/** @lock: protects all internal data. */
+ spinlock_t lock;
+
+ /** @worker: dispatches incoming action messages. */
+ struct work_struct worker;
+
+ /** @pending_relays: list of sent requests that await a response. */
+ struct list_head pending_relays;
+
+ /** @incoming_actions: list of incoming relay action messages to process. */
+ struct list_head incoming_actions;
+
+ /** @pool: pool of the relay message buffers. */
+ mempool_t pool;
+
+ /** @last_rid: last Relay-ID used while sending a message. */
+ u32 last_rid;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index f22ae717b0b2..ff77bc8da1b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -23,6 +23,7 @@
#include "xe_force_wake.h"
#include "xe_gpu_scheduler.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
@@ -311,7 +312,7 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
q->guc->id - GUC_ID_START_MLRC,
order_base_2(q->width));
else
- ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
+ ida_free(&guc->submission_state.guc_ids, q->guc->id);
}
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -335,8 +336,8 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
order_base_2(q->width));
} else {
- ret = ida_simple_get(&guc->submission_state.guc_ids, 0,
- GUC_ID_NUMBER_SLRC, GFP_NOWAIT);
+ ret = ida_alloc_max(&guc->submission_state.guc_ids,
+ GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT);
}
if (ret < 0)
return ret;
@@ -811,7 +812,8 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
static void simple_error_capture(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct drm_printer p = drm_err_printer("");
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_printer p = drm_err_printer(&xe->drm, NULL);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
u32 adj_logical_mask = q->logical_mask;
@@ -928,13 +930,15 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int i = 0;
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)));
-
drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
xe_sched_job_seqno(job), q->guc->id, q->flags);
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
+ "Kernel-submitted job timed out\n");
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
+ "VM job timed out on non-killed execqueue\n");
+
simple_error_capture(q);
- xe_devcoredump(q);
+ xe_devcoredump(job);
} else {
drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
xe_sched_job_seqno(job), q->guc->id, q->flags);
@@ -1216,7 +1220,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
init_waitqueue_head(&ge->suspend_wait);
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
- q->hwe->eclass->sched_props.job_timeout_ms;
+ q->sched_props.job_timeout_ms;
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
get_submit_wq(guc),
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
@@ -1348,21 +1352,6 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
return 0;
}
-static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms)
-{
- struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
-
- xe_assert(xe, !exec_queue_registered(q));
- xe_assert(xe, !exec_queue_banned(q));
- xe_assert(xe, !exec_queue_killed(q));
-
- sched->base.timeout = job_timeout_ms;
-
- return 0;
-}
-
static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
@@ -1413,7 +1402,6 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
- .set_job_timeout = guc_exec_queue_set_job_timeout,
.suspend = guc_exec_queue_suspend,
.suspend_wait = guc_exec_queue_suspend_wait,
.resume = guc_exec_queue_resume,
@@ -1794,7 +1782,7 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
/**
* xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
- * @q: Xe exec queue.
+ * @job: faulty Xe scheduled job.
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -1803,21 +1791,17 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
* caller, using `xe_guc_exec_queue_snapshot_free`.
*/
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
+xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
{
- struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job;
struct xe_guc_submit_exec_queue_snapshot *snapshot;
int i;
snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
- if (!snapshot) {
- drm_err(&xe->drm, "Skipping GuC Engine snapshot entirely.\n");
+ if (!snapshot)
return NULL;
- }
snapshot->guc.id = q->guc->id;
memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
@@ -1833,9 +1817,7 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
GFP_ATOMIC);
- if (!snapshot->lrc) {
- drm_err(&xe->drm, "Skipping GuC Engine LRC snapshot.\n");
- } else {
+ if (snapshot->lrc) {
for (i = 0; i < q->width; ++i) {
struct xe_lrc *lrc = q->lrc + i;
@@ -1863,17 +1845,17 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
sizeof(struct pending_list_snapshot),
GFP_ATOMIC);
- if (!snapshot->pending_list) {
- drm_err(&xe->drm, "Skipping GuC Engine pending_list snapshot.\n");
- } else {
+ if (snapshot->pending_list) {
+ struct xe_sched_job *job_iter;
+
i = 0;
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
snapshot->pending_list[i].seqno =
- xe_sched_job_seqno(job);
+ xe_sched_job_seqno(job_iter);
snapshot->pending_list[i].fence =
- dma_fence_is_signaled(job->fence) ? 1 : 0;
+ dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
snapshot->pending_list[i].finished =
- dma_fence_is_signaled(&job->drm.s_fence->finished)
+ dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
? 1 : 0;
i++;
}
@@ -1959,10 +1941,28 @@ void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *s
static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{
struct xe_guc_submit_exec_queue_snapshot *snapshot;
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+ bool found = false;
- snapshot = xe_guc_exec_queue_snapshot_capture(q);
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ if (job->q == q) {
+ xe_sched_job_get(job);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&sched->base.job_list_lock);
+
+ if (!found)
+ return;
+
+ snapshot = xe_guc_exec_queue_snapshot_capture(job);
xe_guc_exec_queue_snapshot_print(snapshot, p);
xe_guc_exec_queue_snapshot_free(snapshot);
+
+ xe_sched_job_put(job);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index fc97869c5b86..723dc2bd8df9 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
struct drm_printer;
-struct xe_exec_queue;
struct xe_guc;
+struct xe_sched_job;
int xe_guc_submit_init(struct xe_guc *guc);
@@ -27,7 +27,7 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
+xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job);
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h
index 649b0a852692..72fc0f42b0a5 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h
@@ -102,9 +102,9 @@ struct xe_guc_submit_exec_queue_snapshot {
/** @sched_props: scheduling properties */
struct {
- /** @timeslice_us: timeslice period in micro-seconds */
+ /** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
- /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
} sched_props;
@@ -118,11 +118,11 @@ struct xe_guc_submit_exec_queue_snapshot {
/** @guc: GuC Engine Snapshot */
struct {
- /** @wqi_head: work queue item head */
+ /** @guc.wqi_head: work queue item head */
u32 wqi_head;
- /** @wqi_tail: work queue item tail */
+ /** @guc.wqi_tail: work queue item tail */
u32 wqi_tail;
- /** @id: GuC id for this exec_queue */
+ /** @guc.id: GuC id for this exec_queue */
u16 id;
} guc;
@@ -133,13 +133,13 @@ struct xe_guc_submit_exec_queue_snapshot {
bool parallel_execution;
/** @parallel: snapshot of the useful parallel scratch */
struct {
- /** @wq_desc: Workqueue description */
+ /** @parallel.wq_desc: Workqueue description */
struct {
- /** @head: Workqueue Head */
+ /** @parallel.wq_desc.head: Workqueue Head */
u32 head;
- /** @tail: Workqueue Tail */
+ /** @parallel.wq_desc.tail: Workqueue Tail */
u32 tail;
- /** @status: Workqueue Status */
+ /** @parallel.wq_desc.status: Workqueue Status */
u32 status;
} wq_desc;
/** @wq: Workqueue Items */
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index cd80802e8918..edcd1a950bd3 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -15,9 +15,23 @@
#include "xe_guc_fwif.h"
#include "xe_guc_log_types.h"
#include "xe_guc_pc_types.h"
+#include "xe_guc_relay_types.h"
#include "xe_uc_fw_types.h"
/**
+ * struct xe_guc_db_mgr - GuC Doorbells Manager.
+ *
+ * Note: the GuC Doorbells Manager relies on &xe_guc::submission_state.lock
+ * to protect its members.
+ */
+struct xe_guc_db_mgr {
+ /** @count: number of doorbells to manage */
+ unsigned int count;
+ /** @bitmap: bitmap to track allocated doorbells */
+ unsigned long *bitmap;
+};
+
+/**
* struct xe_guc - Graphic micro controller
*/
struct xe_guc {
@@ -31,45 +45,50 @@ struct xe_guc {
struct xe_guc_ct ct;
/** @pc: GuC Power Conservation */
struct xe_guc_pc pc;
+ /** @dbm: GuC Doorbell Manager */
+ struct xe_guc_db_mgr dbm;
/** @submission_state: GuC submission state */
struct {
- /** @exec_queue_lookup: Lookup an xe_engine from guc_id */
+ /** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
struct xarray exec_queue_lookup;
- /** @guc_ids: used to allocate new guc_ids, single-lrc */
+ /** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
struct ida guc_ids;
- /** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
+ /** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
unsigned long *guc_ids_bitmap;
- /** @stopped: submissions are stopped */
+ /** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
- /** @lock: protects submission state */
+ /** @submission_state.lock: protects submission state */
struct mutex lock;
- /** @suspend: suspend fence state */
+ /** @submission_state.suspend: suspend fence state */
struct {
- /** @lock: suspend fences lock */
+ /** @submission_state.suspend.lock: suspend fences lock */
spinlock_t lock;
- /** @context: suspend fences context */
+ /** @submission_state.suspend.context: suspend fences context */
u64 context;
- /** @seqno: suspend fences seqno */
+ /** @submission_state.suspend.seqno: suspend fences seqno */
u32 seqno;
} suspend;
#ifdef CONFIG_PROVE_LOCKING
#define NUM_SUBMIT_WQ 256
- /** @submit_wq_pool: submission ordered workqueues pool */
+ /** @submission_state.submit_wq_pool: submission ordered workqueues pool */
struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
- /** @submit_wq_idx: submission ordered workqueue index */
+ /** @submission_state.submit_wq_idx: submission ordered workqueue index */
int submit_wq_idx;
#endif
- /** @enabled: submission is enabled */
+ /** @submission_state.enabled: submission is enabled */
bool enabled;
} submission_state;
/** @hwconfig: Hardware config state */
struct {
- /** @bo: buffer object of the hardware config */
+ /** @hwconfig.bo: buffer object of the hardware config */
struct xe_bo *bo;
- /** @size: size of the hardware config */
+ /** @hwconfig.size: size of the hardware config */
u32 size;
} hwconfig;
+ /** @relay: GuC Relay Communication used in SR-IOV */
+ struct xe_guc_relay relay;
+
/**
* @notify_reg: Register which is written to notify GuC of H2G messages
*/
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index bfdd33b9b23b..1c9d38b6f5f1 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -29,7 +29,7 @@ static void heci_gsc_irq_unmask(struct irq_data *d)
/* generic irq handling */
}
-static struct irq_chip heci_gsc_irq_chip = {
+static const struct irq_chip heci_gsc_irq_chip = {
.name = "gsc_irq_chip",
.irq_mask = heci_gsc_irq_mask,
.irq_unmask = heci_gsc_irq_unmask,
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index eca109791c6a..b545f850087c 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -112,6 +112,25 @@ out:
return ret;
}
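+
+/*
+ * Note (descriptive, derived from the code below): on discrete GPUs the
+ * loadable HuC firmware BO is re-created in VRAM once the hwconfig is
+ * available, so the firmware image can be uploaded from local memory.
+ */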
+int xe_huc_init_post_hwconfig(struct xe_huc *huc)
+{
+ struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
+ struct xe_device *xe = huc_to_xe(huc);
+ int ret;
+
+ if (!IS_DGFX(huc_to_xe(huc)))
+ return 0;
+
+ if (!xe_uc_fw_is_loadable(&huc->fw))
+ return 0;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
int xe_huc_upload(struct xe_huc *huc)
{
if (!xe_uc_fw_is_loadable(&huc->fw))
diff --git a/drivers/gpu/drm/xe/xe_huc.h b/drivers/gpu/drm/xe/xe_huc.h
index 532017230287..3ab56cc14b00 100644
--- a/drivers/gpu/drm/xe/xe_huc.h
+++ b/drivers/gpu/drm/xe/xe_huc.h
@@ -17,6 +17,7 @@ enum xe_huc_auth_types {
};
int xe_huc_init(struct xe_huc *huc);
+int xe_huc_init_post_hwconfig(struct xe_huc *huc);
int xe_huc_upload(struct xe_huc *huc);
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type);
bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1fa5cf5eea97..b5e83ea172f3 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -25,6 +25,7 @@
#include "xe_reg_sr.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
+#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
@@ -34,6 +35,7 @@ struct engine_info {
const char *name;
unsigned int class : 8;
unsigned int instance : 8;
+ unsigned int irq_offset : 8;
enum xe_force_wake_domains domain;
u32 mmio_base;
};
@@ -43,6 +45,7 @@ static const struct engine_info engine_infos[] = {
.name = "rcs0",
.class = XE_ENGINE_CLASS_RENDER,
.instance = 0,
+ .irq_offset = ilog2(INTR_RCS0),
.domain = XE_FW_RENDER,
.mmio_base = RENDER_RING_BASE,
},
@@ -50,6 +53,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs0",
.class = XE_ENGINE_CLASS_COPY,
.instance = 0,
+ .irq_offset = ilog2(INTR_BCS(0)),
.domain = XE_FW_RENDER,
.mmio_base = BLT_RING_BASE,
},
@@ -57,6 +61,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs1",
.class = XE_ENGINE_CLASS_COPY,
.instance = 1,
+ .irq_offset = ilog2(INTR_BCS(1)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS1_RING_BASE,
},
@@ -64,6 +69,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs2",
.class = XE_ENGINE_CLASS_COPY,
.instance = 2,
+ .irq_offset = ilog2(INTR_BCS(2)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS2_RING_BASE,
},
@@ -71,6 +77,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs3",
.class = XE_ENGINE_CLASS_COPY,
.instance = 3,
+ .irq_offset = ilog2(INTR_BCS(3)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS3_RING_BASE,
},
@@ -78,6 +85,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs4",
.class = XE_ENGINE_CLASS_COPY,
.instance = 4,
+ .irq_offset = ilog2(INTR_BCS(4)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS4_RING_BASE,
},
@@ -85,6 +93,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs5",
.class = XE_ENGINE_CLASS_COPY,
.instance = 5,
+ .irq_offset = ilog2(INTR_BCS(5)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS5_RING_BASE,
},
@@ -92,12 +101,14 @@ static const struct engine_info engine_infos[] = {
.name = "bcs6",
.class = XE_ENGINE_CLASS_COPY,
.instance = 6,
+ .irq_offset = ilog2(INTR_BCS(6)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS6_RING_BASE,
},
[XE_HW_ENGINE_BCS7] = {
.name = "bcs7",
.class = XE_ENGINE_CLASS_COPY,
+ .irq_offset = ilog2(INTR_BCS(7)),
.instance = 7,
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS7_RING_BASE,
@@ -106,6 +117,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs8",
.class = XE_ENGINE_CLASS_COPY,
.instance = 8,
+ .irq_offset = ilog2(INTR_BCS8),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS8_RING_BASE,
},
@@ -114,6 +126,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs0",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 0,
+ .irq_offset = 32 + ilog2(INTR_VCS(0)),
.domain = XE_FW_MEDIA_VDBOX0,
.mmio_base = BSD_RING_BASE,
},
@@ -121,6 +134,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs1",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 1,
+ .irq_offset = 32 + ilog2(INTR_VCS(1)),
.domain = XE_FW_MEDIA_VDBOX1,
.mmio_base = BSD2_RING_BASE,
},
@@ -128,6 +142,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs2",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 2,
+ .irq_offset = 32 + ilog2(INTR_VCS(2)),
.domain = XE_FW_MEDIA_VDBOX2,
.mmio_base = BSD3_RING_BASE,
},
@@ -135,6 +150,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs3",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 3,
+ .irq_offset = 32 + ilog2(INTR_VCS(3)),
.domain = XE_FW_MEDIA_VDBOX3,
.mmio_base = BSD4_RING_BASE,
},
@@ -142,6 +158,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs4",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 4,
+ .irq_offset = 32 + ilog2(INTR_VCS(4)),
.domain = XE_FW_MEDIA_VDBOX4,
.mmio_base = XEHP_BSD5_RING_BASE,
},
@@ -149,6 +166,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs5",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 5,
+ .irq_offset = 32 + ilog2(INTR_VCS(5)),
.domain = XE_FW_MEDIA_VDBOX5,
.mmio_base = XEHP_BSD6_RING_BASE,
},
@@ -156,6 +174,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs6",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 6,
+ .irq_offset = 32 + ilog2(INTR_VCS(6)),
.domain = XE_FW_MEDIA_VDBOX6,
.mmio_base = XEHP_BSD7_RING_BASE,
},
@@ -163,6 +182,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs7",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 7,
+ .irq_offset = 32 + ilog2(INTR_VCS(7)),
.domain = XE_FW_MEDIA_VDBOX7,
.mmio_base = XEHP_BSD8_RING_BASE,
},
@@ -170,6 +190,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs0",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 0,
+ .irq_offset = 32 + ilog2(INTR_VECS(0)),
.domain = XE_FW_MEDIA_VEBOX0,
.mmio_base = VEBOX_RING_BASE,
},
@@ -177,6 +198,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs1",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 1,
+ .irq_offset = 32 + ilog2(INTR_VECS(1)),
.domain = XE_FW_MEDIA_VEBOX1,
.mmio_base = VEBOX2_RING_BASE,
},
@@ -184,6 +206,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs2",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 2,
+ .irq_offset = 32 + ilog2(INTR_VECS(2)),
.domain = XE_FW_MEDIA_VEBOX2,
.mmio_base = XEHP_VEBOX3_RING_BASE,
},
@@ -191,6 +214,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs3",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 3,
+ .irq_offset = 32 + ilog2(INTR_VECS(3)),
.domain = XE_FW_MEDIA_VEBOX3,
.mmio_base = XEHP_VEBOX4_RING_BASE,
},
@@ -198,6 +222,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs0",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 0,
+ .irq_offset = ilog2(INTR_CCS(0)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE0_RING_BASE,
},
@@ -205,6 +230,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs1",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 1,
+ .irq_offset = ilog2(INTR_CCS(1)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE1_RING_BASE,
},
@@ -212,6 +238,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs2",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 2,
+ .irq_offset = ilog2(INTR_CCS(2)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE2_RING_BASE,
},
@@ -219,6 +246,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs3",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 3,
+ .irq_offset = ilog2(INTR_CCS(3)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE3_RING_BASE,
},
@@ -289,6 +317,19 @@ static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
xe_rtp_match_first_render_or_compute(gt, hwe);
}
+static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ return false;
+
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
+ hwe->class != XE_ENGINE_CLASS_RENDER)
+ return false;
+
+ return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
+}
+
void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
@@ -319,6 +360,14 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
RCU_MODE_FIXED_SLICE_CCS_MODE))
},
+ /* Disable WMTP if HW doesn't support it */
+ { XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
+ XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
+ XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
+ PREEMPT_GPGPU_LEVEL_MASK,
+ PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
+ },
{}
};
@@ -397,6 +446,7 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
hwe->class = info->class;
hwe->instance = info->instance;
hwe->mmio_base = info->mmio_base;
+ hwe->irq_offset = info->irq_offset;
hwe->domain = info->domain;
hwe->name = info->name;
hwe->fence_irq = &gt->fence_irq[info->class];
@@ -700,7 +750,7 @@ struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
{
struct xe_hw_engine_snapshot *snapshot;
- int len;
+ u64 val;
if (!xe_hw_engine_is_valid(hwe))
return NULL;
@@ -710,11 +760,7 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
if (!snapshot)
return NULL;
- len = strlen(hwe->name) + 1;
- snapshot->name = kzalloc(len, GFP_ATOMIC);
- if (snapshot->name)
- strscpy(snapshot->name, hwe->name, len);
-
+ snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
snapshot->class = hwe->class;
snapshot->logical_instance = hwe->logical_instance;
snapshot->forcewake.domain = hwe->domain;
@@ -722,19 +768,35 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
hwe->domain);
snapshot->mmio_base = hwe->mmio_base;
- snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
- snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe,
- RING_HWS_PGA(0));
- snapshot->reg.ring_execlist_status_lo =
+	/* no more VF-accessible data below this point */
+ if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
+ return snapshot;
+
+ snapshot->reg.ring_execlist_status =
hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
- snapshot->reg.ring_execlist_status_hi =
- hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
- snapshot->reg.ring_execlist_sq_contents_lo =
- hw_engine_mmio_read32(hwe,
- RING_EXECLIST_SQ_CONTENTS_LO(0));
- snapshot->reg.ring_execlist_sq_contents_hi =
- hw_engine_mmio_read32(hwe,
- RING_EXECLIST_SQ_CONTENTS_HI(0));
+ val = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
+ snapshot->reg.ring_execlist_status |= val << 32;
+
+ snapshot->reg.ring_execlist_sq_contents =
+ hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
+ val = hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
+ snapshot->reg.ring_execlist_sq_contents |= val << 32;
+
+ snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
+ val = hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
+ snapshot->reg.ring_acthd |= val << 32;
+
+ snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
+ val = hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
+ snapshot->reg.ring_bbaddr |= val << 32;
+
+ snapshot->reg.ring_dma_fadd =
+ hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
+ val = hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
+ snapshot->reg.ring_dma_fadd |= val << 32;
+
+ snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
+ snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
snapshot->reg.ring_head =
hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
@@ -748,16 +810,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
- snapshot->reg.ring_acthd_udw =
- hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
- snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
- snapshot->reg.ring_bbaddr_udw =
- hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
- snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
- snapshot->reg.ring_dma_fadd_udw =
- hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
- snapshot->reg.ring_dma_fadd =
- hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));
if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
@@ -786,33 +838,25 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
snapshot->forcewake.domain, snapshot->forcewake.ref);
drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
- drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n",
- snapshot->reg.ring_execlist_status_lo);
- drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n",
- snapshot->reg.ring_execlist_status_hi);
- drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n",
- snapshot->reg.ring_execlist_sq_contents_lo);
- drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n",
- snapshot->reg.ring_execlist_sq_contents_hi);
+ drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n",
+ snapshot->reg.ring_execlist_status);
+ drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n",
+ snapshot->reg.ring_execlist_sq_contents);
drm_printf(p, "\tRING_START: 0x%08x\n", snapshot->reg.ring_start);
- drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
- drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
+ drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
+ drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
drm_printf(p, "\tRING_MODE: 0x%08x\n",
snapshot->reg.ring_mode);
- drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
- drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
- drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
- drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
- drm_printf(p, "\tACTHD: 0x%08x_%08x\n", snapshot->reg.ring_acthd_udw,
- snapshot->reg.ring_acthd);
- drm_printf(p, "\tBBADDR: 0x%08x_%08x\n", snapshot->reg.ring_bbaddr_udw,
- snapshot->reg.ring_bbaddr);
- drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n",
- snapshot->reg.ring_dma_fadd_udw,
- snapshot->reg.ring_dma_fadd);
- drm_printf(p, "\tIPEHR: 0x%08x\n\n", snapshot->reg.ipehr);
+ drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
+ drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
+ drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
+ drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
+ drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd);
+ drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr);
+ drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd);
+ drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr);
if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
drm_printf(p, "\tRCU_MODE: 0x%08x\n",
snapshot->reg.rcu_mode);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index e49bc14f0ecf..2345fb42fa39 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -73,7 +73,7 @@ static ssize_t job_timeout_max_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_max);
}
-static struct kobj_attribute job_timeout_max_attr =
+static const struct kobj_attribute job_timeout_max_attr =
__ATTR(job_timeout_max, 0644, job_timeout_max_show, job_timeout_max_store);
static ssize_t job_timeout_min_store(struct kobject *kobj,
@@ -109,7 +109,7 @@ static ssize_t job_timeout_min_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_min);
}
-static struct kobj_attribute job_timeout_min_attr =
+static const struct kobj_attribute job_timeout_min_attr =
__ATTR(job_timeout_min, 0644, job_timeout_min_show, job_timeout_min_store);
static ssize_t job_timeout_store(struct kobject *kobj,
@@ -142,7 +142,7 @@ static ssize_t job_timeout_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_ms);
}
-static struct kobj_attribute job_timeout_attr =
+static const struct kobj_attribute job_timeout_attr =
__ATTR(job_timeout_ms, 0644, job_timeout_show, job_timeout_store);
static ssize_t job_timeout_default(struct kobject *kobj,
@@ -153,7 +153,7 @@ static ssize_t job_timeout_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.job_timeout_ms);
}
-static struct kobj_attribute job_timeout_def =
+static const struct kobj_attribute job_timeout_def =
__ATTR(job_timeout_ms, 0444, job_timeout_default, NULL);
static ssize_t job_timeout_min_default(struct kobject *kobj,
@@ -164,7 +164,7 @@ static ssize_t job_timeout_min_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.job_timeout_min);
}
-static struct kobj_attribute job_timeout_min_def =
+static const struct kobj_attribute job_timeout_min_def =
__ATTR(job_timeout_min, 0444, job_timeout_min_default, NULL);
static ssize_t job_timeout_max_default(struct kobject *kobj,
@@ -175,7 +175,7 @@ static ssize_t job_timeout_max_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.job_timeout_max);
}
-static struct kobj_attribute job_timeout_max_def =
+static const struct kobj_attribute job_timeout_max_def =
__ATTR(job_timeout_max, 0444, job_timeout_max_default, NULL);
static ssize_t timeslice_duration_store(struct kobject *kobj,
@@ -234,7 +234,7 @@ static ssize_t timeslice_duration_max_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.timeslice_max);
}
-static struct kobj_attribute timeslice_duration_max_attr =
+static const struct kobj_attribute timeslice_duration_max_attr =
__ATTR(timeslice_duration_max, 0644, timeslice_duration_max_show,
timeslice_duration_max_store);
@@ -272,7 +272,7 @@ static ssize_t timeslice_duration_min_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.timeslice_min);
}
-static struct kobj_attribute timeslice_duration_min_attr =
+static const struct kobj_attribute timeslice_duration_min_attr =
__ATTR(timeslice_duration_min, 0644, timeslice_duration_min_show,
timeslice_duration_min_store);
@@ -284,7 +284,7 @@ static ssize_t timeslice_duration_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.timeslice_us);
}
-static struct kobj_attribute timeslice_duration_attr =
+static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_us, 0644, timeslice_duration_show,
timeslice_duration_store);
@@ -296,7 +296,7 @@ static ssize_t timeslice_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.timeslice_us);
}
-static struct kobj_attribute timeslice_duration_def =
+static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_us, 0444, timeslice_default, NULL);
static ssize_t timeslice_min_default(struct kobject *kobj,
@@ -307,7 +307,7 @@ static ssize_t timeslice_min_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.timeslice_min);
}
-static struct kobj_attribute timeslice_duration_min_def =
+static const struct kobj_attribute timeslice_duration_min_def =
__ATTR(timeslice_duration_min, 0444, timeslice_min_default, NULL);
static ssize_t timeslice_max_default(struct kobject *kobj,
@@ -318,7 +318,7 @@ static ssize_t timeslice_max_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.timeslice_max);
}
-static struct kobj_attribute timeslice_duration_max_def =
+static const struct kobj_attribute timeslice_duration_max_def =
__ATTR(timeslice_duration_max, 0444, timeslice_max_default, NULL);
static ssize_t preempt_timeout_store(struct kobject *kobj,
@@ -351,7 +351,7 @@ static ssize_t preempt_timeout_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_us);
}
-static struct kobj_attribute preempt_timeout_attr =
+static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_us, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t preempt_timeout_default(struct kobject *kobj,
@@ -363,7 +363,7 @@ static ssize_t preempt_timeout_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_us);
}
-static struct kobj_attribute preempt_timeout_def =
+static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_us, 0444, preempt_timeout_default, NULL);
static ssize_t preempt_timeout_min_default(struct kobject *kobj,
@@ -375,7 +375,7 @@ static ssize_t preempt_timeout_min_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_min);
}
-static struct kobj_attribute preempt_timeout_min_def =
+static const struct kobj_attribute preempt_timeout_min_def =
__ATTR(preempt_timeout_min, 0444, preempt_timeout_min_default, NULL);
static ssize_t preempt_timeout_max_default(struct kobject *kobj,
@@ -387,7 +387,7 @@ static ssize_t preempt_timeout_max_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_max);
}
-static struct kobj_attribute preempt_timeout_max_def =
+static const struct kobj_attribute preempt_timeout_max_def =
__ATTR(preempt_timeout_max, 0444, preempt_timeout_max_default, NULL);
static ssize_t preempt_timeout_max_store(struct kobject *kobj,
@@ -423,7 +423,7 @@ static ssize_t preempt_timeout_max_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_max);
}
-static struct kobj_attribute preempt_timeout_max_attr =
+static const struct kobj_attribute preempt_timeout_max_attr =
__ATTR(preempt_timeout_max, 0644, preempt_timeout_max_show,
preempt_timeout_max_store);
@@ -460,7 +460,7 @@ static ssize_t preempt_timeout_min_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_min);
}
-static struct kobj_attribute preempt_timeout_min_attr =
+static const struct kobj_attribute preempt_timeout_min_attr =
__ATTR(preempt_timeout_min, 0644, preempt_timeout_min_show,
preempt_timeout_min_store);
@@ -477,7 +477,7 @@ static const struct attribute *defaults[] = {
NULL
};
-static const struct attribute *files[] = {
+static const struct attribute * const files[] = {
&job_timeout_attr.attr,
&job_timeout_min_attr.attr,
&job_timeout_max_attr.attr,
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 39908dec042a..d7f828c76cc5 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -79,23 +79,23 @@ struct xe_hw_engine_class_intf {
* @defaults: default scheduling properties
*/
struct {
- /** @set_job_timeout: Set job timeout in ms for engine */
+ /** @sched_props.set_job_timeout: Set job timeout in ms for engine */
u32 job_timeout_ms;
- /** @job_timeout_min: Min job timeout in ms for engine */
+ /** @sched_props.job_timeout_min: Min job timeout in ms for engine */
u32 job_timeout_min;
- /** @job_timeout_max: Max job timeout in ms for engine */
+ /** @sched_props.job_timeout_max: Max job timeout in ms for engine */
u32 job_timeout_max;
- /** @timeslice_us: timeslice period in micro-seconds */
+ /** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
- /** @timeslice_min: min timeslice period in micro-seconds */
+ /** @sched_props.timeslice_min: min timeslice period in micro-seconds */
u32 timeslice_min;
- /** @timeslice_max: max timeslice period in micro-seconds */
+ /** @sched_props.timeslice_max: max timeslice period in micro-seconds */
u32 timeslice_max;
- /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
- /** @preempt_timeout_min: min preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_min: min preemption timeout in micro-seconds */
u32 preempt_timeout_min;
- /** @preempt_timeout_max: max preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_max: max preemption timeout in micro-seconds */
u32 preempt_timeout_max;
} sched_props, defaults;
};
@@ -116,6 +116,8 @@ struct xe_hw_engine {
u16 instance;
/** @logical_instance: logical instance of this hw engine */
u16 logical_instance;
+ /** @irq_offset: IRQ offset of this hw engine */
+ u16 irq_offset;
/** @mmio_base: MMIO base address of this hw engine*/
u32 mmio_base;
/**
@@ -162,62 +164,52 @@ struct xe_hw_engine_snapshot {
u16 logical_instance;
/** @forcewake: Force Wake information snapshot */
struct {
- /** @domain: force wake domain of this hw engine */
+ /** @forcewake.domain: force wake domain of this hw engine */
enum xe_force_wake_domains domain;
- /** @ref: Forcewake ref for the above domain */
+ /** @forcewake.ref: Forcewake ref for the above domain */
int ref;
} forcewake;
/** @mmio_base: MMIO base address of this hw engine*/
u32 mmio_base;
/** @reg: Useful MMIO register snapshot */
struct {
- /** @ring_hwstam: RING_HWSTAM */
+ /** @reg.ring_execlist_status: RING_EXECLIST_STATUS */
+ u64 ring_execlist_status;
+ /** @reg.ring_execlist_sq_contents: RING_EXECLIST_SQ_CONTENTS */
+ u64 ring_execlist_sq_contents;
+ /** @reg.ring_acthd: RING_ACTHD */
+ u64 ring_acthd;
+ /** @reg.ring_bbaddr: RING_BBADDR */
+ u64 ring_bbaddr;
+ /** @reg.ring_dma_fadd: RING_DMA_FADD */
+ u64 ring_dma_fadd;
+ /** @reg.ring_hwstam: RING_HWSTAM */
u32 ring_hwstam;
- /** @ring_hws_pga: RING_HWS_PGA */
+ /** @reg.ring_hws_pga: RING_HWS_PGA */
u32 ring_hws_pga;
- /** @ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */
- u32 ring_execlist_status_lo;
- /** @ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */
- u32 ring_execlist_status_hi;
- /** @ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS */
- u32 ring_execlist_sq_contents_lo;
- /** @ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS + 4 */
- u32 ring_execlist_sq_contents_hi;
- /** @ring_start: RING_START */
+ /** @reg.ring_start: RING_START */
u32 ring_start;
- /** @ring_head: RING_HEAD */
+ /** @reg.ring_head: RING_HEAD */
u32 ring_head;
- /** @ring_tail: RING_TAIL */
+ /** @reg.ring_tail: RING_TAIL */
u32 ring_tail;
- /** @ring_ctl: RING_CTL */
+ /** @reg.ring_ctl: RING_CTL */
u32 ring_ctl;
- /** @ring_mi_mode: RING_MI_MODE */
+ /** @reg.ring_mi_mode: RING_MI_MODE */
u32 ring_mi_mode;
- /** @ring_mode: RING_MODE */
+ /** @reg.ring_mode: RING_MODE */
u32 ring_mode;
- /** @ring_imr: RING_IMR */
+ /** @reg.ring_imr: RING_IMR */
u32 ring_imr;
- /** @ring_esr: RING_ESR */
+ /** @reg.ring_esr: RING_ESR */
u32 ring_esr;
- /** @ring_emr: RING_EMR */
+ /** @reg.ring_emr: RING_EMR */
u32 ring_emr;
- /** @ring_eir: RING_EIR */
+ /** @reg.ring_eir: RING_EIR */
u32 ring_eir;
- /** @ring_acthd_udw: RING_ACTHD_UDW */
- u32 ring_acthd_udw;
- /** @ring_acthd: RING_ACTHD */
- u32 ring_acthd;
- /** @ring_bbaddr_udw: RING_BBADDR_UDW */
- u32 ring_bbaddr_udw;
- /** @ring_bbaddr: RING_BBADDR */
- u32 ring_bbaddr;
- /** @ring_dma_fadd_udw: RING_DMA_FADD_UDW */
- u32 ring_dma_fadd_udw;
- /** @ring_dma_fadd: RING_DMA_FADD */
- u32 ring_dma_fadd;
- /** @ipehr: IPEHR */
+ /** @reg.ipehr: IPEHR */
u32 ipehr;
- /** @rcu_mode: RCU_MODE */
+ /** @reg.rcu_mode: RCU_MODE */
u32 rcu_mode;
} reg;
};
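
With ACTHD, BBADDR and DMA_FADD now held as single u64 fields instead of lo/udw u32 pairs, the capture side has to merge two 32-bit register halves into one value. A minimal sketch of that merge, with udw/ldw standing in for the two raw reads (the driver's xe_mmio_read64_2x32() helper does this for real, including guarding against a torn read between the two accesses):

	/* Sketch: merge upper/lower 32-bit halves of a 64-bit register,
	 * e.g. RING_ACTHD_UDW and RING_ACTHD. Illustrative only.
	 */
	static inline u64 merge_reg64(u32 udw, u32 ldw)
	{
		return ((u64)udw << 32) | ldw;
	}
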
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 174ed2185481..b82233a41606 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -10,12 +10,14 @@
#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
+#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"
+#include "xe_sriov.h"
enum xe_hwmon_reg {
REG_PKG_RAPL_LIMIT,
@@ -77,32 +79,32 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_RAPL_LIMIT;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PACKAGE_RAPL_LIMIT;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_RAPL_LIMIT;
break;
case REG_PKG_POWER_SKU:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PACKAGE_POWER_SKU;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_POWER_SKU;
break;
case REG_PKG_POWER_SKU_UNIT:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
break;
case REG_GT_PERF_STATUS:
if (xe->info.platform == XE_DG2)
reg = GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_ENERGY_STATUS;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PLATFORM_ENERGY_STATUS;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_ENERGY_STATUS;
break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
@@ -402,7 +404,7 @@ static const struct attribute_group *hwmon_groups[] = {
NULL
};
-static const struct hwmon_channel_info *hwmon_info[] = {
+static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
@@ -745,6 +747,10 @@ void xe_hwmon_register(struct xe_device *xe)
if (!IS_DGFX(xe))
return;
+ /* hwmon is not available on VFs */
+ if (IS_SRIOV_VF(xe))
+ return;
+
hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return;
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index d1f5ba4bb745..2f5d179e0d00 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -9,15 +9,18 @@
#include <drm/drm_managed.h>
+#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
-#include "xe_display.h"
#include "xe_drv.h"
+#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
/*
* Interrupt registers for a unit are always consecutive and ordered
@@ -129,6 +132,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
u32 ccs_mask, bcs_mask;
u32 irqs, dmask, smask;
u32 gsc_mask = 0;
+ u32 heci_mask = 0;
if (xe_device_uc_enabled(xe)) {
irqs = GT_RENDER_USER_INTERRUPT |
@@ -178,14 +182,23 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
- if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER))
+ /*
+ * The HECI2 interrupt is enabled via the same register as the
+ * GSCCS interrupts, but it has its own mask register.
+ */
+ if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs;
- else if (HAS_HECI_GSCFI(xe))
+ heci_mask = GSC_IRQ_INTF(1);
+ } else if (HAS_HECI_GSCFI(xe)) {
gsc_mask = GSC_IRQ_INTF(1);
+ }
+
if (gsc_mask) {
- xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask);
+ xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
}
+ if (heci_mask)
+ xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
}
}
@@ -232,6 +245,8 @@ gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
return xe_guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
+ return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);
if (instance != OTHER_GUC_INSTANCE &&
instance != OTHER_MEDIA_GUC_INSTANCE) {
@@ -249,15 +264,23 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
if (MEDIA_VER(xe) < 13)
return tile->primary_gt;
- if (class == XE_ENGINE_CLASS_VIDEO_DECODE ||
- class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ switch (class) {
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
return tile->media_gt;
-
- if (class == XE_ENGINE_CLASS_OTHER &&
- (instance == OTHER_MEDIA_GUC_INSTANCE || instance == OTHER_GSC_INSTANCE))
- return tile->media_gt;
-
- return tile->primary_gt;
+ case XE_ENGINE_CLASS_OTHER:
+ switch (instance) {
+ case OTHER_MEDIA_GUC_INSTANCE:
+ case OTHER_GSC_INSTANCE:
+ case OTHER_GSC_HECI2_INSTANCE:
+ return tile->media_gt;
+ default:
+ break;
+ }
+ fallthrough;
+ default:
+ return tile->primary_gt;
+ }
}
static void gt_irq_handler(struct xe_tile *tile,
@@ -419,7 +442,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* irq as device is inaccessible.
*/
if (master_ctl == REG_GENMASK(31, 0)) {
- dev_dbg(tile_to_xe(tile)->drm.dev,
+ drm_dbg(&tile_to_xe(tile)->drm,
"Ignore this IRQ as device might be in DPC containment.\n");
return IRQ_HANDLED;
}
@@ -484,6 +507,7 @@ static void gt_irq_reset(struct xe_tile *tile)
HAS_HECI_GSCFI(tile_to_xe(tile))) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
}
xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
@@ -498,6 +522,9 @@ static void xelp_irq_reset(struct xe_tile *tile)
gt_irq_reset(tile);
+ if (IS_SRIOV_VF(tile_to_xe(tile)))
+ return;
+
mask_and_disable(tile, PCU_IRQ_OFFSET);
}
@@ -508,6 +535,9 @@ static void dg1_irq_reset(struct xe_tile *tile)
gt_irq_reset(tile);
+ if (IS_SRIOV_VF(tile_to_xe(tile)))
+ return;
+
mask_and_disable(tile, PCU_IRQ_OFFSET);
}
@@ -518,11 +548,34 @@ static void dg1_irq_reset_mstr(struct xe_tile *tile)
xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}
+static void vf_irq_reset(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ if (GRAPHICS_VERx100(xe) < 1210)
+ xelp_intr_disable(xe);
+ else
+ xe_assert(xe, xe_device_has_memirq(xe));
+
+ for_each_tile(tile, xe, id) {
+ if (xe_device_has_memirq(xe))
+ xe_memirq_reset(&tile->sriov.vf.memirq);
+ else
+ gt_irq_reset(tile);
+ }
+}
+
static void xe_irq_reset(struct xe_device *xe)
{
struct xe_tile *tile;
u8 id;
+ if (IS_SRIOV_VF(xe))
+ return vf_irq_reset(xe);
+
for_each_tile(tile, xe, id) {
if (GRAPHICS_VERx100(xe) >= 1210)
dg1_irq_reset(tile);
@@ -545,8 +598,26 @@ static void xe_irq_reset(struct xe_device *xe)
}
}
+static void vf_irq_postinstall(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ if (xe_device_has_memirq(xe))
+ xe_memirq_postinstall(&tile->sriov.vf.memirq);
+
+ if (GRAPHICS_VERx100(xe) < 1210)
+ xelp_intr_enable(xe, true);
+ else
+ xe_assert(xe, xe_device_has_memirq(xe));
+}
+
static void xe_irq_postinstall(struct xe_device *xe)
{
+ if (IS_SRIOV_VF(xe))
+ return vf_irq_postinstall(xe);
+
xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
/*
@@ -563,8 +634,30 @@ static void xe_irq_postinstall(struct xe_device *xe)
xelp_intr_enable(xe, true);
}
+static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ unsigned int id;
+
+ spin_lock(&xe->irq.lock);
+ if (!xe->irq.enabled) {
+ spin_unlock(&xe->irq.lock);
+ return IRQ_NONE;
+ }
+ spin_unlock(&xe->irq.lock);
+
+ for_each_tile(tile, xe, id)
+ xe_memirq_handler(&tile->sriov.vf.memirq);
+
+ return IRQ_HANDLED;
+}
+
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
+ if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
+ return vf_mem_irq_handler;
+
if (GRAPHICS_VERx100(xe) >= 1210)
return dg1_irq_handler;
else
@@ -590,8 +683,9 @@ static void irq_uninstall(struct drm_device *drm, void *arg)
int xe_irq_install(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned int irq_flags = PCI_IRQ_MSIX;
irq_handler_t irq_handler;
- int err, irq;
+ int err, irq, nvec;
irq_handler = xe_irq_handler(xe);
if (!irq_handler) {
@@ -601,7 +695,19 @@ int xe_irq_install(struct xe_device *xe)
xe_irq_reset(xe);
- err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ nvec = pci_msix_vec_count(pdev);
+ if (nvec <= 0) {
+ if (nvec == -EINVAL) {
+ /* MSI-X capability is not supported by the device, fall back to MSI */
+ irq_flags = PCI_IRQ_MSI;
+ nvec = 1;
+ } else {
+ drm_err(&xe->drm, "MSIX: Failed getting count\n");
+ return nvec;
+ }
+ }
+
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
if (err < 0) {
drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
return err;
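
The probe now sizes the allocation from the device instead of hard-coding one vector: pci_msix_vec_count() reports the MSI-X table size, and -EINVAL (no MSI-X capability) selects the single-vector MSI fallback. The same pattern in isolation, with error handling trimmed:

	/* Sketch: probe MSI-X vector count, fall back to one MSI vector. */
	unsigned int flags = PCI_IRQ_MSIX;
	int nvec = pci_msix_vec_count(pdev);

	if (nvec == -EINVAL) {
		flags = PCI_IRQ_MSI;	/* no MSI-X capability */
		nvec = 1;
	} else if (nvec < 0) {
		return nvec;		/* query failed */
	}
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, flags);
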
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 0ec5ad2539f1..8c85e90220de 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -19,6 +19,8 @@
#include "xe_gt_printk.h"
#include "xe_hw_fence.h"
#include "xe_map.h"
+#include "xe_memirq.h"
+#include "xe_sriov.h"
#include "xe_vm.h"
#define LRC_VALID (1 << 0)
@@ -532,6 +534,27 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
/* TODO: Timestamp */
}
+static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
+{
+ struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->sriov.vf.memirq;
+ struct xe_device *xe = gt_to_xe(hwe->gt);
+
+ if (!IS_SRIOV_VF(xe) || !xe_device_has_memirq(xe))
+ return;
+
+ regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM |
+ MI_LRI_LRM_CS_MMIO | MI_LRM_USE_GGTT;
+ regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr;
+ regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq);
+
+ regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED;
+ regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr;
+ regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq);
+ regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr;
+ regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq);
+}
+
static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
{
struct xe_device *xe = gt_to_xe(hwe->gt);
@@ -667,6 +690,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
regs = data + LRC_PPHWSP_SIZE;
set_offsets(regs, reg_offsets(xe, hwe->class), hwe);
set_context_control(regs, hwe);
+ set_memory_based_intr(regs, hwe);
reset_stop_ring(regs, hwe);
return data;
@@ -964,6 +988,20 @@ static int dump_mi_command(struct drm_printer *p,
drm_printf(p, " - %#6x = %#010x\n", dw[i], dw[i + 1]);
return numdw;
+ case MI_LOAD_REGISTER_MEM & MI_OPCODE:
+ drm_printf(p, "[%#010x] MI_LOAD_REGISTER_MEM: %s%s\n",
+ inst_header,
+ dw[0] & MI_LRI_LRM_CS_MMIO ? "CS_MMIO " : "",
+ dw[0] & MI_LRM_USE_GGTT ? "USE_GGTT " : "");
+ if (numdw == 4)
+ drm_printf(p, " - %#6x = %#010llx\n",
+ dw[1], ((u64)(dw[3]) << 32 | (u64)(dw[2])));
+ else
+ drm_printf(p, " - %*ph (%s)\n",
+ (int)sizeof(u32) * (numdw - 1), dw + 1,
+ numdw < 4 ? "truncated" : "malformed");
+ return numdw;
+
case MI_FORCE_WAKEUP:
drm_printf(p, "[%#010x] MI_FORCE_WAKEUP\n", inst_header);
return numdw;
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 78220336062c..24f20ed66fd1 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -28,11 +28,11 @@ struct xe_lrc {
/** @ring: submission ring state */
struct {
- /** @size: size of submission ring */
+ /** @ring.size: size of submission ring */
u32 size;
- /** @tail: tail of submission ring */
+ /** @ring.tail: tail of submission ring */
u32 tail;
- /** @old_tail: shadow of tail */
+ /** @ring.old_tail: shadow of tail */
u32 old_tail;
} ring;
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
new file mode 100644
index 000000000000..76e95535d7f6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_regs.h"
+
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_hw_engine.h"
+#include "xe_map.h"
+#include "xe_memirq.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+
+#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition)
+#define memirq_debug(m, msg...) xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg)
+
+static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
+{
+ return container_of(memirq, struct xe_tile, sriov.vf.memirq);
+}
+
+static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
+{
+ return tile_to_xe(memirq_to_tile(memirq));
+}
+
+static const char *guc_name(struct xe_guc *guc)
+{
+ return xe_gt_is_media_type(guc_to_gt(guc)) ? "media GuC" : "GuC";
+}
+
+/**
+ * DOC: Memory Based Interrupts
+ *
+ * The MMIO register based interrupt infrastructure used in non-virtualized
+ * mode, or with SRIOV-8 (which supports 8 Virtual Functions), does not scale
+ * to deliver interrupts to a large number of virtual machines or containers.
+ * Memory based interrupt status reporting provides an efficient and scalable
+ * infrastructure.
+ *
+ * For memory based interrupt status reporting, the hardware sequence is:
+ * * Engine writes the interrupt event to memory
+ * (Pointer to memory location is provided by SW. This memory surface must
+ * be mapped to system memory and must be marked as un-cacheable (UC) on
+ * Graphics IP Caches)
+ * * Engine triggers an interrupt to host.
+ */
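
In practice each interrupt source thus turns into a plain memory write followed by a single CPU interrupt, and software acknowledges by clearing the written byte. A minimal sketch of the consuming side, matching the byte-per-bit convention that memirq_received() below relies on; status_map and offset stand in for the real iosys_map and byte index:

	/* Sketch: consume one memory based interrupt status byte.
	 * The engine sets the byte to 0xff; write 0 back to acknowledge.
	 */
	u8 pending = iosys_map_rd(&status_map, offset, u8);
	if (pending)
		iosys_map_wr(&status_map, offset, u8, 0);
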
+
+/**
+ * DOC: Memory Based Interrupts Page Layout
+ *
+ * `Memory Based Interrupts`_ requires three different objects, which are
+ * called "page" in the specs, even if they aren't page-sized or aligned.
+ *
+ * To simplify the code we allocate a single page-sized object and then use
+ * offsets to embedded "pages". The addresses of those "pages" are then
+ * programmed in the HW via LRI and LRM in the context image.
+ *
+ * - _`Interrupt Status Report Page`: this page contains the interrupt
+ * status vectors for each unit. Each bit in the interrupt vectors is
+ * converted to a byte, with the byte being set to 0xFF when an
+ * interrupt is triggered; interrupt vectors are 16b big so each unit
+ * gets 16B. One space is reserved for each bit in one of the
+ * GT_INTR_DWx registers, so this object needs a total of 1024B.
+ * This object needs to be 4KiB aligned.
+ *
+ * - _`Interrupt Source Report Page`: this is the equivalent of the
+ * GEN11_GT_INTR_DWx registers, with each bit in those registers being
+ * mapped to a byte here. The offsets are the same, just bytes instead
+ * of bits. This object needs to be cacheline aligned.
+ *
+ * - Interrupt Mask: the HW needs a location to fetch the interrupt
+ * mask vector to be used by the LRM in the context, so we just use
+ * the next available space in the interrupt page.
+ *
+ * ::
+ *
+ * 0x0000 +===========+ <== Interrupt Status Report Page
+ * | |
+ * | | ____ +----+----------------+
+ * | | / | 0 | USER INTERRUPT |
+ * +-----------+ __/ | 1 | |
+ * | HWE(n) | __ | | CTX SWITCH |
+ * +-----------+ \ | | WAIT SEMAPHORE |
+ * | | \____ | 15 | |
+ * | | +----+----------------+
+ * | |
+ * 0x0400 +===========+ <== Interrupt Source Report Page
+ * | HWE(0) |
+ * | HWE(1) |
+ * | |
+ * | HWE(x) |
+ * 0x0440 +===========+ <== Interrupt Enable Mask
+ * | |
+ * | |
+ * +-----------+
+ */
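
All three "pages" sit at fixed offsets inside the single 4KiB object, so each one's GGTT address is just the object base plus an offset. A sketch of that arithmetic, mirroring the XE_MEMIRQ_*_OFFSET defines added in xe_memirq_types.h further below:

	/* Sketch: "page" addresses within the one 4KiB memirq object. */
	u32 base   = xe_bo_ggtt_addr(memirq->bo);
	u32 status = base + 0x000;  /* Interrupt Status Report Page (1024B) */
	u32 source = base + 0x400;  /* Interrupt Source Report Page (64B)   */
	u32 enable = base + 0x440;  /* Interrupt Enable Mask                */
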
+
+static void __release_xe_bo(struct drm_device *drm, void *arg)
+{
+ struct xe_bo *bo = arg;
+
+ xe_bo_unpin_map_no_vm(bo);
+}
+
+static int memirq_alloc_pages(struct xe_memirq *memirq)
+{
+ struct xe_device *xe = memirq_to_xe(memirq);
+ struct xe_tile *tile = memirq_to_tile(memirq);
+ struct xe_bo *bo;
+ int err;
+
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64));
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K));
+
+ /* XXX: convert to managed bo */
+ bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT |
+ XE_BO_CREATE_GGTT_BIT |
+ XE_BO_NEEDS_UC |
+ XE_BO_NEEDS_CPU_ACCESS);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto out;
+ }
+
+ memirq_assert(memirq, !xe_bo_is_vram(bo));
+ memirq_assert(memirq, !memirq->bo);
+
+ iosys_map_memset(&bo->vmap, 0, 0, SZ_4K);
+
+ memirq->bo = bo;
+ memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET);
+ memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET);
+ memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);
+
+ memirq_assert(memirq, !memirq->source.is_iomem);
+ memirq_assert(memirq, !memirq->status.is_iomem);
+ memirq_assert(memirq, !memirq->mask.is_iomem);
+
+ memirq_debug(memirq, "page offsets: source %#x status %#x\n",
+ xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq));
+
+ return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);
+
+out:
+ xe_sriov_err(memirq_to_xe(memirq),
+ "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
+ return err;
+}
+
+static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
+{
+ iosys_map_wr(&memirq->mask, 0, u32, enable ? GENMASK(15, 0) : 0);
+
+ memirq->enabled = enable;
+}
+
+/**
+ * xe_memirq_init - Initialize data used by `Memory Based Interrupts`_.
+ * @memirq: the &xe_memirq to initialize
+ *
+ * Allocate `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
+ * used by `Memory Based Interrupts`_.
+ *
+ * These allocations are managed and will be implicitly released on unload.
+ *
+ * Note: This function shall be called only by the VF driver.
+ *
+ * If this function fails then the VF driver won't be able to operate correctly.
+ * If `Memory Based Interrupts`_ are not used, this function will return 0.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_memirq_init(struct xe_memirq *memirq)
+{
+ struct xe_device *xe = memirq_to_xe(memirq);
+ int err;
+
+ memirq_assert(memirq, IS_SRIOV_VF(xe));
+
+ if (!xe_device_has_memirq(xe))
+ return 0;
+
+ err = memirq_alloc_pages(memirq);
+ if (unlikely(err))
+ return err;
+
+ /* we need to start with all irqs enabled */
+ memirq_set_enable(memirq, true);
+
+ return 0;
+}
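
Together with the xe_irq.c changes above, the intended VF call flow is: allocate once at init, mask during IRQ reset, unmask at postinstall, and poll/dispatch from the interrupt handler. A condensed sketch (error handling elided):

	struct xe_memirq *memirq = &tile->sriov.vf.memirq;
	int err;

	err = xe_memirq_init(memirq);      /* allocate pages, start enabled */
	...
	xe_memirq_reset(memirq);           /* from xe_irq_reset()           */
	xe_memirq_postinstall(memirq);     /* from xe_irq_postinstall()     */
	xe_memirq_handler(memirq);         /* from vf_mem_irq_handler()     */
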
+
+/**
+ * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_.
+ * @memirq: the &xe_memirq to query
+ *
+ * Shall be called only by the VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: GGTT's offset of the `Interrupt Source Report Page`_.
+ */
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET;
+}
+
+/**
+ * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_.
+ * @memirq: the &xe_memirq to query
+ *
+ * Shall be called only by the VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: GGTT's offset of the `Interrupt Status Report Page`_.
+ */
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET;
+}
+
+/**
+ * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask.
+ * @memirq: the &xe_memirq to query
+ *
+ * Shall be called only by the VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: GGTT's offset of the Interrupt Enable Mask.
+ */
+u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
+}
+
+/**
+ * xe_memirq_init_guc - Prepare GuC for `Memory Based Interrupts`_.
+ * @memirq: the &xe_memirq
+ * @guc: the &xe_guc to setup
+ *
+ * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
+ * to be used by the GuC when `Memory Based Interrupts`_ are required.
+ *
+ * Shall be called only by the VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
+{
+ bool is_media = xe_gt_is_media_type(guc_to_gt(guc));
+ u32 offset = is_media ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
+ u32 source, status;
+ int err;
+
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ source = xe_memirq_source_ptr(memirq) + offset;
+ status = xe_memirq_status_ptr(memirq) + offset * SZ_16;
+
+ err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
+ source);
+ if (unlikely(err))
+ goto failed;
+
+ err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY,
+ status);
+ if (unlikely(err))
+ goto failed;
+
+ return 0;
+
+failed:
+ xe_sriov_err(memirq_to_xe(memirq),
+ "Failed to setup report pages in %s (%pe)\n",
+ guc_name(guc), ERR_PTR(err));
+ return err;
+}
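
The two writes above also document the indexing convention used throughout: a unit's interrupt bit position picks one byte in the source page and one 16-byte vector in the status page. A sketch, where bit is assumed to be a single-bit mask such as INTR_GUC:

	u32 idx    = ilog2(bit);                                  /* bit position */
	u32 source = xe_memirq_source_ptr(memirq) + idx;          /* 1B per unit  */
	u32 status = xe_memirq_status_ptr(memirq) + idx * SZ_16;  /* 16B per unit */
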
+
+/**
+ * xe_memirq_reset - Disable processing of `Memory Based Interrupts`_.
+ * @memirq: the &xe_memirq
+ *
+ * This is part of the driver IRQ setup flow.
+ *
+ * This function shall only be used by the VF driver on platforms that use
+ * `Memory Based Interrupts`_.
+ */
+void xe_memirq_reset(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+
+ if (memirq->bo)
+ memirq_set_enable(memirq, false);
+}
+
+/**
+ * xe_memirq_postinstall - Enable processing of `Memory Based Interrupts`_.
+ * @memirq: the &xe_memirq
+ *
+ * This is part of the driver IRQ setup flow.
+ *
+ * This function shall only be used by the VF driver on platforms that use
+ * `Memory Based Interrupts`_.
+ */
+void xe_memirq_postinstall(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+
+ if (memirq->bo)
+ memirq_set_enable(memirq, true);
+}
+
+static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ u8 value;
+
+ value = iosys_map_rd(vector, offset, u8);
+ if (value) {
+ if (value != 0xff)
+ xe_sriov_err_ratelimited(memirq_to_xe(memirq),
+ "Unexpected memirq value %#x from %s at %u\n",
+ value, name, offset);
+ iosys_map_wr(vector, offset, u8, 0x00);
+ }
+
+ return value;
+}
+
+static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
+ struct xe_hw_engine *hwe)
+{
+ memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);
+
+ if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
+ xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
+}
+
+static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
+ struct xe_guc *guc)
+{
+ const char *name = guc_name(guc);
+
+ memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);
+
+ if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
+ xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
+}
+
+/**
+ * xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
+ * @memirq: the &xe_memirq
+ *
+ * This function reads and dispatches `Memory Based Interrupts`_.
+ */
+void xe_memirq_handler(struct xe_memirq *memirq)
+{
+ struct xe_device *xe = memirq_to_xe(memirq);
+ struct xe_tile *tile = memirq_to_tile(memirq);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct iosys_map map;
+ unsigned int gtid;
+ struct xe_gt *gt;
+
+ if (!memirq->bo)
+ return;
+
+ memirq_assert(memirq, !memirq->source.is_iomem);
+ memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
+ memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);
+
+ for_each_gt(gt, xe, gtid) {
+ if (gt->tile != tile)
+ continue;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) {
+ map = IOSYS_MAP_INIT_OFFSET(&memirq->status,
+ hwe->irq_offset * SZ_16);
+ memirq_dispatch_engine(memirq, &map, hwe);
+ }
+ }
+ }
+
+ /* GuC and media GuC (if present) must be checked separately */
+
+ if (memirq_received(memirq, &memirq->source, ilog2(INTR_GUC), "SRC")) {
+ map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16);
+ memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc);
+ }
+
+ if (!tile->media_gt)
+ return;
+
+ if (memirq_received(memirq, &memirq->source, ilog2(INTR_MGUC), "SRC")) {
+ map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16);
+ memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc);
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
new file mode 100644
index 000000000000..2d40d03c3095
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_memirq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MEMIRQ_H_
+#define _XE_MEMIRQ_H_
+
+#include <linux/types.h>
+
+struct xe_guc;
+struct xe_memirq;
+
+int xe_memirq_init(struct xe_memirq *memirq);
+
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq);
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq);
+u32 xe_memirq_enable_ptr(struct xe_memirq *memirq);
+
+void xe_memirq_reset(struct xe_memirq *memirq);
+void xe_memirq_postinstall(struct xe_memirq *memirq);
+void xe_memirq_handler(struct xe_memirq *memirq);
+
+int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_memirq_types.h b/drivers/gpu/drm/xe/xe_memirq_types.h
new file mode 100644
index 000000000000..625b6b8736cc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_memirq_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MEMIRQ_TYPES_H_
+#define _XE_MEMIRQ_TYPES_H_
+
+#include <linux/iosys-map.h>
+
+struct xe_bo;
+
+/* ISR */
+#define XE_MEMIRQ_STATUS_OFFSET 0x0
+/* IIR */
+#define XE_MEMIRQ_SOURCE_OFFSET 0x400
+/* IMR */
+#define XE_MEMIRQ_ENABLE_OFFSET 0x440
+
+/**
+ * struct xe_memirq - Data used by the `Memory Based Interrupts`_.
+ *
+ * @bo: buffer object with `Memory Based Interrupts Page Layout`_.
+ * @source: iosys pointer to `Interrupt Source Report Page`_.
+ * @status: iosys pointer to `Interrupt Status Report Page`_.
+ * @mask: iosys pointer to Interrupt Enable Mask.
+ * @enabled: internal flag used to control processing of the interrupts.
+ */
+struct xe_memirq {
+ struct xe_bo *bo;
+ struct iosys_map source;
+ struct iosys_map status;
+ struct iosys_map mask;
+ bool enabled;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 70480c305602..a66fdf2d2991 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -12,7 +12,8 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gpu_commands.h"
#include "tests/xe_test.h"
@@ -71,6 +72,16 @@ struct xe_migrate {
#define NUM_KERNEL_PDE 17
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
+#define MAX_NUM_PTE 512
+
+/*
+ * Although MI_STORE_DATA_IMM's "length" field is 10 bits, 0x3FE is the largest
+ * legal value accepted. Since that instruction field is always stored in
+ * (val-2) format, this translates to 0x400 dwords for the true maximum length
+ * of the instruction. After subtracting the instruction header (1 dword) and
+ * address (2 dwords), 0x3FD dwords (0x1FE qwords) remain for PTE values.
+ */
+#define MAX_PTE_PER_SDI 0x1FEU
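
As a worked example of the budget this implies: writing num_qwords PTEs takes DIV_ROUND_UP(num_qwords, 0x1FE) MI_STORE_DATA_IMM commands, each adding its 3 dwords of header and address (plus an alignment noop in some paths) on top of 2 dwords per qword of payload. This is exactly how pte_update_size() and xe_migrate_update_pgtables() below count batch space:

	/* Sketch: batch dword budget for writing num_qwords PTEs via SDI. */
	u32 num_cmds = DIV_ROUND_UP(num_qwords, MAX_PTE_PER_SDI);
	u32 num_dw   = 3 * num_cmds + 2 * num_qwords;
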
/**
* xe_tile_migrate_engine() - Get this tile's migrate engine.
@@ -360,7 +371,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
- EXEC_QUEUE_FLAG_HIGH_PRIORITY);
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
} else {
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
@@ -457,7 +468,7 @@ static u32 pte_update_size(struct xe_migrate *m,
*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
/* MI_STORE_DATA_IMM */
- cmds += 3 * DIV_ROUND_UP(num_4k_pages, 0x1ff);
+ cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
/* PDE qwords */
cmds += num_4k_pages * 2;
@@ -492,7 +503,7 @@ static void emit_pte(struct xe_migrate *m,
ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
while (ptes) {
- u32 chunk = min(0x1ffU, ptes);
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = ofs;
@@ -1111,7 +1122,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
* This shouldn't be possible in practice.. might change when 16K
* pages are used. Hence the assert.
*/
- xe_tile_assert(tile, update->qwords <= 0x1ff);
+ xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
if (!ppgtt_ofs)
ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
xe_bo_addr(update->pt_bo, 0,
@@ -1120,7 +1131,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
do {
u64 addr = ppgtt_ofs + ofs * 8;
- chunk = min(update->qwords, 0x1ffU);
+ chunk = min(size, MAX_PTE_PER_SDI);
/* Ensure populatefn can do memset64 by aligning bb->cs */
if (!(bb->len & 1))
@@ -1299,7 +1310,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
batch_size = 6 + num_updates * 2;
for (i = 0; i < num_updates; i++) {
- u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, 0x1ff);
+ u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
/* align noop + MI_STORE_DATA_IMM cmd prefix */
batch_size += 4 * num_cmds + updates[i].qwords * 2;
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 5f6b53ea5528..e3db3a178760 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -20,6 +20,7 @@
#include "xe_gt_mcr.h"
#include "xe_macros.h"
#include "xe_module.h"
+#include "xe_sriov.h"
#include "xe_tile.h"
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
@@ -363,13 +364,19 @@ static int xe_verify_lmem_ready(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
+ if (!IS_DGFX(xe))
+ return 0;
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
/*
* The boot firmware initializes local memory and assesses its health.
* If memory training fails, the punit will have been instructed to
* keep the GT powered down; we won't be able to communicate with it
* and we should not continue with driver initialization.
*/
- if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
+ if (!(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
drm_err(&xe->drm, "VRAM not initialized by firmware\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index ef79552e4f2f..609d997b3e9b 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -13,6 +13,7 @@
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_sriov.h"
#include "xe_step_types.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
@@ -290,18 +291,6 @@ static const struct xe_mocs_entry dg2_mocs_desc[] = {
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
-static const struct xe_mocs_entry dg2_mocs_desc_g10_ax[] = {
- /* Wa_14011441408: Set Go to Memory for MOCS#0 */
- MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
- /* UC - Coherent; GO:Memory */
- MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
- /* UC - Non-Coherent; GO:Memory */
- MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
-
- /* WB - LC */
- MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
-};
-
static const struct xe_mocs_entry pvc_mocs_desc[] = {
/* Error */
MOCS_ENTRY(0, 0, L3_3_WB),
@@ -409,15 +398,8 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
info->unused_entries_index = 1;
break;
case XE_DG2:
- if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G10 &&
- xe->info.step.graphics >= STEP_A0 &&
- xe->info.step.graphics <= STEP_B0) {
- info->size = ARRAY_SIZE(dg2_mocs_desc_g10_ax);
- info->table = dg2_mocs_desc_g10_ax;
- } else {
- info->size = ARRAY_SIZE(dg2_mocs_desc);
- info->table = dg2_mocs_desc;
- }
+ info->size = ARRAY_SIZE(dg2_mocs_desc);
+ info->table = dg2_mocs_desc;
info->uc_index = 1;
info->n_entries = XELP_NUM_MOCS_ENTRIES;
info->unused_entries_index = 3;
@@ -558,6 +540,9 @@ void xe_mocs_init(struct xe_gt *gt)
struct xe_mocs_info table;
unsigned int flags;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
/*
* MOCS settings are split between "GLOB_MOCS" and/or "LNCFCMOCS"
* registers depending on platform.
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 1ff6bc79e7d4..e148934d554b 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -13,6 +13,7 @@
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
#define _PAT_ATS 0x47fc
#define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \
@@ -433,6 +434,10 @@ void xe_pat_init_early(struct xe_device *xe)
drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n",
GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
}
+
+ /* VFs can neither program nor dump PAT settings */
+ if (IS_SRIOV_VF(xe))
+ xe->pat.ops = NULL;
}
void xe_pat_init(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index dcc5ded1558e..557f2d88a8c1 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -15,9 +15,9 @@
#include <drm/drm_drv.h>
#include <drm/xe_pciids.h>
+#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
-#include "xe_display.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_macros.h"
@@ -165,7 +165,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_asid = 1, \
.has_flat_ccs = 1, \
.has_range_tlb_invalidation = 1, \
- .has_usm = 0 /* FIXME: implementation missing */, \
+ .has_usm = 1, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
@@ -340,14 +340,14 @@ static const struct xe_device_desc lnl_desc = {
__diag_pop();
/* Map of GMD_ID values to graphics IP */
-static struct gmdid_map graphics_ip_map[] = {
+static const struct gmdid_map graphics_ip_map[] = {
{ 1270, &graphics_xelpg },
{ 1271, &graphics_xelpg },
{ 2004, &graphics_xe2 },
};
/* Map of GMD_ID values to media IP */
-static struct gmdid_map media_ip_map[] = {
+static const struct gmdid_map media_ip_map[] = {
{ 1300, &media_xelpmp },
{ 2000, &media_xe2 },
};
@@ -774,6 +774,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
str_yes_no(xe_device_has_sriov(xe)),
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+ xe_pm_init_early(xe);
+
err = xe_device_probe(xe);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 5935cfe30204..f153ce96f69a 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -42,6 +42,13 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define PCODE_FREQUENCY_CONFIG 0x6e
+/* Frequency Config Sub Commands (param1) */
+#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
+#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
+/* Domain IDs (param2) */
+#define PCODE_MBOX_DOMAIN_HBM 0x2
+
struct pcode_err_decode {
int errno;
const char *str;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index b429c2876a76..ab283e9a8b4e 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -10,11 +10,11 @@
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
+#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
-#include "xe_display.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
@@ -125,17 +125,26 @@ int xe_pm_resume(struct xe_device *xe)
return 0;
}
-static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev)
+static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_dev *root_pdev;
root_pdev = pcie_find_root_port(pdev);
if (!root_pdev)
return false;
- /* D3Cold requires PME capability and _PR3 power resource */
- if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev))
+ /* D3Cold requires PME capability */
+ if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
+ drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
return false;
+ }
+
+ /* D3Cold requires _PR3 power resource */
+ if (!pci_pr3_present(root_pdev)) {
+ drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
+ return false;
+ }
return true;
}
@@ -163,17 +172,21 @@ static void xe_pm_runtime_init(struct xe_device *xe)
pm_runtime_put(dev);
}
-void xe_pm_init(struct xe_device *xe)
+void xe_pm_init_early(struct xe_device *xe)
{
- struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
+ drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
+}
+void xe_pm_init(struct xe_device *xe)
+{
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return;
drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
- xe->d3cold.capable = xe_pm_pci_d3cold_capable(pdev);
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
if (xe->d3cold.capable) {
xe_device_sysfs_init(xe);
@@ -214,6 +227,7 @@ struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
int xe_pm_runtime_suspend(struct xe_device *xe)
{
+ struct xe_bo *bo, *on;
struct xe_gt *gt;
u8 id;
int err = 0;
@@ -247,6 +261,16 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
*/
lock_map_acquire(&xe_device_mem_access_lockdep_map);
+ /*
+ * Take the lock for the entire list operation, as xe_ttm_bo_destroy and
+ * xe_bo_move_notify also check for and delete the bo entry from the user
+ * fault list.
+ */
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ list_for_each_entry_safe(bo, on,
+ &xe->mem_access.vram_userfault.list, vram_userfault_link)
+ xe_bo_runtime_pm_release_mmap_offset(bo);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 6b9031f7af24..64a97c6726a7 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -20,6 +20,7 @@ struct xe_device;
int xe_pm_suspend(struct xe_device *xe);
int xe_pm_resume(struct xe_device *xe);
+void xe_pm_init_early(struct xe_device *xe);
void xe_pm_init(struct xe_device *xe);
void xe_pm_runtime_fini(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 6653c045f3c9..7f54bc3e389d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -877,8 +877,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 *num_entries,
- bool rebind)
+ struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
int err;
@@ -1234,7 +1233,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
"Preparing bind, with range [%llx...%llx) engine %p.\n",
xe_vma_start(vma), xe_vma_end(vma), q);
- err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
+ err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
if (err)
goto err;
xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 7e924faeeea0..92bb06c0586e 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -198,7 +198,7 @@ static int query_engines(struct xe_device *xe,
return -EINVAL;
}
- engines = kmalloc(size, GFP_KERNEL);
+ engines = kzalloc(size, GFP_KERNEL);
if (!engines)
return -ENOMEM;
@@ -212,14 +212,10 @@ static int query_engines(struct xe_device *xe,
engines->engines[i].instance.engine_instance =
hwe->logical_instance;
engines->engines[i].instance.gt_id = gt->info.id;
- engines->engines[i].instance.pad = 0;
- memset(engines->engines[i].reserved, 0,
- sizeof(engines->engines[i].reserved));
i++;
}
- engines->pad = 0;
engines->num_engines = i;
if (copy_to_user(query_ptr, engines, size)) {
@@ -520,6 +516,49 @@ static int query_gt_topology(struct xe_device *xe,
return 0;
}
+static int
+query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
+{
+ struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
+ size_t size = sizeof(struct drm_xe_query_uc_fw_version);
+ struct drm_xe_query_uc_fw_version resp;
+ struct xe_uc_fw_version *version = NULL;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&resp, query_ptr, size))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
+ return -EINVAL;
+
+ switch (resp.uc_type) {
+ case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
+ struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;
+
+ version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ resp.branch_ver = 0;
+ resp.major_ver = version->major;
+ resp.minor_ver = version->minor;
+ resp.patch_ver = version->patch;
+
+ if (copy_to_user(query_ptr, &resp, size))
+ return -EFAULT;
+
+ return 0;
+}
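
The new query follows the uAPI's two-phase size negotiation: a first call with query->size == 0 only reports the required size, and a second call with the exact size and a zero-padded, filled-in uc_type performs the lookup. A hedged userspace sketch (struct and ioctl names as defined in the xe uAPI headers; treat the details as illustrative):

	struct drm_xe_query_uc_fw_version ver = {
		.uc_type = XE_QUERY_UC_TYPE_GUC_SUBMISSION,
	};
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,
	};

	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);  /* phase 1: sets q.size */
	q.data = (uintptr_t)&ver;
	q.size = sizeof(ver);
	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);  /* phase 2: fills ver   */
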
+
static int (* const xe_query_funcs[])(struct xe_device *xe,
struct drm_xe_device_query *query) = {
query_engines,
@@ -529,6 +568,7 @@ static int (* const xe_query_funcs[])(struct xe_device *xe,
query_hwconfig,
query_gt_topology,
query_engine_cycles,
+ query_uc_fw_version,
};
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 87adefb56024..440ac572f6e5 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -231,7 +231,7 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
if (err)
goto err_force_wake;
- p = drm_debug_printer(KBUILD_MODNAME);
+ p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
xa_for_each(&sr->xa, reg, entry) {
if (slot == RING_MAX_NONPRIV_SLOTS) {
xe_gt_err(gt,
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index e66ae1bdaf9c..3fa2ece7d228 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -7,9 +7,11 @@
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
#include "xe_rtp.h"
+#include "xe_step.h"
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
@@ -56,6 +58,12 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_DENY,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ { XE_RTP_NAME("16020183090"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(WHITELIST(CSBE_DEBUG_STATUS(RENDER_RING_BASE), 0))
+ },
+
{}
};
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 1e4c06eacd98..c4edffcd4a32 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -5,7 +5,8 @@
#include "xe_ring_ops.h"
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gpu_commands.h"
@@ -113,6 +114,19 @@ static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
return i;
}
+static int
+emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset, u32 value)
+{
+ dw[i++] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
+ dw[i++] = bit_group_1;
+ dw[i++] = offset;
+ dw[i++] = 0;
+ dw[i++] = value;
+ dw[i++] = 0;
+
+ return i;
+}
+
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
@@ -131,14 +145,7 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
flags &= ~mask_flags;
- dw[i++] = GFX_OP_PIPE_CONTROL(6);
- dw[i++] = flags;
- dw[i++] = LRC_PPHWSP_SCRATCH_ADDR;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
-
- return i;
+ return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
@@ -174,14 +181,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
- dw[i++] = GFX_OP_PIPE_CONTROL(6) | PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
- dw[i++] = flags;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
-
- return i;
+ return emit_pipe_control(dw, i, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0, 0);
}
static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int i)
@@ -189,14 +189,9 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int
if (hwe->class != XE_ENGINE_CLASS_RENDER)
return i;
- if (XE_WA(hwe->gt, 16020292621)) {
- dw[i++] = GFX_OP_PIPE_CONTROL(6);
- dw[i++] = PIPE_CONTROL_LRI_POST_SYNC;
- dw[i++] = RING_NOPID(hwe->mmio_base).addr;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
- }
+ if (XE_WA(hwe->gt, 16020292621))
+ i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC,
+ RING_NOPID(hwe->mmio_base).addr, 0);
return i;
}
@@ -204,16 +199,13 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int
static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
int i)
{
- dw[i++] = GFX_OP_PIPE_CONTROL(6);
- dw[i++] = (stall_only ? PIPE_CONTROL_CS_STALL :
- PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL) |
- PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE;
- dw[i++] = addr;
- dw[i++] = 0;
- dw[i++] = value;
- dw[i++] = 0; /* We're thrashing one extra dword. */
+ u32 flags = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE;
- return i;
+ if (!stall_only)
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+ return emit_pipe_control(dw, i, 0, flags, addr, value);
}
static u32 get_ppgtt_flag(struct xe_sched_job *job)
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 4e2ccad0e52f..8151ddafb940 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -277,3 +277,41 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
return drm_sched_job_add_dependency(&job->drm, fence);
}
+
+struct xe_sched_job_snapshot *
+xe_sched_job_snapshot_capture(struct xe_sched_job *job)
+{
+ struct xe_exec_queue *q = job->q;
+ struct xe_device *xe = q->gt->tile->xe;
+ struct xe_sched_job_snapshot *snapshot;
+ size_t len = sizeof(*snapshot) + (sizeof(u64) * q->width);
+ u16 i;
+
+ snapshot = kzalloc(len, GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
+
+ snapshot->batch_addr_len = q->width;
+ for (i = 0; i < q->width; i++)
+ snapshot->batch_addr[i] = xe_device_uncanonicalize_addr(xe, job->batch_addr[i]);
+
+ return snapshot;
+}
+
+void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot)
+{
+ kfree(snapshot);
+}
+
+void
+xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ u16 i;
+
+ if (!snapshot)
+ return;
+
+ for (i = 0; i < snapshot->batch_addr_len; i++)
+ drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
+}
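
GFP_ATOMIC is used in xe_sched_job_snapshot_capture() because it may run from the GPU hang/reset path, where sleeping allocations are not allowed. The three helpers form a capture/print/free lifecycle; a hedged consumer sketch (dump_job is illustrative):

	/* Illustrative consumer of the job-snapshot API. */
	static void dump_job(struct xe_sched_job *job, struct drm_printer *p)
	{
		struct xe_sched_job_snapshot *s = xe_sched_job_snapshot_capture(job);

		xe_sched_job_snapshot_print(s, p);	/* tolerates a NULL snapshot */
		xe_sched_job_snapshot_free(s);		/* kfree(NULL) is a no-op */
	}
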
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index 34f475ba7f50..f1a660648cf0 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -8,6 +8,7 @@
#include "xe_sched_job_types.h"
+struct drm_printer;
struct xe_vm;
#define XE_SCHED_HANG_LIMIT 1
@@ -77,4 +78,8 @@ xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
bool xe_sched_job_is_migration(struct xe_exec_queue *q);
+struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job *job);
+void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
+void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index 71213ba9735b..b1d83da50a53 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -30,11 +30,11 @@ struct xe_sched_job {
struct dma_fence *fence;
/** @user_fence: write back value when BB is complete */
struct {
- /** @used: user fence is used */
+ /** @user_fence.used: user fence is used */
bool used;
- /** @addr: address to write to */
+ /** @user_fence.addr: address to write to */
u64 addr;
- /** @value: write back value */
+ /** @user_fence.value: write back value */
u64 value;
} user_fence;
/** @migrate_flush_flags: Additional flush flags for migration jobs */
@@ -43,4 +43,9 @@ struct xe_sched_job {
u64 batch_addr[];
};
+struct xe_sched_job_snapshot {
+ u16 batch_addr_len;
+ u64 batch_addr[];
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index 42a0e0c917a0..f295d91886b1 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_managed.h>
+
#include "xe_assert.h"
#include "xe_sriov.h"
@@ -53,3 +55,33 @@ void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov)
drm_info(&xe->drm, "Running in %s mode\n",
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
}
+
+static void fini_sriov(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ destroy_workqueue(xe->sriov.wq);
+ xe->sriov.wq = NULL;
+}
+
+/**
+ * xe_sriov_init - Initialize SR-IOV specific data.
+ * @xe: the &xe_device to initialize
+ *
+ * In this function we create a dedicated workqueue that will be used
+ * by the SR-IOV specific workers.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_init(struct xe_device *xe)
+{
+ if (!IS_SRIOV(xe))
+ return 0;
+
+ xe_assert(xe, !xe->sriov.wq);
+ xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
+ if (!xe->sriov.wq)
+ return -ENOMEM;
+
+ return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe);
+}
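
Registering fini_sriov() via drmm_add_action_or_reset() ties the workqueue's teardown to the DRM device lifetime, so no explicit cleanup is needed on the driver remove path. A sketch of queuing SR-IOV work on it (queue_sriov_work is illustrative):

	/* Illustrative: queue work on the dedicated SR-IOV workqueue. */
	static bool queue_sriov_work(struct xe_device *xe, struct work_struct *w)
	{
		/* xe->sriov.wq is only allocated when IS_SRIOV(xe) */
		return xe->sriov.wq && queue_work(xe->sriov.wq, w);
	}
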
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
index 5af73a3172b0..1545552162c9 100644
--- a/drivers/gpu/drm/xe/xe_sriov.h
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -13,6 +13,7 @@
const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode);
void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov);
+int xe_sriov_init(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index 999a4311b98b..1a138108d139 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -9,6 +9,18 @@
#include <linux/build_bug.h>
/**
+ * VFID - Virtual Function Identifier
+ * @n: VF number
+ *
+ * Helper macro to represent a Virtual Function (VF) Identifier.
+ * VFID(0) is used as an alias for the PFID, which represents the Physical Function.
+ *
+ * Note: According to the PCI spec, SR-IOV VF numbers are 1-based (VF1, VF2, ...).
+ */
+#define VFID(n) (n)
+#define PFID VFID(0)
+
+/**
* enum xe_sriov_mode - SR-IOV mode
* @XE_SRIOV_MODE_NONE: bare-metal mode (non-virtualized)
* @XE_SRIOV_MODE_PF: SR-IOV Physical Function (PF) mode
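
Because VFID() is an identity mapping with VFID(0) doubling as PFID, per-function loops can treat the PF and the VFs uniformly. A small sketch, assuming a total_vfs count is known:

	/* Walk PF plus all VFs; VF numbers are 1-based per the PCI SR-IOV spec. */
	static void log_functions(unsigned int total_vfs)
	{
		unsigned int fn;

		for (fn = PFID; fn <= VFID(total_vfs); fn++)
			pr_debug("configuring function %u\n", fn);
	}
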
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index 0f8d3e7fce46..0662968d7bcb 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -9,6 +9,7 @@
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
+#include "xe_vram_freq.h"
static void xe_tile_sysfs_kobj_release(struct kobject *kobj)
{
@@ -50,6 +51,8 @@ void xe_tile_sysfs_init(struct xe_tile *tile)
tile->sysfs = &kt->base;
+ xe_vram_freq_sysfs_init(tile);
+
err = drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile);
if (err)
drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 95163c303f3e..3b97633d81d8 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -12,6 +12,7 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
+#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(u64, fence)
+ __field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
- __entry->fence = (u64)fence;
+ __entry->fence = fence;
__entry->seqno = fence->seqno;
),
- TP_printk("fence=0x%016llx, seqno=%d",
+ TP_printk("fence=%p, seqno=%d",
__entry->fence, __entry->seqno)
);
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_STRUCT__entry(
__field(size_t, size)
__field(u32, flags)
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
),
TP_fast_assign(
__entry->size = bo->size;
__entry->flags = bo->flags;
- __entry->vm = (unsigned long)bo->vm;
+ __entry->vm = bo->vm;
),
- TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+ TP_printk("size=%zu, flags=0x%02x, vm=%p",
__entry->size, __entry->flags, __entry->vm)
);
@@ -100,9 +101,27 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
-DEFINE_EVENT(xe_bo, xe_bo_move,
- TP_PROTO(struct xe_bo *bo),
- TP_ARGS(bo)
+TRACE_EVENT(xe_bo_move,
+ TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement),
+ TP_ARGS(bo, new_placement, old_placement),
+ TP_STRUCT__entry(
+ __field(struct xe_bo *, bo)
+ __field(size_t, size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ __array(char, device_id, 12)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->size = bo->size;
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+ ),
+ TP_printk("migrate object %p [size %zu] from %s to %s device_id:%s",
+ __entry->bo, __entry->size, xe_mem_type_to_name[__entry->old_placement],
+ xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);
DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +346,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_STRUCT__entry(
__field(u64, ctx)
__field(u32, seqno)
- __field(u64, fence)
+ __field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
- __entry->fence = (unsigned long)fence;
+ __entry->fence = fence;
),
- TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+ TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
__entry->ctx, __entry->fence, __entry->seqno)
);
@@ -365,7 +384,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_ARGS(vma),
TP_STRUCT__entry(
- __field(u64, vma)
+ __field(struct xe_vma *, vma)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -373,14 +392,14 @@ DECLARE_EVENT_CLASS(xe_vma,
),
TP_fast_assign(
- __entry->vma = (unsigned long)vma;
+ __entry->vma = vma;
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+ TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
__entry->vma, __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -465,16 +484,16 @@ DECLARE_EVENT_CLASS(xe_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
),
TP_fast_assign(
- __entry->vm = (unsigned long)vm;
+ __entry->vm = vm;
__entry->asid = vm->usm.asid;
),
- TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
+ TP_printk("vm=%p, asid=0x%05x", __entry->vm,
__entry->asid)
);
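
Switching the trace fields from u64 casts to typed pointers lets the compiler type-check TP_fast_assign, and the values are printed with %p (hashed by default in kernel output). Call sites are unchanged; for the new xe_bo_move event the placements index xe_mem_type_to_name[], roughly (wrapper illustrative):

	/* Illustrative call site for the reworked tracepoint. */
	static void note_move(struct xe_bo *bo, u32 new_type, u32 old_type)
	{
		trace_xe_bo_move(bo, new_type, old_type);
	}
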
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index e5d7d5e2bec1..3107d2a12426 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -11,7 +11,8 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
@@ -19,6 +20,7 @@
#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_res_cursor.h"
+#include "xe_sriov.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
@@ -205,7 +207,9 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
u64 stolen_size, io_size, pgsize;
int err;
- if (IS_DGFX(xe))
+ if (IS_SRIOV_VF(xe))
+ stolen_size = 0;
+ else if (IS_DGFX(xe))
stolen_size = detect_bar2_dgfx(xe, mgr);
else if (GRAPHICS_VERx100(xe) >= 1270)
stolen_size = detect_bar2_integrated(xe, mgr);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 53ccd338fd8c..5c83c75bc497 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -37,7 +37,14 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
-
+ { XE_RTP_NAME("Tuning: Compression Overfetch"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)),
+ },
+ { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 25e1ddfd2f86..7033f8c1b431 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -7,8 +7,10 @@
#include "xe_device.h"
#include "xe_gsc.h"
+#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
+#include "xe_guc_db_mgr.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_huc.h"
@@ -30,13 +32,15 @@ uc_to_xe(struct xe_uc *uc)
/* Should be called once at driver load only */
int xe_uc_init(struct xe_uc *uc)
{
+ struct xe_device *xe = uc_to_xe(uc);
int ret;
+ xe_device_mem_access_get(xe);
+
/*
* We call the GuC/HuC/GSC init functions even if GuC submission is off
* to correctly move our tracking of the FW state to "disabled".
*/
-
ret = xe_guc_init(&uc->guc);
if (ret)
goto err;
@@ -50,7 +54,7 @@ int xe_uc_init(struct xe_uc *uc)
goto err;
if (!xe_device_uc_enabled(uc_to_xe(uc)))
- return 0;
+ goto err;
ret = xe_wopcm_init(&uc->wopcm);
if (ret)
@@ -60,9 +64,17 @@ int xe_uc_init(struct xe_uc *uc)
if (ret)
goto err;
+ ret = xe_guc_db_mgr_init(&uc->guc.dbm, ~0);
+ if (ret)
+ goto err;
+
+ xe_device_mem_access_put(xe);
+
return 0;
err:
+ xe_device_mem_access_put(xe);
+
return ret;
}
@@ -88,6 +100,10 @@ int xe_uc_init_post_hwconfig(struct xe_uc *uc)
if (err)
return err;
+ err = xe_huc_init_post_hwconfig(&uc->huc);
+ if (err)
+ return err;
+
return xe_gsc_init_post_hwconfig(&uc->gsc);
}
@@ -256,3 +272,16 @@ int xe_uc_suspend(struct xe_uc *uc)
return xe_guc_suspend(&uc->guc);
}
+
+/**
+ * xe_uc_remove() - Clean up the UC structures before driver removal
+ * @uc: the UC object
+ *
+ * This function should only act on objects/structures that must be cleaned
+ * up before the driver removal callback completes and therefore can't be
+ * deferred to a drmm action.
+ */
+void xe_uc_remove(struct xe_uc *uc)
+{
+ xe_gsc_remove(&uc->gsc);
+}
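
Note the error path in xe_uc_init(): when xe_device_uc_enabled() is false, `goto err` is taken with ret still 0, so the function drops the mem_access reference and returns success. The bracketing pattern it implements, as a sketch (example_step is a stand-in):

	static int example_step(struct xe_device *xe) { return 0; }	/* stand-in */

	static int bracketed_init(struct xe_device *xe)
	{
		int ret;

		xe_device_mem_access_get(xe);
		ret = example_step(xe);
		if (ret)
			goto out;
		/* more steps, each taking `goto out` on failure */
	out:
		xe_device_mem_access_put(xe);	/* released on every path */
		return ret;
	}
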
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 5d5110c0c834..e4d4e3c99f0e 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -20,5 +20,6 @@ int xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
+void xe_uc_remove(struct xe_uc *uc);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 9dff96dfe455..a9d25b3fa67c 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -92,6 +92,7 @@ struct uc_fw_entry {
const char *path;
u16 major;
u16 minor;
+ u16 patch;
bool full_ver_required;
};
};
@@ -102,14 +103,15 @@ struct fw_blobs_by_type {
};
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 7)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 5)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 5)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 5)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 5)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 5)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 5)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 5))
+ fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 19, 2)) \
+ fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 19, 2)) \
+ fw_def(DG2, major_ver(i915, guc, dg2, 70, 19, 2)) \
+ fw_def(DG1, major_ver(i915, guc, dg1, 70, 19, 2)) \
+ fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 19, 2)) \
+ fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 19, 2)) \
+ fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 19, 2)) \
+ fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) \
+ fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
@@ -121,24 +123,24 @@ struct fw_blobs_by_type {
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0))
+ fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
#define fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c) \
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a ## . ## b ## . ## c))
-#define fw_filename_major_ver(dir_, uc_, shortname_, a, b) \
+#define fw_filename_major_ver(dir_, uc_, shortname_, a, b, c) \
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a))
#define fw_filename_no_ver(dir_, uc_, shortname_) \
MAKE_FW_PATH(dir_, uc_, shortname_, "")
#define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \
{ fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \
- a, b, true }
-#define uc_fw_entry_major_ver(dir_, uc_, shortname_, a, b) \
- { fw_filename_major_ver(dir_, uc_, shortname_, a, b), \
- a, b }
+ a, b, c, true }
+#define uc_fw_entry_major_ver(dir_, uc_, shortname_, a, b, c) \
+ { fw_filename_major_ver(dir_, uc_, shortname_, a, b, c), \
+ a, b, c }
#define uc_fw_entry_no_ver(dir_, uc_, shortname_) \
{ fw_filename_no_ver(dir_, uc_, shortname_), \
0, 0 }
@@ -221,6 +223,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
uc_fw->path = entries[i].path;
uc_fw->versions.wanted.major = entries[i].major;
uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->versions.wanted.patch = entries[i].patch;
uc_fw->full_ver_required = entries[i].full_ver_required;
if (uc_fw->type == XE_UC_FW_TYPE_GSC)
@@ -340,19 +343,22 @@ int xe_uc_fw_check_version_requirements(struct xe_uc_fw *uc_fw)
* Otherwise, at least the major version.
*/
if (wanted->major != found->major ||
- (uc_fw->full_ver_required && wanted->minor != found->minor)) {
- drm_notice(&xe->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ (uc_fw->full_ver_required &&
+ ((wanted->minor != found->minor) ||
+ (wanted->patch != found->patch)))) {
+ drm_notice(&xe->drm, "%s firmware %s: unexpected version: %u.%u.%u != %u.%u.%u\n",
xe_uc_fw_type_repr(uc_fw->type), uc_fw->path,
- found->major, found->minor,
- wanted->major, wanted->minor);
+ found->major, found->minor, found->patch,
+ wanted->major, wanted->minor, wanted->patch);
goto fail;
}
- if (wanted->minor > found->minor) {
- drm_notice(&xe->drm, "%s firmware (%u.%u) is recommended, but only (%u.%u) was found in %s\n",
+ if (wanted->minor > found->minor ||
+ (wanted->minor == found->minor && wanted->patch > found->patch)) {
+ drm_notice(&xe->drm, "%s firmware (%u.%u.%u) is recommended, but only (%u.%u.%u) was found in %s\n",
xe_uc_fw_type_repr(uc_fw->type),
- wanted->major, wanted->minor,
- found->major, found->minor,
+ wanted->major, wanted->minor, wanted->patch,
+ found->major, found->minor, found->patch,
uc_fw->path);
drm_info(&xe->drm, "Consider updating your linux-firmware pkg or downloading from %s\n",
XE_UC_FIRMWARE_URL);
@@ -652,14 +658,18 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
xe_assert(xe, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
+ uc_fw_override(uc_fw);
xe_uc_fw_change_status(uc_fw, uc_fw->path ?
XE_UC_FIRMWARE_SELECTED :
XE_UC_FIRMWARE_NOT_SUPPORTED);
- if (!xe_uc_fw_is_supported(uc_fw))
+ if (!xe_uc_fw_is_supported(uc_fw)) {
+ if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
+ drm_err(&xe->drm, "No GuC firmware defined for platform\n");
+ return -ENOENT;
+ }
return 0;
-
- uc_fw_override(uc_fw);
+ }
/* an empty path means the firmware is disabled */
if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) {
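
With the new patch field, the recommended-version warning fires when (wanted.minor, wanted.patch) orders after (found.minor, found.patch) under a matching major. The comparison, restated as a standalone sketch (fw_version_ok is illustrative, assuming the xe_uc_fw_version layout above):

	/* True if the found firmware meets the wanted version; majors already match. */
	static bool fw_version_ok(const struct xe_uc_fw_version *wanted,
				  const struct xe_uc_fw_version *found)
	{
		if (found->minor != wanted->minor)
			return found->minor > wanted->minor;
		return found->patch >= wanted->patch;
	}
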
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index ee914a5d8523..bc800b696866 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -124,11 +124,14 @@ struct xe_uc_fw {
/** @versions: FW versions wanted and found */
struct {
- /** @wanted: firmware version wanted by platform */
+ /** @versions.wanted: firmware version wanted by platform */
struct xe_uc_fw_version wanted;
- /** @wanted_type: type of firmware version wanted (release vs compatibility) */
+ /**
+ * @versions.wanted_type: type of firmware version wanted
+ * (release vs compatibility)
+ */
enum xe_uc_fw_version_types wanted_type;
- /** @found: fw versions found in firmware blob */
+ /** @versions.found: fw versions found in firmware blob */
struct xe_uc_fw_version found[XE_UC_FW_VER_TYPE_COUNT];
} versions;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 921ca28d49dd..e3bde897f6e8 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -13,11 +13,14 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
+#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>
+#include <generated/xe_wa_oob.h>
+
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
@@ -34,7 +37,6 @@
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
-#include "generated/xe_wa_oob.h"
#include "xe_wa.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
@@ -792,6 +794,7 @@ static void xe_vma_free(struct xe_vma *vma)
#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
@@ -804,6 +807,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
u8 id;
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
+ bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -838,6 +842,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.va.range = end - start + 1;
if (read_only)
vma->gpuva.flags |= XE_VMA_READ_ONLY;
+ if (dumpable)
+ vma->gpuva.flags |= XE_VMA_DUMPABLE;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -1051,7 +1057,9 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock);
+ mutex_lock(&vm->snap_mutex);
err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
+ mutex_unlock(&vm->snap_mutex);
XE_WARN_ON(err); /* Shouldn't be possible */
return err;
@@ -1062,7 +1070,9 @@ static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock);
+ mutex_lock(&vm->snap_mutex);
drm_gpuva_remove(&vma->gpuva);
+ mutex_unlock(&vm->snap_mutex);
if (vm->usm.last_fault_vma == vma)
vm->usm.last_fault_vma = NULL;
}
@@ -1081,7 +1091,7 @@ static struct drm_gpuva_op *xe_vm_op_alloc(void)
static void xe_vm_free(struct drm_gpuvm *gpuvm);
-static struct drm_gpuvm_ops gpuvm_ops = {
+static const struct drm_gpuvm_ops gpuvm_ops = {
.op_alloc = xe_vm_op_alloc,
.vm_bo_validate = xe_gpuvm_validate,
.vm_free = xe_vm_free,
@@ -1289,6 +1299,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->flags = flags;
init_rwsem(&vm->lock);
+ mutex_init(&vm->snap_mutex);
INIT_LIST_HEAD(&vm->rebind_list);
@@ -1414,6 +1425,7 @@ err_close:
return ERR_PTR(err);
err_no_resv:
+ mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
kfree(vm);
@@ -1538,6 +1550,8 @@ static void vm_destroy_work_func(struct work_struct *w)
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
+ mutex_destroy(&vm->snap_mutex);
+
if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
xe_device_mem_access_put(xe);
@@ -2122,6 +2136,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
@@ -2287,6 +2302,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
struct list_head *ops_list, bool last)
{
+ struct xe_device *xe = vm->xe;
struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
int err = 0;
@@ -2317,6 +2333,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->map.dumpable ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, &op->base.map, op->map.pat_index,
flags);
@@ -2341,6 +2359,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.prev,
old->pat_index, flags);
@@ -2362,6 +2383,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
xe_vma_end(vma) -
xe_vma_start(old);
op->remap.start = xe_vma_end(vma);
+ vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.start,
+ (ULL)op->remap.range);
}
}
@@ -2372,6 +2396,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.next,
old->pat_index, flags);
@@ -2392,6 +2419,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
op->remap.range -=
xe_vma_end(old) -
xe_vma_start(vma);
+ vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.start,
+ (ULL)op->remap.range);
}
}
break;
@@ -2722,7 +2752,8 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
#define SUPPORTED_FLAGS \
(DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
+ DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
+ DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -3247,3 +3278,168 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
return 0;
}
+
+struct xe_vm_snapshot {
+ unsigned long num_snaps;
+ struct {
+ u64 ofs, bo_ofs;
+ unsigned long len;
+ struct xe_bo *bo;
+ void *data;
+ struct mm_struct *mm;
+ } snap[];
+};
+
+struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
+{
+ unsigned long num_snaps = 0, i;
+ struct xe_vm_snapshot *snap = NULL;
+ struct drm_gpuva *gpuva;
+
+ if (!vm)
+ return NULL;
+
+ mutex_lock(&vm->snap_mutex);
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ if (gpuva->flags & XE_VMA_DUMPABLE)
+ num_snaps++;
+ }
+
+ if (num_snaps)
+ snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
+ if (!snap)
+ goto out_unlock;
+
+ snap->num_snaps = num_snaps;
+ i = 0;
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+ struct xe_bo *bo = vma->gpuva.gem.obj ?
+ gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
+
+ if (!(gpuva->flags & XE_VMA_DUMPABLE))
+ continue;
+
+ snap->snap[i].ofs = xe_vma_start(vma);
+ snap->snap[i].len = xe_vma_size(vma);
+ if (bo) {
+ snap->snap[i].bo = xe_bo_get(bo);
+ snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
+ } else if (xe_vma_is_userptr(vma)) {
+ struct mm_struct *mm =
+ to_userptr_vma(vma)->userptr.notifier.mm;
+
+ if (mmget_not_zero(mm))
+ snap->snap[i].mm = mm;
+ else
+ snap->snap[i].data = ERR_PTR(-EFAULT);
+
+ snap->snap[i].bo_ofs = xe_vma_userptr(vma);
+ } else {
+ snap->snap[i].data = ERR_PTR(-ENOENT);
+ }
+ i++;
+ }
+
+out_unlock:
+ mutex_unlock(&vm->snap_mutex);
+ return snap;
+}
+
+void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
+{
+	if (!snap)
+		return;
+
+	for (int i = 0; i < snap->num_snaps; i++) {
+ struct xe_bo *bo = snap->snap[i].bo;
+ struct iosys_map src;
+ int err;
+
+ if (IS_ERR(snap->snap[i].data))
+ continue;
+
+ snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
+ if (!snap->snap[i].data) {
+ snap->snap[i].data = ERR_PTR(-ENOMEM);
+ goto cleanup_bo;
+ }
+
+ if (bo) {
+ dma_resv_lock(bo->ttm.base.resv, NULL);
+ err = ttm_bo_vmap(&bo->ttm, &src);
+ if (!err) {
+ xe_map_memcpy_from(xe_bo_device(bo),
+ snap->snap[i].data,
+ &src, snap->snap[i].bo_ofs,
+ snap->snap[i].len);
+ ttm_bo_vunmap(&bo->ttm, &src);
+ }
+ dma_resv_unlock(bo->ttm.base.resv);
+ } else {
+ void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
+
+ kthread_use_mm(snap->snap[i].mm);
+ if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
+ err = 0;
+ else
+ err = -EFAULT;
+ kthread_unuse_mm(snap->snap[i].mm);
+
+ mmput(snap->snap[i].mm);
+ snap->snap[i].mm = NULL;
+ }
+
+ if (err) {
+ kvfree(snap->snap[i].data);
+ snap->snap[i].data = ERR_PTR(err);
+ }
+
+cleanup_bo:
+ xe_bo_put(bo);
+ snap->snap[i].bo = NULL;
+ }
+}
+
+void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
+{
+ unsigned long i, j;
+
+	if (!snap)
+		return;
+
+	for (i = 0; i < snap->num_snaps; i++) {
+ if (IS_ERR(snap->snap[i].data))
+ goto uncaptured;
+
+ drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
+ drm_printf(p, "[%llx].data: ",
+ snap->snap[i].ofs);
+
+ for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
+ u32 *val = snap->snap[i].data + j;
+ char dumped[ASCII85_BUFSZ];
+
+ drm_puts(p, ascii85_encode(*val, dumped));
+ }
+
+ drm_puts(p, "\n");
+ continue;
+
+uncaptured:
+ drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
+ snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
+ PTR_ERR(snap->snap[i].data));
+ }
+}
+
+void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
+{
+ unsigned long i;
+
+ if (!snap)
+ return;
+
+ for (i = 0; i < snap->num_snaps; i++) {
+ if (!IS_ERR(snap->snap[i].data))
+ kvfree(snap->snap[i].data);
+ xe_bo_put(snap->snap[i].bo);
+ if (snap->snap[i].mm)
+ mmput(snap->snap[i].mm);
+ }
+ kvfree(snap);
+}
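
The VM snapshot is deliberately two-phase: xe_vm_snapshot_capture() runs under snap_mutex with GFP_NOWAIT, so it is safe from the hang/reset path and only takes BO and mm references, while xe_vm_snapshot_capture_delayed() copies the payloads later in a context that may lock reservations and fault in user memory. A hedged consumer sketch (coredump_vm is illustrative; in the driver the copy phase runs from a worker):

	static void coredump_vm(struct xe_vm *vm, struct drm_printer *p)
	{
		struct xe_vm_snapshot *snap = xe_vm_snapshot_capture(vm);

		xe_vm_snapshot_capture_delayed(snap);	/* copy phase */
		xe_vm_snapshot_print(snap, p);		/* NULL-safe */
		xe_vm_snapshot_free(snap);
	}
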
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 9654a0612fc2..6df1f1c7f85d 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -211,8 +211,6 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
int xe_vm_invalidate_vma(struct xe_vma *vma);
-extern struct ttm_device_funcs xe_ttm_funcs;
-
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
@@ -273,3 +271,8 @@ static inline void vm_dbg(const struct drm_device *dev,
{ /* noop */ }
#endif
#endif
+
+struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
+void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
+void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
+void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index a603cc2eb56b..7d4f810f9c04 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -31,6 +31,7 @@ struct xe_vm;
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -159,6 +160,11 @@ struct xe_vm {
* VM
*/
struct rw_semaphore lock;
+ /**
+	 * @snap_mutex: Mutex used to guard insertions into and removals from
+	 * the GPUVA tree, so we can take a snapshot safely from devcoredump.
+ */
+ struct mutex snap_mutex;
/**
* @rebind_list: list of VMAs that need rebinding. Protected by the
@@ -294,6 +300,8 @@ struct xe_vma_op_map {
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
+ /** @dumpable: whether BO is dumped on GPU hang */
+ bool dumpable;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
new file mode 100644
index 000000000000..079cc283a186
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#include <linux/sysfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_gt_types.h"
+#include "xe_pcode.h"
+#include "xe_pcode_api.h"
+#include "xe_tile.h"
+#include "xe_tile_sysfs.h"
+#include "xe_vram_freq.h"
+
+/**
+ * DOC: Xe VRAM freq
+ *
+ * Provides sysfs entries for the VRAM frequency of a tile.
+ *
+ * device/tile#/memory/freq0/max_freq - This is the maximum frequency. The value is read-only
+ *                                      as it is the fixed fuse point P0, not the current
+ *                                      system configuration.
+ * device/tile#/memory/freq0/min_freq - This is the minimum frequency. The value is read-only
+ *                                      as it is the fixed fuse point PN, not the current
+ *                                      system configuration.
+ */
+
+static struct xe_tile *dev_to_tile(struct device *dev)
+{
+ return kobj_to_tile(dev->kobj.parent);
+}
+
+static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xe_tile *tile = dev_to_tile(dev);
+ struct xe_gt *gt = tile->primary_gt;
+ u32 val, mbox;
+ int err;
+
+ mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
+ | REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_P0)
+ | REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
+
+ err = xe_pcode_read(gt, mbox, &val, NULL);
+ if (err)
+ return err;
+
+ /* data_out - Fused P0 for domain ID in units of 50 MHz */
+ val *= 50;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+static DEVICE_ATTR_RO(max_freq);
+
+static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xe_tile *tile = dev_to_tile(dev);
+ struct xe_gt *gt = tile->primary_gt;
+ u32 val, mbox;
+ int err;
+
+ mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
+ | REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_PN)
+ | REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
+
+ err = xe_pcode_read(gt, mbox, &val, NULL);
+ if (err)
+ return err;
+
+ /* data_out - Fused Pn for domain ID in units of 50 MHz */
+ val *= 50;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+static DEVICE_ATTR_RO(min_freq);
+
+static struct attribute *freq_attrs[] = {
+ &dev_attr_max_freq.attr,
+ &dev_attr_min_freq.attr,
+ NULL
+};
+
+static const struct attribute_group freq_group_attrs = {
+ .name = "freq0",
+ .attrs = freq_attrs,
+};
+
+static void vram_freq_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct kobject *kobj = arg;
+
+ sysfs_remove_group(kobj, &freq_group_attrs);
+ kobject_put(kobj);
+}
+
+/**
+ * xe_vram_freq_sysfs_init - Initialize vram frequency sysfs component
+ * @tile: Xe Tile object
+ *
+ * This must be called after the main tile sysfs component is ready.
+ */
+void xe_vram_freq_sysfs_init(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct kobject *kobj;
+ int err;
+
+ if (xe->info.platform != XE_PVC)
+ return;
+
+ kobj = kobject_create_and_add("memory", tile->sysfs);
+	if (!kobj) {
+		drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
+		return;
+	}
+
+ err = sysfs_create_group(kobj, &freq_group_attrs);
+ if (err) {
+ kobject_put(kobj);
+ drm_warn(&xe->drm, "failed to register vram freq sysfs, err: %d\n", err);
+ return;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, vram_freq_sysfs_fini, kobj);
+ if (err)
+ drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+}
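
The attributes land under the tile's sysfs directory as device/tile#/memory/freq0/{min,max}_freq, in MHz. A minimal userspace reader, assuming a card0/tile0 path (adjust for the actual topology):

	#include <stdio.h>

	int main(void)
	{
		unsigned int mhz;
		FILE *f = fopen("/sys/class/drm/card0/device/tile0/memory/freq0/max_freq", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &mhz) == 1)
			printf("max VRAM freq: %u MHz\n", mhz);
		fclose(f);
		return 0;
	}
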
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.h b/drivers/gpu/drm/xe/xe_vram_freq.h
new file mode 100644
index 000000000000..cbe8c12fbd64
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vram_freq.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_VRAM_FREQ_H_
+#define _XE_VRAM_FREQ_H_
+
+struct xe_tile;
+
+void xe_vram_freq_sysfs_init(struct xe_tile *tile);
+
+#endif /* _XE_VRAM_FREQ_H_ */
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 5f61dd87c586..a0264eedd443 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -9,7 +9,8 @@
#include <kunit/visibility.h>
#include <linux/compiler_types.h>
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
@@ -125,13 +126,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
/* DG2 */
- { XE_RTP_NAME("16010515920"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10),
- GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(VIDEO_DECODE)),
- XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F18(0), ALNUNIT_CLKGATE_DIS)),
- XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
- },
{ XE_RTP_NAME("22010523718"),
XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, CG3DDISCFEG_CLKGATE_DIS))
@@ -140,61 +134,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
XE_RTP_ACTIONS(SET(SUBSLICE_UNIT_LEVEL_CLKGATE, DSS_ROUTER_CLKGATE_DIS))
},
- { XE_RTP_NAME("14012362059"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB))
- },
- { XE_RTP_NAME("14012362059"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB))
- },
- { XE_RTP_NAME("14010948348"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011037102"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLCGCTL9444, LTCDD_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011371254"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011431319"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLCGCTL9440,
- GAMTLBOACS_CLKGATE_DIS |
- GAMTLBVDBOX7_CLKGATE_DIS | GAMTLBVDBOX6_CLKGATE_DIS |
- GAMTLBVDBOX5_CLKGATE_DIS | GAMTLBVDBOX4_CLKGATE_DIS |
- GAMTLBVDBOX3_CLKGATE_DIS | GAMTLBVDBOX2_CLKGATE_DIS |
- GAMTLBVDBOX1_CLKGATE_DIS | GAMTLBVDBOX0_CLKGATE_DIS |
- GAMTLBKCR_CLKGATE_DIS | GAMTLBGUC_CLKGATE_DIS |
- GAMTLBBLT_CLKGATE_DIS),
- SET(UNSLCGCTL9444,
- GAMTLBGFXA0_CLKGATE_DIS | GAMTLBGFXA1_CLKGATE_DIS |
- GAMTLBCOMPA0_CLKGATE_DIS | GAMTLBCOMPA1_CLKGATE_DIS |
- GAMTLBCOMPB0_CLKGATE_DIS | GAMTLBCOMPB1_CLKGATE_DIS |
- GAMTLBCOMPC0_CLKGATE_DIS | GAMTLBCOMPC1_CLKGATE_DIS |
- GAMTLBCOMPD0_CLKGATE_DIS | GAMTLBCOMPD1_CLKGATE_DIS |
- GAMTLBMERT_CLKGATE_DIS |
- GAMTLBVEBOX3_CLKGATE_DIS | GAMTLBVEBOX2_CLKGATE_DIS |
- GAMTLBVEBOX1_CLKGATE_DIS | GAMTLBVEBOX0_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14010569222"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, GAMEDIA_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011028019"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(SSMCGCTL9530, RTFUNIT_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14010680813"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_GAMSTLB_CTRL,
- CONTROL_BLOCK_CLKGATE_DIS |
- EGRESS_BLOCK_CLKGATE_DIS |
- TAG_BLOCK_CLKGATE_DIS))
- },
{ XE_RTP_NAME("14014830051"),
XE_RTP_RULES(PLATFORM(DG2)),
XE_RTP_ACTIONS(CLR(SARB_CHICKEN1, COMP_CKN_IN))
@@ -212,10 +151,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE))
},
- { XE_RTP_NAME("14010648519"),
- XE_RTP_RULES(PLATFORM(DG2)),
- XE_RTP_ACTIONS(SET(XEHP_L3NODEARBCFG, XEHP_LNESPARE))
- },
/* PVC */
@@ -377,13 +312,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
POLYGON_TRIFAN_LINELOOP_DISABLE))
},
{ XE_RTP_NAME("22012826095, 22013059131"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW,
- MAXREQS_PER_BANK,
- REG_FIELD_PREP(MAXREQS_PER_BANK, 2)))
- },
- { XE_RTP_NAME("22012826095, 22013059131"),
XE_RTP_RULES(SUBPLATFORM(DG2, G11),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW,
@@ -391,27 +319,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
REG_FIELD_PREP(MAXREQS_PER_BANK, 2)))
},
{ XE_RTP_NAME("22013059131"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT))
- },
- { XE_RTP_NAME("22013059131"),
XE_RTP_RULES(SUBPLATFORM(DG2, G11),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT))
},
- { XE_RTP_NAME("14010918519"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW,
- FORCE_SLM_FENCE_SCOPE_TO_TILE |
- FORCE_UGM_FENCE_SCOPE_TO_TILE,
- /*
- * Ignore read back as it always returns 0 in these
- * steps
- */
- .read_mask = 0))
- },
{ XE_RTP_NAME("14015227452"),
XE_RTP_RULES(PLATFORM(DG2),
FUNC(xe_rtp_match_first_render_or_compute)),
@@ -428,22 +339,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3))
},
- { XE_RTP_NAME("16011620976, 22015475538"),
+ { XE_RTP_NAME("22015475538"),
XE_RTP_RULES(PLATFORM(DG2),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8))
},
{ XE_RTP_NAME("22012654132"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC,
- /*
- * Register can't be read back for verification on
- * DG2 due to Wa_14012342262
- */
- .read_mask = 0))
- },
- { XE_RTP_NAME("22012654132"),
XE_RTP_RULES(SUBPLATFORM(DG2, G11),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC,
@@ -461,68 +362,11 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN2, DISABLE_READ_SUPPRESSION))
},
- { XE_RTP_NAME("14013392000"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN2, ENABLE_LARGE_GRF_MODE))
- },
- { XE_RTP_NAME("14012419201"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
- DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX))
- },
- { XE_RTP_NAME("14012419201"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
- DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX))
- },
- { XE_RTP_NAME("1308578152"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- ENGINE_CLASS(RENDER),
- FUNC(xe_rtp_match_first_gslice_fused_off)),
- XE_RTP_ACTIONS(CLR(CS_DEBUG_MODE1(RENDER_RING_BASE),
- REPLAY_MODE_GRANULARITY))
- },
{ XE_RTP_NAME("22010960976, 14013347512"),
XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(CLR(XEHP_HDC_CHICKEN0,
LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK))
},
- { XE_RTP_NAME("1608949956, 14010198302"),
- XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN,
- MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE))
- },
- { XE_RTP_NAME("22010430635"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
- DISABLE_GRF_CLEAR))
- },
- { XE_RTP_NAME("14013202645"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
- },
- { XE_RTP_NAME("14013202645"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
- },
- { XE_RTP_NAME("22012532006"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7,
- DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA))
- },
- { XE_RTP_NAME("22012532006"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7,
- DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA))
- },
{ XE_RTP_NAME("14015150844"),
XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_HDC_CHICKEN0, DIS_ATOMIC_CHAINING_TYPED_WRITES,
@@ -612,7 +456,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
-
+ { XE_RTP_NAME("16018610683"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE))
+ },
{}
};
@@ -652,21 +499,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
/* DG2 */
- { XE_RTP_NAME("16011186671"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(CLR(VFLSKPD, DIS_MULT_MISS_RD_SQUASH),
- SET(VFLSKPD, DIS_OVER_FETCH_CACHE))
- },
- { XE_RTP_NAME("14010469329"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3,
- XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE))
- },
- { XE_RTP_NAME("14010698770, 22010613112, 22010465075"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3,
- DISABLE_CPS_AWARE_COLOR_PIPE))
- },
{ XE_RTP_NAME("16013271637"),
XE_RTP_RULES(PLATFORM(DG2)),
XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1,
@@ -708,6 +540,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE))
},
+ { XE_RTP_NAME("14019877138"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
+ },
/* Xe2_LPG */
@@ -739,6 +575,11 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
},
+ { XE_RTP_NAME("16020183090"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 727bdc429212..b138cbd51bdb 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,13 +1,8 @@
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
-16011759253 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
22011391025 PLATFORM(DG2)
-14012197797 PLATFORM(DG2), GRAPHICS_STEP(A0, B0)
-16011777198 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0)
- SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)
-22012727170 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0)
- SUBPLATFORM(DG2, G11)
+22012727170 SUBPLATFORM(DG2, G11)
22012727685 SUBPLATFORM(DG2, G11)
16015675438 PLATFORM(PVC)
SUBPLATFORM(DG2, G10)
@@ -22,3 +17,8 @@
14019821291 MEDIA_VERSION_RANGE(1300, 2000)
14015076503 MEDIA_VERSION(1300)
16020292621 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
+14018913170 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
+ MEDIA_VERSION(2000), GRAPHICS_STEP(A0, A1)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+ MEDIA_VERSION(1300)
+ PLATFORM(DG2)
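
Each OOB rule is compiled into <generated/xe_wa_oob.h> and checked at runtime with XE_WA(), as the xe_ring_ops.c hunk above does for 16020292621. A sketch for the new 14018913170 entry:

	/* Sketch: gate driver-side workaround code on the new OOB rule. */
	static bool needs_wa_14018913170(struct xe_gt *gt)
	{
		return XE_WA(gt, 14018913170);
	}
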
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index a75eeba7bfe5..f69721339201 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -148,7 +148,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
if (q) {
if (q->ops->reset_status(q)) {
- drm_info(&xe->drm, "exec gueue reset detected\n");
+ drm_info(&xe->drm, "exec queue reset detected\n");
err = -EIO;
break;
}
diff --git a/drivers/gpu/drm/xe/xe_wopcm_types.h b/drivers/gpu/drm/xe/xe_wopcm_types.h
index 486d850c4084..99d34837c408 100644
--- a/drivers/gpu/drm/xe/xe_wopcm_types.h
+++ b/drivers/gpu/drm/xe/xe_wopcm_types.h
@@ -16,9 +16,9 @@ struct xe_wopcm {
u32 size;
/** @guc: GuC WOPCM Region info */
struct {
- /** @base: GuC WOPCM base which is offset from WOPCM base */
+ /** @guc.base: GuC WOPCM base which is offset from WOPCM base */
u32 base;
- /** @size: Size of the GuC WOPCM region */
+ /** @guc.size: Size of the GuC WOPCM region */
u32 size;
} guc;
};
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 407bc07cec69..8a39b3accce5 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1166,7 +1166,7 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
/* Choose clock source based on the DT clock handle. */
zynqmp_disp_avbuf_set_clocks_sources(disp, disp->dpsub->vid_clk_from_ps,
disp->dpsub->aud_clk_from_ps,
- true);
+ disp->dpsub->vid_clk_from_ps);
zynqmp_disp_avbuf_enable_channels(disp);
zynqmp_disp_avbuf_enable_audio(disp);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index a0606fab0e22..1846c4971fd8 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1560,12 +1560,12 @@ disconnected:
return connector_status_disconnected;
}
-static struct edid *zynqmp_dp_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
- return drm_get_edid(connector, &dp->aux.ddc);
+ return drm_edid_read_ddc(connector, &dp->aux.ddc);
}
static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
@@ -1579,7 +1579,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_check = zynqmp_dp_bridge_atomic_check,
.detect = zynqmp_dp_bridge_detect,
- .get_edid = zynqmp_dp_bridge_get_edid,
+ .edid_read = zynqmp_dp_bridge_edid_read,
};
/* -----------------------------------------------------------------------------
@@ -1624,8 +1624,17 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
u32 status, mask;
status = zynqmp_dp_read(dp, ZYNQMP_DP_INT_STATUS);
+ /* clear status register as soon as we read it */
+ zynqmp_dp_write(dp, ZYNQMP_DP_INT_STATUS, status);
mask = zynqmp_dp_read(dp, ZYNQMP_DP_INT_MASK);
- if (!(status & ~mask))
+
+ /*
+ * Status register may report some events, which corresponding interrupts
+	 * The status register may report events whose corresponding interrupts
+	 * have been disabled. Filter those events out against the interrupt mask.
+ status &= ~mask;
+
+ if (!status)
return IRQ_NONE;
/* dbg for diagnostic, but not much that the driver can do */
@@ -1634,8 +1643,6 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
if (status & ZYNQMP_DP_INT_CHBUF_OVERFLW_MASK)
dev_dbg_ratelimited(dp->dev, "overflow interrupt\n");
- zynqmp_dp_write(dp, ZYNQMP_DP_INT_STATUS, status);
-
if (status & ZYNQMP_DP_INT_VBLANK_START)
zynqmp_dpsub_drm_handle_vblank(dp->dpsub);
@@ -1721,6 +1728,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ bridge->of_node = dp->dev->of_node;
dpsub->bridge = bridge;
/*
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 84d042796d2e..783975d1384f 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -365,7 +365,7 @@ static const struct dev_pm_ops host1x_device_pm_ops = {
.restore = pm_generic_restore,
};
-struct bus_type host1x_bus_type = {
+const struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
diff --git a/drivers/gpu/host1x/bus.h b/drivers/gpu/host1x/bus.h
index a4adf9abc3b4..a80ceadfeb34 100644
--- a/drivers/gpu/host1x/bus.h
+++ b/drivers/gpu/host1x/bus.h
@@ -10,7 +10,7 @@
struct bus_type;
struct host1x;
-extern struct bus_type host1x_bus_type;
+extern const struct bus_type host1x_bus_type;
int host1x_register(struct host1x *host1x);
int host1x_unregister(struct host1x *host1x);
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index d1336e438f4f..407ed9b9cf64 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -625,8 +625,7 @@ void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
struct host1x_channel *channel = cdma_to_channel(cdma);
struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
- unsigned int space = cdma->slots_free;
- unsigned int needed = 2, extra = 0;
+ unsigned int space, needed = 2, extra = 0;
if (host1x_debug_trace_cmdbuf)
trace_host1x_cdma_push_wide(dev_name(channel->dev), op1, op2,
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 2785935da497..558152575d10 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2091,9 +2091,6 @@ static int tc358743_probe(struct i2c_client *client)
state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
sd->dev = &client->dev;
- err = v4l2_async_register_subdev(sd);
- if (err < 0)
- goto err_hdl;
mutex_init(&state->confctl_mutex);
@@ -2151,6 +2148,10 @@ static int tc358743_probe(struct i2c_client *client)
if (err)
goto err_work_queues;
+ err = v4l2_async_register_subdev(sd);
+ if (err < 0)
+ goto err_work_queues;
+
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
diff --git a/drivers/staging/sm750fb/Kconfig b/drivers/staging/sm750fb/Kconfig
index ab3d9b057d56..08bcccdd0f1c 100644
--- a/drivers/staging/sm750fb/Kconfig
+++ b/drivers/staging/sm750fb/Kconfig
@@ -6,7 +6,6 @@ config FB_SM750
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- select VIDEO_NOMODESET
help
Frame buffer driver for the Silicon Motion SM750 chip
with 2D acceleration and dual head support.
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index b694d7669d32..44c9ef1435a2 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -11,6 +11,10 @@ config APERTURE_HELPERS
Support tracking and hand-over of aperture ownership. Required
by graphics drivers for firmware-provided framebuffers.
+config SCREEN_INFO
+ bool
+ default n
+
config STI_CORE
bool
depends on PARISC
@@ -18,10 +22,7 @@ config STI_CORE
STI refers to the HP "Standard Text Interface" which is a set of
BIOS routines contained in a ROM chip in HP PA-RISC based machines.
-config VIDEO_CMDLINE
- bool
-
-config VIDEO_NOMODESET
+config VIDEO
bool
default n
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 6bbc03950899..ffbac4387c67 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -1,12 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_APERTURE_HELPERS) += aperture.o
+obj-$(CONFIG_SCREEN_INFO) += screen_info.o
obj-$(CONFIG_STI_CORE) += sticore.o
obj-$(CONFIG_VGASTATE) += vgastate.o
-obj-$(CONFIG_VIDEO_CMDLINE) += cmdline.o
-obj-$(CONFIG_VIDEO_NOMODESET) += nomodeset.o
+obj-$(CONFIG_VIDEO) += cmdline.o nomodeset.o
obj-$(CONFIG_HDMI) += hdmi.o
+screen_info-y := screen_info_generic.o
+screen_info-$(CONFIG_PCI) += screen_info_pci.o
+
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_FB_STI) += console/
obj-$(CONFIG_LOGO) += logo/
diff --git a/drivers/video/cmdline.c b/drivers/video/cmdline.c
index d3d257489c3d..49ee3fb4951a 100644
--- a/drivers/video/cmdline.c
+++ b/drivers/video/cmdline.c
@@ -80,6 +80,7 @@ const char *video_get_options(const char *name)
}
EXPORT_SYMBOL(video_get_options);
+#if IS_ENABLED(CONFIG_FB_CORE)
bool __video_get_options(const char *name, const char **options, bool is_of)
{
bool enabled = true;
@@ -96,6 +97,7 @@ bool __video_get_options(const char *name, const char **options, bool is_of)
return enabled;
}
EXPORT_SYMBOL(__video_get_options);
+#endif
/*
* Process command line options for video adapters. This function is
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d0bcc1d786e..a61b8260b8f3 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -75,7 +75,6 @@ config FB_CIRRUS
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This enables support for Cirrus Logic GD542x/543x based boards on
Amiga: SD64, Piccolo, Picasso II/II+, Picasso IV, or EGS Spectrum.
@@ -95,7 +94,6 @@ config FB_PM2
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for cards based on
the 3D Labs Permedia, Permedia 2 and Permedia 2V chips.
@@ -161,7 +159,6 @@ config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
depends on FB && PCI && (BROKEN || !SPARC64)
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This enables support for the Integraphics CyberPro 20x0 and 5000
VGA chips used in the Rebel.com Netwinder and other machines.
@@ -312,7 +309,6 @@ config FB_CT65550
bool "Chips 65550 display support"
depends on (FB = y) && PPC32 && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Chips & Technologies
65550 graphics chip in PowerBooks.
@@ -321,7 +317,6 @@ config FB_ASILIANT
bool "Asiliant (Chips) 69000 display support"
depends on (FB = y) && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Asiliant 69030 chipset
@@ -331,7 +326,6 @@ config FB_IMSTT
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select FB_MACMODES if PPC_PMAC
- select VIDEO_NOMODESET
help
The IMS Twin Turbo is a PCI-based frame buffer card bundled with
many Macintosh and compatible computers.
@@ -396,7 +390,6 @@ config FB_TGA
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for generic TGA and SFB+
graphic cards. These include DEC ZLXp-E1, -E2 and -E3 PCI cards,
@@ -573,7 +566,6 @@ config FB_XVR500
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the framebuffer device for the Sun XVR-500 and similar
graphics cards based upon the 3DLABS Wildcat chipset. The driver
@@ -585,7 +577,6 @@ config FB_XVR2500
bool "Sun XVR-2500 3DLABS Wildcat support"
depends on (FB = y) && PCI && SPARC64
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the framebuffer device for the Sun XVR-2500 and similar
graphics cards based upon the 3DLABS Wildcat chipset. The driver
@@ -611,7 +602,6 @@ config FB_PVR2
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
Say Y here if you have a PowerVR 2 card in your box. If you plan to
run linux on your Dreamcast, you will have to say Y here.
@@ -674,7 +664,6 @@ config FB_NVIDIA
select FB_IOMEM_FOPS
select BITREVERSE
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the nVidia chips, TNT
and newer. For very old chipsets, such as the RIVA128, use
@@ -723,7 +712,6 @@ config FB_RIVA
select FB_MODE_HELPERS
select BITREVERSE
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the nVidia Riva/Geforce
chips.
@@ -766,7 +754,6 @@ config FB_I740
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
select FB_DDC
help
This driver supports graphics cards based on Intel740 chip.
@@ -777,7 +764,6 @@ config FB_I810
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports the on-board graphics built in to the Intel 810
and 815 chipsets. Say Y if you have and plan to use such a board.
@@ -830,7 +816,6 @@ config FB_MATROX
select FB_IOMEM_FOPS
select FB_TILEBLITTING
select FB_MACMODES if PPC_PMAC
- select VIDEO_NOMODESET
help
Say Y here if you have a Matrox Millennium, Matrox Millennium II,
Matrox Mystique, Matrox Mystique 220, Matrox Productiva G100, Matrox
@@ -953,7 +938,6 @@ config FB_RADEON
select FB_IOMEM_FOPS
select FB_MACMODES if PPC
select FB_MODE_HELPERS
- select VIDEO_NOMODESET
help
Choose this option if you want to use an ATI Radeon graphics card as
a framebuffer device. There are both PCI and AGP versions. You
@@ -991,7 +975,6 @@ config FB_ATY128
select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
select FB_IOMEM_HELPERS
select FB_MACMODES if PPC_PMAC
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the ATI Rage128 chips.
Say Y if you have such a graphics board and read
@@ -1017,7 +1000,6 @@ config FB_ATY
select FB_IOMEM_FOPS
select FB_MACMODES if PPC
select FB_ATY_CT if SPARC64 && PCI
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the ATI Mach64 chips.
Say Y if you have such a graphics board.
@@ -1069,7 +1051,6 @@ config FB_S3
select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
- select VIDEO_NOMODESET
select FONT_8x16 if FRAMEBUFFER_CONSOLE
help
Driver for graphics boards with S3 Trio / S3 Virge chip.
@@ -1091,7 +1072,6 @@ config FB_SAVAGE
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports notebooks and computers with S3 Savage PCI/AGP
chips.
@@ -1131,7 +1111,6 @@ config FB_SIS
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select FB_SIS_300 if !FB_SIS_315
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the SiS 300, 315, 330
and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets.
@@ -1162,7 +1141,6 @@ config FB_VIA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select I2C_ALGOBIT
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
@@ -1203,7 +1181,6 @@ config FB_NEOMAGIC
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports notebooks with NeoMagic PCI chips.
Say Y if you have such a graphics card.
@@ -1215,7 +1192,6 @@ config FB_KYRO
tristate "IMG Kyro support"
depends on FB && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Say Y here if you have a STG4000 / Kyro / PowerVR 3 based
graphics board.
@@ -1231,7 +1207,6 @@ config FB_3DFX
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the 3Dfx Banshee,
Voodoo3 or VSA-100 (aka Voodoo4/5) chips. Say Y if you have
@@ -1260,7 +1235,6 @@ config FB_VOODOO1
depends on FB && PCI
depends on FB_DEVICE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
Voodoo2 (cvg) based graphics card.
@@ -1283,7 +1257,6 @@ config FB_VT8623
select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
- select VIDEO_NOMODESET
select FONT_8x16 if FRAMEBUFFER_CONSOLE
help
Driver for CastleRock integrated graphics core in the
@@ -1298,7 +1271,6 @@ config FB_TRIDENT
select FB_DDC
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for Trident PCI/AGP chipsets.
Supported chipset families are TGUI 9440/96XX, 3DImage, Blade3D
@@ -1323,7 +1295,6 @@ config FB_ARK
select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
- select VIDEO_NOMODESET
select FONT_8x16 if FRAMEBUFFER_CONSOLE
help
Driver for PCI graphics boards with ARK 2000PV chip
@@ -1336,7 +1307,6 @@ config FB_PM3
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the 3DLabs Permedia3
chipset, used in Formac ProFormance III, 3DLabs Oxygen VX1 &
@@ -1347,7 +1317,6 @@ config FB_CARMINE
tristate "Fujitsu carmine frame buffer support"
depends on FB && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Fujitsu Carmine chip.
The driver provides two independent frame buffer devices.
@@ -1629,7 +1598,6 @@ config FB_IBM_GXT4500
tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
depends on FB
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Say Y here to enable support for the IBM GXT4000P/6000P and
GXT4500P/6500P display adaptor based on Raster Engine RC1000,
@@ -1747,7 +1715,6 @@ config FB_MB862XX
depends on FB
depends on PCI || (OF && PPC)
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
@@ -1813,7 +1780,6 @@ config FB_HYPERV
depends on FB && HYPERV
select DMA_CMA if HAVE_DMA_CONTIGUOUS && CMA
select FB_IOMEM_HELPERS_DEFERRED
- select VIDEO_NOMODESET
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
@@ -1847,7 +1813,6 @@ config FB_SM712
tristate "Silicon Motion SM712 framebuffer support"
depends on FB && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Frame buffer driver for the Silicon Motion SM710, SM712, SM721
and SM722 chips.
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 21053bf00dc5..db09fe87fcd4 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -4,7 +4,7 @@
#
config FB_CORE
- select VIDEO_CMDLINE
+ select VIDEO
tristate
config FB_NOTIFY
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index fc206755f5f6..48287366e0d4 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -645,7 +645,6 @@ int fb_new_modelist(struct fb_info *info)
return 0;
}
-#if defined(CONFIG_VIDEO_NOMODESET)
bool fb_modesetting_disabled(const char *drvname)
{
bool fwonly = video_firmware_drivers_only();
@@ -657,6 +656,5 @@ bool fb_modesetting_disabled(const char *drvname)
return fwonly;
}
EXPORT_SYMBOL(fb_modesetting_disabled);
-#endif
MODULE_LICENSE("GPL");
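With the CONFIG_VIDEO_NOMODESET guard gone, fb_modesetting_disabled() is
always built into the fbdev core, so drivers can call it unconditionally.
A minimal sketch of the usual call pattern at module init, modeled on
existing fbdev drivers; the driver name "examplefb" and the surrounding
module are hypothetical:

#include <linux/fb.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver examplefb_driver; /* hypothetical driver */

static int __init examplefb_init(void)
{
	/* Honor nomodeset/firmware-only: refuse to bind a native driver. */
	if (fb_modesetting_disabled("examplefb"))
		return -ENODEV;

	return platform_driver_register(&examplefb_driver);
}
module_init(examplefb_init);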
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index f9b4ddd592ce..8dd82afb3452 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -13,11 +13,9 @@
#include <linux/efi-bgrt.h>
#include <linux/errno.h>
#include <linux/fb.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/screen_info.h>
-#include <linux/pm_runtime.h>
#include <video/vga.h>
#include <asm/efi.h>
#include <drm/drm_utils.h> /* For drm_get_panel_orientation_quirk */
@@ -48,8 +46,6 @@ static bool use_bgrt = true;
static bool request_mem_succeeded = false;
static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
-static struct pci_dev *efifb_pci_dev; /* dev with BAR covering the efifb */
-
struct efifb_par {
u32 pseudo_palette[16];
resource_size_t base;
@@ -108,7 +104,7 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
*/
#if defined CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER && \
defined CONFIG_ACPI_BGRT
-static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
+static void efifb_copy_bmp(u8 *src, u32 *dst, int width, const struct screen_info *si)
{
u8 r, g, b;
@@ -130,7 +126,7 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
* resolution still fits, it will be displayed very close to the right edge of
* the display looking quite bad. This function checks for this.
*/
-static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+static bool efifb_bgrt_sanity_check(const struct screen_info *si, u32 bmp_width)
{
/*
* All x86 firmwares horizontally center the image (the yoffset
@@ -141,16 +137,15 @@ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
return bgrt_tab.image_offset_x == expected_xoffset;
}
#else
-static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+static bool efifb_bgrt_sanity_check(const struct screen_info *si, u32 bmp_width)
{
return true;
}
#endif
-static void efifb_show_boot_graphics(struct fb_info *info)
+static void efifb_show_boot_graphics(struct fb_info *info, const struct screen_info *si)
{
u32 bmp_width, bmp_height, bmp_pitch, dst_x, y, src_y;
- struct screen_info *si = &screen_info;
struct bmp_file_header *file_header;
struct bmp_dib_header *dib_header;
void *bgrt_image = NULL;
@@ -247,7 +242,8 @@ error:
pr_warn("efifb: Ignoring BGRT: unexpected or invalid BMP data\n");
}
#else
-static inline void efifb_show_boot_graphics(struct fb_info *info) {}
+static inline void efifb_show_boot_graphics(struct fb_info *info, const struct screen_info *si)
+{ }
#endif
/*
@@ -258,9 +254,6 @@ static void efifb_destroy(struct fb_info *info)
{
struct efifb_par *par = info->par;
- if (efifb_pci_dev)
- pm_runtime_put(&efifb_pci_dev->dev);
-
if (info->screen_base) {
if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
iounmap(info->screen_base);
@@ -282,7 +275,7 @@ static const struct fb_ops efifb_ops = {
.fb_setcolreg = efifb_setcolreg,
};
-static int efifb_setup(char *options)
+static int efifb_setup(struct screen_info *si, char *options)
{
char *this_opt;
@@ -290,16 +283,16 @@ static int efifb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
- efifb_setup_from_dmi(&screen_info, this_opt);
+ efifb_setup_from_dmi(si, this_opt);
if (!strncmp(this_opt, "base:", 5))
- screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
+ si->lfb_base = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "stride:", 7))
- screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
+ si->lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
else if (!strncmp(this_opt, "height:", 7))
- screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
+ si->lfb_height = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "width:", 6))
- screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ si->lfb_width = simple_strtoul(this_opt+6, NULL, 0);
else if (!strcmp(this_opt, "nowc"))
mem_flags &= ~EFI_MEMORY_WC;
else if (!strcmp(this_opt, "nobgrt"))
@@ -310,15 +303,15 @@ static int efifb_setup(char *options)
return 0;
}
-static inline bool fb_base_is_valid(void)
+static inline bool fb_base_is_valid(struct screen_info *si)
{
- if (screen_info.lfb_base)
+ if (si->lfb_base)
return true;
- if (!(screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE))
+ if (!(si->capabilities & VIDEO_CAPABILITY_64BIT_BASE))
return false;
- if (screen_info.ext_lfb_base)
+ if (si->ext_lfb_base)
return true;
return false;
@@ -329,7 +322,10 @@ static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- return sprintf(buf, fmt "\n", (screen_info.lfb_##name)); \
+ struct screen_info *si = dev_get_platdata(dev); \
+ if (!si) \
+ return -ENODEV; \
+ return sprintf(buf, fmt "\n", (si->lfb_##name)); \
} \
static DEVICE_ATTR_RO(name)
@@ -349,13 +345,9 @@ static struct attribute *efifb_attrs[] = {
};
ATTRIBUTE_GROUPS(efifb);
-static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
-
-static struct resource *bar_resource;
-static u64 bar_offset;
-
static int efifb_probe(struct platform_device *dev)
{
+ struct screen_info *si;
struct fb_info *info;
struct efifb_par *par;
int err, orientation;
@@ -365,62 +357,60 @@ static int efifb_probe(struct platform_device *dev)
char *option = NULL;
efi_memory_desc_t md;
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
+ /*
+ * If we fail probing the device, the kernel might try a different
+ * driver. We get a copy of the attached screen_info, so that we can
+ * modify its values without affecting later drivers.
+ */
+ si = dev_get_platdata(&dev->dev);
+ if (!si)
+ return -ENODEV;
+ si = devm_kmemdup(&dev->dev, si, sizeof(*si), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+
+ if (si->orig_video_isVGA != VIDEO_TYPE_EFI)
return -ENODEV;
if (fb_get_options("efifb", &option))
return -ENODEV;
- efifb_setup(option);
+ efifb_setup(si, option);
/* We don't get linelength from UGA Draw Protocol, only from
* EFI Graphics Protocol. So if it's not in DMI, and it's not
* passed in from the user, we really can't use the framebuffer.
*/
- if (!screen_info.lfb_linelength)
+ if (!si->lfb_linelength)
return -ENODEV;
- if (!screen_info.lfb_depth)
- screen_info.lfb_depth = 32;
- if (!screen_info.pages)
- screen_info.pages = 1;
- if (!fb_base_is_valid()) {
+ if (!si->lfb_depth)
+ si->lfb_depth = 32;
+ if (!si->pages)
+ si->pages = 1;
+ if (!fb_base_is_valid(si)) {
printk(KERN_DEBUG "efifb: invalid framebuffer address\n");
return -ENODEV;
}
printk(KERN_INFO "efifb: probing for efifb\n");
/* just assume they're all unset if any are */
- if (!screen_info.blue_size) {
- screen_info.blue_size = 8;
- screen_info.blue_pos = 0;
- screen_info.green_size = 8;
- screen_info.green_pos = 8;
- screen_info.red_size = 8;
- screen_info.red_pos = 16;
- screen_info.rsvd_size = 8;
- screen_info.rsvd_pos = 24;
+ if (!si->blue_size) {
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
}
- efifb_fix.smem_start = screen_info.lfb_base;
+ efifb_fix.smem_start = __screen_info_lfb_base(si);
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) {
- u64 ext_lfb_base;
-
- ext_lfb_base = (u64)(unsigned long)screen_info.ext_lfb_base << 32;
- efifb_fix.smem_start |= ext_lfb_base;
- }
-
- if (bar_resource &&
- bar_resource->start + bar_offset != efifb_fix.smem_start) {
- dev_info(&efifb_pci_dev->dev,
- "BAR has moved, updating efifb address\n");
- efifb_fix.smem_start = bar_resource->start + bar_offset;
- }
-
- efifb_defined.bits_per_pixel = screen_info.lfb_depth;
- efifb_defined.xres = screen_info.lfb_width;
- efifb_defined.yres = screen_info.lfb_height;
- efifb_fix.line_length = screen_info.lfb_linelength;
+ efifb_defined.bits_per_pixel = si->lfb_depth;
+ efifb_defined.xres = si->lfb_width;
+ efifb_defined.yres = si->lfb_height;
+ efifb_fix.line_length = si->lfb_linelength;
/* size_vmode -- that is the amount of memory needed for the
* used video mode, i.e. the minimum amount of
@@ -430,7 +420,7 @@ static int efifb_probe(struct platform_device *dev)
/* size_total -- all video memory we have. Used for
* entries, resource allocation and bounds
* checking. */
- size_total = screen_info.lfb_size;
+ size_total = si->lfb_size;
if (size_total < size_vmode)
size_total = size_vmode;
@@ -505,14 +495,14 @@ static int efifb_probe(struct platform_device *dev)
goto err_release_fb;
}
- efifb_show_boot_graphics(info);
+ efifb_show_boot_graphics(info, si);
pr_info("efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
efifb_fix.smem_start, size_remap/1024, size_total/1024);
pr_info("efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
efifb_defined.xres, efifb_defined.yres,
efifb_defined.bits_per_pixel, efifb_fix.line_length,
- screen_info.pages);
+ si->pages);
efifb_defined.xres_virtual = efifb_defined.xres;
efifb_defined.yres_virtual = efifb_fix.smem_len /
@@ -526,26 +516,26 @@ static int efifb_probe(struct platform_device *dev)
efifb_defined.left_margin = (efifb_defined.xres / 8) & 0xf8;
efifb_defined.hsync_len = (efifb_defined.xres / 8) & 0xf8;
- efifb_defined.red.offset = screen_info.red_pos;
- efifb_defined.red.length = screen_info.red_size;
- efifb_defined.green.offset = screen_info.green_pos;
- efifb_defined.green.length = screen_info.green_size;
- efifb_defined.blue.offset = screen_info.blue_pos;
- efifb_defined.blue.length = screen_info.blue_size;
- efifb_defined.transp.offset = screen_info.rsvd_pos;
- efifb_defined.transp.length = screen_info.rsvd_size;
+ efifb_defined.red.offset = si->red_pos;
+ efifb_defined.red.length = si->red_size;
+ efifb_defined.green.offset = si->green_pos;
+ efifb_defined.green.length = si->green_size;
+ efifb_defined.blue.offset = si->blue_pos;
+ efifb_defined.blue.length = si->blue_size;
+ efifb_defined.transp.offset = si->rsvd_pos;
+ efifb_defined.transp.length = si->rsvd_size;
pr_info("efifb: %s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
"Truecolor",
- screen_info.rsvd_size,
- screen_info.red_size,
- screen_info.green_size,
- screen_info.blue_size,
- screen_info.rsvd_pos,
- screen_info.red_pos,
- screen_info.green_pos,
- screen_info.blue_pos);
+ si->rsvd_size,
+ si->red_size,
+ si->green_size,
+ si->blue_size,
+ si->rsvd_pos,
+ si->red_pos,
+ si->green_pos,
+ si->blue_pos);
efifb_fix.ypanstep = 0;
efifb_fix.ywrapstep = 0;
@@ -582,26 +572,20 @@ static int efifb_probe(struct platform_device *dev)
goto err_groups;
}
- if (efifb_pci_dev)
- WARN_ON(pm_runtime_get_sync(&efifb_pci_dev->dev) < 0);
-
err = devm_aperture_acquire_for_platform_device(dev, par->base, par->size);
if (err) {
pr_err("efifb: cannot acquire aperture\n");
- goto err_put_rpm_ref;
+ goto err_fb_dealloc_cmap;
}
err = register_framebuffer(info);
if (err < 0) {
pr_err("efifb: cannot register framebuffer\n");
- goto err_put_rpm_ref;
+ goto err_fb_dealloc_cmap;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
-err_put_rpm_ref:
- if (efifb_pci_dev)
- pm_runtime_put(&efifb_pci_dev->dev);
-
+err_fb_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
err_groups:
sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
@@ -636,58 +620,3 @@ static struct platform_driver efifb_driver = {
};
builtin_platform_driver(efifb_driver);
-
-#if defined(CONFIG_PCI)
-
-static void record_efifb_bar_resource(struct pci_dev *dev, int idx, u64 offset)
-{
- u16 word;
-
- efifb_pci_dev = dev;
-
- pci_read_config_word(dev, PCI_COMMAND, &word);
- if (!(word & PCI_COMMAND_MEMORY)) {
- pci_dev_disabled = true;
- dev_err(&dev->dev,
- "BAR %d: assigned to efifb but device is disabled!\n",
- idx);
- return;
- }
-
- bar_resource = &dev->resource[idx];
- bar_offset = offset;
-
- dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
-}
-
-static void efifb_fixup_resources(struct pci_dev *dev)
-{
- u64 base = screen_info.lfb_base;
- u64 size = screen_info.lfb_size;
- int i;
-
- if (efifb_pci_dev || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return;
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)screen_info.ext_lfb_base << 32;
-
- if (!base)
- return;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- struct resource *res = &dev->resource[i];
-
- if (!(res->flags & IORESOURCE_MEM))
- continue;
-
- if (res->start <= base && res->end >= base + size - 1) {
- record_efifb_bar_resource(dev, i, base - res->start);
- break;
- }
- }
-}
-DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
- 16, efifb_fixup_resources);
-
-#endif
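efifb now reads the framebuffer base through __screen_info_lfb_base()
instead of open-coding the VIDEO_CAPABILITY_64BIT_BASE handling removed
above. A sketch of the equivalent computation, assuming the helper
mirrors the deleted logic:

#include <linux/screen_info.h>

/* Sketch: combine the 32-bit base with the extended upper half. */
static u64 example_lfb_base(const struct screen_info *si)
{
	u64 base = si->lfb_base;

	if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
		base |= (u64)si->ext_lfb_base << 32;

	return base;
}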
diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig
index 9a49916e0492..3b20420cc94d 100644
--- a/drivers/video/fbdev/geode/Kconfig
+++ b/drivers/video/fbdev/geode/Kconfig
@@ -14,7 +14,6 @@ config FB_GEODE_LX
tristate "AMD Geode LX framebuffer support"
depends on FB && FB_GEODE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
AMD Geode LX processors.
@@ -28,7 +27,6 @@ config FB_GEODE_GX
tristate "AMD Geode GX framebuffer support"
depends on FB && FB_GEODE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
AMD Geode GX processors.
@@ -42,7 +40,6 @@ config FB_GEODE_GX1
tristate "AMD Geode GX1 framebuffer support"
depends on FB && FB_GEODE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
AMD Geode GX1 processor.
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 6f58ee276ad1..028a56525047 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -470,7 +470,7 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
if (err == -ENOENT)
return 0;
- dev_info(dev, "failed to parse power-domains: %d\n", err);
+ dev_err(dev, "failed to parse power-domains: %d\n", err);
return err;
}
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index c0edceea0a79..8ab64ae4cad3 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -243,6 +243,7 @@ static int vesafb_setup(char *options)
static int vesafb_probe(struct platform_device *dev)
{
+ struct screen_info *si;
struct fb_info *info;
struct vesafb_par *par;
int i, err;
@@ -251,21 +252,33 @@ static int vesafb_probe(struct platform_device *dev)
unsigned int size_total;
char *option = NULL;
+ /*
+ * If we fail probing the device, the kernel might try a different
+ * driver. We get a copy of the attached screen_info, so that we can
+ * modify its values without affecting later drivers.
+ */
+ si = dev_get_platdata(&dev->dev);
+ if (!si)
+ return -ENODEV;
+ si = devm_kmemdup(&dev->dev, si, sizeof(*si), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+
/* ignore error return of fb_get_options */
fb_get_options("vesafb", &option);
vesafb_setup(option);
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+ if (si->orig_video_isVGA != VIDEO_TYPE_VLFB)
return -ENODEV;
- vga_compat = (screen_info.capabilities & 2) ? 0 : 1;
- vesafb_fix.smem_start = screen_info.lfb_base;
- vesafb_defined.bits_per_pixel = screen_info.lfb_depth;
+ vga_compat = (si->capabilities & 2) ? 0 : 1;
+ vesafb_fix.smem_start = si->lfb_base;
+ vesafb_defined.bits_per_pixel = si->lfb_depth;
if (15 == vesafb_defined.bits_per_pixel)
vesafb_defined.bits_per_pixel = 16;
- vesafb_defined.xres = screen_info.lfb_width;
- vesafb_defined.yres = screen_info.lfb_height;
- vesafb_fix.line_length = screen_info.lfb_linelength;
+ vesafb_defined.xres = si->lfb_width;
+ vesafb_defined.yres = si->lfb_height;
+ vesafb_fix.line_length = si->lfb_linelength;
vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
@@ -277,7 +290,7 @@ static int vesafb_probe(struct platform_device *dev)
/* size_total -- all video memory we have. Used for mtrr
* entries, resource allocation and bounds
* checking. */
- size_total = screen_info.lfb_size * 65536;
+ size_total = si->lfb_size * 65536;
if (vram_total)
size_total = vram_total * 1024 * 1024;
if (size_total < size_vmode)
@@ -297,7 +310,7 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_fix.smem_len = size_remap;
#ifndef __i386__
- screen_info.vesapm_seg = 0;
+ si->vesapm_seg = 0;
#endif
if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
@@ -317,23 +330,26 @@ static int vesafb_probe(struct platform_device *dev)
par = info->par;
info->pseudo_palette = par->pseudo_palette;
- par->base = screen_info.lfb_base;
+ par->base = si->lfb_base;
par->size = size_total;
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
- vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
+ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel,
+ vesafb_fix.line_length, si->pages);
- if (screen_info.vesapm_seg) {
+ if (si->vesapm_seg) {
printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
- screen_info.vesapm_seg,screen_info.vesapm_off);
+ si->vesapm_seg, si->vesapm_off);
}
- if (screen_info.vesapm_seg < 0xc000)
+ if (si->vesapm_seg < 0xc000)
ypan = pmi_setpal = 0; /* not available or some DOS TSR ... */
if (ypan || pmi_setpal) {
+ unsigned long pmi_phys;
unsigned short *pmi_base;
- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+ pmi_phys = ((unsigned long)si->vesapm_seg << 4) + si->vesapm_off;
+ pmi_base = (unsigned short *)phys_to_virt(pmi_phys);
pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
@@ -377,14 +393,14 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8;
vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8;
- vesafb_defined.red.offset = screen_info.red_pos;
- vesafb_defined.red.length = screen_info.red_size;
- vesafb_defined.green.offset = screen_info.green_pos;
- vesafb_defined.green.length = screen_info.green_size;
- vesafb_defined.blue.offset = screen_info.blue_pos;
- vesafb_defined.blue.length = screen_info.blue_size;
- vesafb_defined.transp.offset = screen_info.rsvd_pos;
- vesafb_defined.transp.length = screen_info.rsvd_size;
+ vesafb_defined.red.offset = si->red_pos;
+ vesafb_defined.red.length = si->red_size;
+ vesafb_defined.green.offset = si->green_pos;
+ vesafb_defined.green.length = si->green_size;
+ vesafb_defined.blue.offset = si->blue_pos;
+ vesafb_defined.blue.length = si->blue_size;
+ vesafb_defined.transp.offset = si->rsvd_pos;
+ vesafb_defined.transp.length = si->rsvd_size;
if (vesafb_defined.bits_per_pixel <= 8) {
depth = vesafb_defined.green.length;
@@ -399,14 +415,14 @@ static int vesafb_probe(struct platform_device *dev)
(vesafb_defined.bits_per_pixel > 8) ?
"Truecolor" : (vga_compat || pmi_setpal) ?
"Pseudocolor" : "Static Pseudocolor",
- screen_info.rsvd_size,
- screen_info.red_size,
- screen_info.green_size,
- screen_info.blue_size,
- screen_info.rsvd_pos,
- screen_info.red_pos,
- screen_info.green_pos,
- screen_info.blue_pos);
+ si->rsvd_size,
+ si->red_size,
+ si->green_size,
+ si->blue_size,
+ si->rsvd_pos,
+ si->red_pos,
+ si->green_pos,
+ si->blue_pos);
vesafb_fix.ypanstep = ypan ? 1 : 0;
vesafb_fix.ywrapstep = (ypan>1) ? 1 : 0;
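Both efifb and vesafb now expect their screen_info as platform data and
duplicate it with devm_kmemdup() before modifying it. A sketch of the
producing side, modeled on the sysfb code that registers these firmware
framebuffer devices; the device name and error handling are abbreviated:

#include <linux/platform_device.h>
#include <linux/screen_info.h>

static struct platform_device *example_register_fb(const struct screen_info *si)
{
	/*
	 * Copies *si into the device's platform data, which the probe
	 * functions above then duplicate per-device with devm_kmemdup().
	 */
	return platform_device_register_resndata(NULL, "efi-framebuffer", 0,
						 NULL, 0, si, sizeof(*si));
}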
diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c
new file mode 100644
index 000000000000..64117c6367ab
--- /dev/null
+++ b/drivers/video/screen_info_generic.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/screen_info.h>
+#include <linux/string.h>
+
+static void resource_init_named(struct resource *r,
+ resource_size_t start, resource_size_t size,
+ const char *name, unsigned int flags)
+{
+ memset(r, 0, sizeof(*r));
+
+ r->start = start;
+ r->end = start + size - 1;
+ r->name = name;
+ r->flags = flags;
+}
+
+static void resource_init_io_named(struct resource *r,
+ resource_size_t start, resource_size_t size,
+ const char *name)
+{
+ resource_init_named(r, start, size, name, IORESOURCE_IO);
+}
+
+static void resource_init_mem_named(struct resource *r,
+ resource_size_t start, resource_size_t size,
+ const char *name)
+{
+ resource_init_named(r, start, size, name, IORESOURCE_MEM);
+}
+
+static inline bool __screen_info_has_ega_gfx(unsigned int mode)
+{
+ switch (mode) {
+ case 0x0d: /* 320x200-4 */
+ case 0x0e: /* 640x200-4 */
+ case 0x0f: /* 640x350-1 */
+ case 0x10: /* 640x350-4 */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool __screen_info_has_vga_gfx(unsigned int mode)
+{
+ switch (mode) {
+ case 0x10: /* 640x480-1 */
+ case 0x12: /* 640x480-4 */
+ case 0x13: /* 320x200-8 */
+ case 0x6a: /* 800x600-4 (VESA) */
+ return true;
+ default:
+ return __screen_info_has_ega_gfx(mode);
+ }
+}
+
+/**
+ * screen_info_resources() - Get resources from screen_info structure
+ * @si: the screen_info
+ * @r: pointer to an array of resource structures
+ * @num: number of elements in @r
+ *
+ * Returns:
+ * The number of resources stored in @r on success, or a negative errno code otherwise.
+ *
+ * A call to screen_info_resources() returns the resources consumed by the
+ * screen_info's device or framebuffer. The result is stored in the caller-supplied
+ * array @r with up to @num elements. The function returns the number of
+ * initialized elements.
+ */
+ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num)
+{
+ struct resource *pos = r;
+ unsigned int type = screen_info_video_type(si);
+ u64 base, size;
+
+ switch (type) {
+ case VIDEO_TYPE_MDA:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3b0, 12, "mda");
+ if (num > 1)
+ resource_init_io_named(pos++, 0x3bf, 0x01, "mda");
+ if (num > 2)
+ resource_init_mem_named(pos++, 0xb0000, 0x2000, "mda");
+ break;
+ case VIDEO_TYPE_CGA:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3d4, 0x02, "cga");
+ if (num > 1)
+ resource_init_mem_named(pos++, 0xb8000, 0x2000, "cga");
+ break;
+ case VIDEO_TYPE_EGAM:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3bf, 0x10, "ega");
+ if (num > 1)
+ resource_init_mem_named(pos++, 0xb0000, 0x8000, "ega");
+ break;
+ case VIDEO_TYPE_EGAC:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3c0, 0x20, "ega");
+ if (num > 1) {
+ if (__screen_info_has_ega_gfx(si->orig_video_mode))
+ resource_init_mem_named(pos++, 0xa0000, 0x10000, "ega");
+ else
+ resource_init_mem_named(pos++, 0xb8000, 0x8000, "ega");
+ }
+ break;
+ case VIDEO_TYPE_VGAC:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3c0, 0x20, "vga+");
+ if (num > 1) {
+ if (__screen_info_has_vga_gfx(si->orig_video_mode))
+ resource_init_mem_named(pos++, 0xa0000, 0x10000, "vga+");
+ else
+ resource_init_mem_named(pos++, 0xb8000, 0x8000, "vga+");
+ }
+ break;
+ case VIDEO_TYPE_VLFB:
+ case VIDEO_TYPE_EFI:
+ base = __screen_info_lfb_base(si);
+ if (!base)
+ break;
+ size = __screen_info_lfb_size(si, type);
+ if (!size)
+ break;
+ if (num > 0)
+ resource_init_mem_named(pos++, base, size, "lfb");
+ break;
+ case VIDEO_TYPE_PICA_S3:
+ case VIDEO_TYPE_MIPS_G364:
+ case VIDEO_TYPE_SGI:
+ case VIDEO_TYPE_TGAC:
+ case VIDEO_TYPE_SUN:
+ case VIDEO_TYPE_SUNPCI:
+ case VIDEO_TYPE_PMAC:
+ default:
+ /* not supported */
+ return -EINVAL;
+ }
+
+ return pos - r;
+}
+EXPORT_SYMBOL(screen_info_resources);
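A short usage sketch for screen_info_resources(); the enumeration loop
and printout are illustrative only:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/screen_info.h>

static void example_print_resources(const struct screen_info *si)
{
	struct resource res[SCREEN_INFO_MAX_RESOURCES];
	ssize_t i, numres;

	numres = screen_info_resources(si, res, ARRAY_SIZE(res));
	if (numres < 0)
		return; /* unsupported video type */

	for (i = 0; i < numres; ++i)
		pr_info("screen_info resource %zd: %pr\n", i, &res[i]);
}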
diff --git a/drivers/video/screen_info_pci.c b/drivers/video/screen_info_pci.c
new file mode 100644
index 000000000000..6c5833517141
--- /dev/null
+++ b/drivers/video/screen_info_pci.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/screen_info.h>
+#include <linux/string.h>
+
+static struct pci_dev *screen_info_lfb_pdev;
+static size_t screen_info_lfb_bar;
+static resource_size_t screen_info_lfb_offset;
+static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0);
+
+static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr)
+{
+ u64 size = __screen_info_lfb_size(si, screen_info_video_type(si));
+
+ if (screen_info_lfb_offset > resource_size(pr))
+ return false;
+ if (size > resource_size(pr))
+ return false;
+ if (resource_size(pr) - size < screen_info_lfb_offset)
+ return false;
+
+ return true;
+}
+
+void screen_info_apply_fixups(void)
+{
+ struct screen_info *si = &screen_info;
+
+ if (screen_info_lfb_pdev) {
+ struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar];
+
+ if (pr->start != screen_info_lfb_res.start) {
+ if (__screen_info_relocation_is_valid(si, pr)) {
+ /*
+ * Only update base if we have an actual
+ * relocation to a valid I/O range.
+ */
+ __screen_info_set_lfb_base(si, pr->start + screen_info_lfb_offset);
+ pr_info("Relocating firmware framebuffer to offset %pa within %pr\n",
+ &screen_info_lfb_offset, pr);
+ } else {
+ pr_warn("Invalid relocation, disabling firmware framebuffer\n");
+ }
+ }
+ }
+}
+
+static void screen_info_fixup_lfb(struct pci_dev *pdev)
+{
+ unsigned int type;
+ struct resource res[SCREEN_INFO_MAX_RESOURCES];
+ size_t i, numres;
+ int ret;
+ const struct screen_info *si = &screen_info;
+
+ if (screen_info_lfb_pdev)
+ return; // already found
+
+ type = screen_info_video_type(si);
+ if (type != VIDEO_TYPE_EFI)
+ return; // only applies to EFI
+
+ ret = screen_info_resources(si, res, ARRAY_SIZE(res));
+ if (ret < 0)
+ return;
+ numres = ret;
+
+ for (i = 0; i < numres; ++i) {
+ struct resource *r = &res[i];
+ const struct resource *pr;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ pr = pci_find_resource(pdev, r);
+ if (!pr)
+ continue;
+
+ /*
+ * We've found a PCI device with the framebuffer
+ * resource. Store away the parameters to track
+ * relocation of the framebuffer aperture.
+ */
+ screen_info_lfb_pdev = pdev;
+ screen_info_lfb_bar = pr - pdev->resource;
+ screen_info_lfb_offset = r->start - pr->start;
+ memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res));
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16,
+ screen_info_fixup_lfb);
+
+static struct pci_dev *__screen_info_pci_dev(struct resource *res)
+{
+ struct pci_dev *pdev = NULL;
+ const struct resource *r = NULL;
+
+ if (!(res->flags & IORESOURCE_MEM))
+ return NULL;
+
+ while (!r && (pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
+ r = pci_find_resource(pdev, res);
+ }
+
+ return pdev;
+}
+
+/**
+ * screen_info_pci_dev() - Return PCI parent device that contains screen_info's framebuffer
+ * @si: the screen_info
+ *
+ * Returns:
+ * The PCI device that contains the screen_info's framebuffer on success, or
+ * a pointer-encoded errno value otherwise. The value NULL is not an error;
+ * it signals that no PCI device has been found.
+ */
+struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
+{
+ struct resource res[SCREEN_INFO_MAX_RESOURCES];
+ ssize_t i, numres;
+
+ numres = screen_info_resources(si, res, ARRAY_SIZE(res));
+ if (numres < 0)
+ return ERR_PTR(numres);
+
+ for (i = 0; i < numres; ++i) {
+ struct pci_dev *pdev = __screen_info_pci_dev(&res[i]);
+
+ if (pdev)
+ return pdev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(screen_info_pci_dev);
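Callers of screen_info_pci_dev() must distinguish the NULL and error
returns, and drop the device reference when done. A sketch, assuming the
reference taken by pci_get_base_class() above is handed to the caller:

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/screen_info.h>

static int example_use_parent(const struct screen_info *si)
{
	struct pci_dev *pdev = screen_info_pci_dev(si);

	if (IS_ERR(pdev))
		return PTR_ERR(pdev);	/* resource lookup failed */
	if (!pdev)
		return 0;		/* no PCI parent; not an error */

	dev_info(&pdev->dev, "contains the firmware framebuffer\n");

	pci_dev_put(pdev);	/* drop the pci_get_base_class() reference */
	return 0;
}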