author		Dave Airlie <airlied@redhat.com>	2022-11-22 06:41:11 +0300
committer	Dave Airlie <airlied@redhat.com>	2022-11-22 06:41:11 +0300
commit		fc58764bbf602b65a6f63c53e5fd6feae76c510c (patch)
tree		03f5448cf5d742b8fd2980e86a33636026557ac6 /drivers
parent		819683a1fc2f7e64017d50caf539e7bafcb37b81 (diff)
parent		aec3bb3a01de09058fbebed4821ed7d07e1ed994 (diff)
Merge tag 'amd-drm-next-6.2-2022-11-18' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.2-2022-11-18:

amdgpu:
- SR-IOV fixes
- Clean up DC checks
- DCN 3.2.x fixes
- DCN 3.1.x fixes
- Don't enable degamma on asics which don't support it
- IP discovery fixes
- BACO fixes
- Fix vbios allocation handling when vkms is enabled
- Drop buggy tdr advanced mode GPU reset handling
- Fix the build when DCN is not set in kconfig
- MST DSC fixes
- Userptr fixes
- FRU and RAS EEPROM fixes
- VCN 4.x RAS support
- Aldebaran CU occupancy reporting fix
- PSP ring cleanup

amdkfd:
- Memory limit fix
- Enable cooperative launch on gfx 10.3

amd-drm-next-6.2-2022-11-11:

amdgpu:
- SMU 13.x updates
- GPUVM TLB race fix
- DCN 3.1.4 updates
- DCN 3.2.x updates
- PSR fixes
- Kerneldoc fix
- Vega10 fan fix
- GPUVM locking fixes in error paths
- BACO fix for Beige Goby
- EEPROM I2C address cleanup
- GFXOFF fix
- Fix DC memory leak in error paths
- Flexible array updates
- Mtype fix for GPUVM PTEs
- Move Kconfig into amdgpu directory
- SR-IOV updates
- Fix possible memory leak in CS IOCTL error path

amdkfd:
- Fix possible memory overrun
- CRIU fixes

radeon:
- ACPI ref count fix
- HDA audio notifier support
- Move Kconfig into radeon directory

UAPI:
- Add new GEM_CREATE flags to help to transition more KFD functionality to the DRM UAPI. These are used internally in the driver to align location-based memory coherency requirements from memory allocated in the KFD with how we manage GPUVM PTEs. They are currently blocked in the GEM_CREATE IOCTL as we don't have a user right now. They are just used internally in the kernel driver for now for existing KFD memory allocations. So a change to the UAPI header, but no functional change in the UAPI.

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221118170807.6505-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
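The UAPI note above is easiest to see in the amdgpu_amdkfd_gpuvm.c hunk further down. A minimal sketch of the translation the series adds at allocation time (taken from that hunk; the new flags are set only inside the kernel and are rejected in the GEM_CREATE IOCTL for now):

	/* KFD coherency hints become GEM create flags on the BO, so GPUVM
	 * can derive the PTE MTYPE bits from the BO itself instead of
	 * per-mapping IP-version switches (see the get_pte_flags() cleanup).
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
		alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
		alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;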
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/Kconfig | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 203
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 215
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c) | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h) | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_10.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 254
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 157
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h | 363
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 8
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h | 30
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 63
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_4_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c | 25
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 26
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h | 15
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 83
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 30
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 18
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 3
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 117
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 58
174 files changed, 2670 insertions, 1642 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 722ded626399..315cbdf61979 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -233,64 +233,8 @@ source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
-config DRM_RADEON
- tristate "ATI Radeon"
- depends on DRM && PCI && MMU
- depends on AGP || !AGP
- select FW_LOADER
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_KMS_HELPER
- select DRM_TTM
- select DRM_TTM_HELPER
- select POWER_SUPPLY
- select HWMON
- select BACKLIGHT_CLASS_DEVICE
- select INTERVAL_TREE
- # radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work
- # ACPI_VIDEO's dependencies must also be selected.
- select INPUT if ACPI
- select ACPI_VIDEO if ACPI
- # On x86 ACPI_VIDEO also needs ACPI_WMI
- select X86_PLATFORM_DEVICES if ACPI && X86
- select ACPI_WMI if ACPI && X86
- help
- Choose this option if you have an ATI Radeon graphics card. There
- are both PCI and AGP versions. You don't need to choose this to
- run the Radeon in plain VGA mode.
-
- If M is selected, the module will be called radeon.
-
source "drivers/gpu/drm/radeon/Kconfig"
-config DRM_AMDGPU
- tristate "AMD GPU"
- depends on DRM && PCI && MMU
- select FW_LOADER
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_KMS_HELPER
- select DRM_SCHED
- select DRM_TTM
- select DRM_TTM_HELPER
- select POWER_SUPPLY
- select HWMON
- select BACKLIGHT_CLASS_DEVICE
- select INTERVAL_TREE
- select DRM_BUDDY
- # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
- # ACPI_VIDEO's dependencies must also be selected.
- select INPUT if ACPI
- select ACPI_VIDEO if ACPI
- # On x86 ACPI_VIDEO also needs ACPI_WMI
- select X86_PLATFORM_DEVICES if ACPI && X86
- select ACPI_WMI if ACPI && X86
- help
- Choose this option if you have a recent AMD Radeon graphics card.
-
- If M is selected, the module will be called amdgpu.
-
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 7777d55275de..5fcd510f1abb 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,4 +1,33 @@
# SPDX-License-Identifier: MIT
+
+config DRM_AMDGPU
+ tristate "AMD GPU"
+ depends on DRM && PCI && MMU
+ select FW_LOADER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SCHED
+ select DRM_TTM
+ select DRM_TTM_HELPER
+ select POWER_SUPPLY
+ select HWMON
+ select BACKLIGHT_CLASS_DEVICE
+ select INTERVAL_TREE
+ select DRM_BUDDY
+ # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ # ACPI_VIDEO's dependencies must also be selected.
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ # On x86 ACPI_VIDEO also needs ACPI_WMI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
+ help
+ Choose this option if you have a recent AMD Radeon graphics card.
+
+ If M is selected, the module will be called amdgpu.
+
config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 6ad39cf71bdd..712075a491f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -250,7 +250,7 @@ endif
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
-amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
+amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_hmm.o
include $(FULL_AMD_PATH)/pm/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9999c18e7d8e..6b74df446694 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,7 +82,6 @@
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
@@ -1065,6 +1064,7 @@ struct amdgpu_device {
struct work_struct reset_work;
bool job_hang;
+ bool dc_enabled;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1122,6 +1122,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
+
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context);
@@ -1295,6 +1297,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b14800ac179e..57b5e11446c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -847,7 +847,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
if (atif->notifications.brightness_change) {
- if (amdgpu_device_has_dc_support(adev)) {
+ if (adev->dc_enabled) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index c8935d718207..4485bb29bec9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -41,5 +41,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ba72a910d0d5..3a763916a5a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -29,6 +29,7 @@
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
+#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
@@ -171,9 +172,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && adev->kfd.vram_used + vram_needed >
- adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size) -
- reserved_for_pt)) {
+ adev->gmc.real_vram_size - reserved_for_pt)) {
ret = -ENOMEM;
goto release;
}
@@ -405,63 +404,15 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
- struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
- bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
- uint32_t mapping_flags;
- uint64_t pte_flags;
- bool snoop = false;
+ uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_MTYPE_DEFAULT;
- mapping_flags = AMDGPU_VM_PAGE_READABLE;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
- if (bo_adev == adev) {
- if (uncached)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else if (coherent)
- mapping_flags |= AMDGPU_VM_MTYPE_CC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_RW;
- if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
- adev->gmc.xgmi.connected_to_cpu)
- snoop = true;
- } else {
- if (uncached || coherent)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- if (amdgpu_xgmi_same_hive(adev, bo_adev))
- snoop = true;
- }
- } else {
- if (uncached || coherent)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- snoop = true;
- }
- break;
- default:
- if (uncached || coherent)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
-
- if (!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
- snoop = true;
- }
-
- pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
- pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
-
- return pte_flags;
+ return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
/**
@@ -988,6 +939,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
+ struct hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@@ -998,7 +950,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
goto out;
}
- ret = amdgpu_mn_register(bo, user_addr);
+ ret = amdgpu_hmm_register(bo, user_addr);
if (ret) {
pr_err("%s: Failed to register MMU notifier: %d\n",
__func__, ret);
@@ -1017,7 +969,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out;
@@ -1035,10 +987,10 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
if (ret)
- amdgpu_mn_unregister(bo);
+ amdgpu_hmm_unregister(bo);
out:
mutex_unlock(&process_info->lock);
return ret;
@@ -1673,6 +1625,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
}
}
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
+ alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
+ alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
+
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
if (!*mem) {
ret = -ENOMEM;
@@ -1817,7 +1774,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
mutex_unlock(&process_info->lock);
/* No more MMU notifiers */
- amdgpu_mn_unregister(mem->bo);
+ amdgpu_hmm_unregister(mem->bo);
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
@@ -2362,6 +2319,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
/* Go through userptr_inval_list and update any invalid user_pages */
list_for_each_entry(mem, &process_info->userptr_inval_list,
validate_list.head) {
+ struct hmm_range *range;
+
invalid = atomic_read(&mem->invalid);
if (!invalid)
/* BO hasn't been invalidated since the last
@@ -2372,7 +2331,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+ &range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
@@ -2391,7 +2351,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* FIXME: Cannot ignore the return code, must hold
* notifier_lock
*/
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
}
/* Mark the BO as valid unless it was invalidated
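The userptr fixes above change amdgpu_ttm_tt_get_user_pages() to hand back the struct hmm_range it started, and the completion call now consumes it. A condensed sketch of the new pairing, assuming the call sites shown in this hunk (error handling trimmed):

	struct hmm_range *range;

	/* Start the HMM walk; fills pages[] and returns the range handle. */
	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
	if (r)
		return r;

	/* ... validate and map the BO ... */

	/* Returns false if the range was invalidated in the meantime;
	 * the CS path turns that into -EAGAIN and retries.
	 */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
		r = -EAGAIN;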
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index b81b77a9efa6..9b97fa39d47a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -101,39 +101,97 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
}
}
+static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
+ struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
+{
+ uint32_t start_addr, fw_size, drv_size;
+
+ start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
+ fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
+ drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);
+
+ DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
+ start_addr,
+ fw_size,
+ drv_size);
+
+ if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware request VRAM reservation for SR-IOV */
+ adev->mman.fw_vram_usage_start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->mman.fw_vram_usage_size = fw_size << 10;
+ /* Use the default scratch size */
+ *usage_bytes = 0;
+ } else {
+ *usage_bytes = drv_size << 10;
+ }
+ return 0;
+}
+
+static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
+ struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
+{
+ uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size;
+
+ fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
+ fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
+
+ drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
+ drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);
+
+ DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
+ fw_start_addr,
+ fw_size,
+ drv_start_addr,
+ drv_size);
+
+ if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
+ /* Firmware request VRAM reservation for SR-IOV */
+ adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->mman.fw_vram_usage_size = fw_size << 10;
+ }
+
+ if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
+ /* driver request VRAM reservation for SR-IOV */
+ adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->mman.drv_vram_usage_size = drv_size << 10;
+ }
+
+ *usage_bytes = 0;
+ return 0;
+}
+
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
- struct vram_usagebyfirmware_v2_1 *firmware_usage;
- uint32_t start_addr, size;
+ struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
+ struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
uint16_t data_offset;
+ uint8_t frev, crev;
int usage_bytes = 0;
- if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
- firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
- DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
- le32_to_cpu(firmware_usage->start_address_in_kb),
- le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
- le16_to_cpu(firmware_usage->used_by_driver_in_kb));
-
- start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
- size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
-
- if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
- (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
- ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
- /* Firmware request VRAM reservation for SR-IOV */
- adev->mman.fw_vram_usage_start_offset = (start_addr &
- (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->mman.fw_vram_usage_size = size << 10;
- /* Use the default scratch size */
- usage_bytes = 0;
- } else {
- usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+ if (frev == 2 && crev == 1) {
+ fw_usage_v2_1 =
+ (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+ fw_usage_v2_1,
+ &usage_bytes);
+ } else if (frev >= 2 && crev >= 2) {
+ fw_usage_v2_2 =
+ (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+ fw_usage_v2_2,
+ &usage_bytes);
}
}
+
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
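Both table revisions encode addresses and sizes in KiB, with reservation flags folded into the top bits of the start address, hence the mask-then-shift pattern above. A worked example with illustrative values (the exact mask constant is defined in atomfirmware.h):

	/* start_addr = 0x40000100: flag bits on top, region at 0x100 KiB */
	offset_bytes = (0x40000100 & ~ATOM_VRAM_OPERATION_FLAGS_MASK) << 10;
	/* -> 0x100 KiB == 0x40000 bytes */
	size_bytes = fw_size << 10;	/* e.g. 256 KiB -> 0x40000 bytes */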
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 2168163aad2d..252a876b0725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
list_add_tail(&e->tv.head, &bucket[priority]);
e->user_pages = NULL;
+ e->range = NULL;
}
/* Connect the sorted buckets in the output list. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 9caea1688fc3..e4d78491bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -26,6 +26,8 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
+struct hmm_range;
+
struct amdgpu_device;
struct amdgpu_bo;
struct amdgpu_bo_va;
@@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo_va *bo_va;
uint32_t priority;
struct page **user_pages;
+ struct hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0528c2b1db6e..459150460815 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -287,8 +287,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size)
- return -EINVAL;
+ if (!p->gang_size) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
@@ -917,7 +919,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
}
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
if (r) {
kvfree(e->user_pages);
e->user_pages = NULL;
@@ -995,10 +997,12 @@ out_free_user_pages:
if (!e->user_pages)
continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
kvfree(e->user_pages);
e->user_pages = NULL;
+ e->range = NULL;
}
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
}
@@ -1270,7 +1274,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+ e->range = NULL;
}
if (r) {
r = -EAGAIN;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index de61a85c4b02..0f16d3c09309 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1969,7 +1969,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_ta_if_debugfs_init(adev);
#if defined(CONFIG_DRM_AMD_DC)
- if (amdgpu_device_has_dc_support(adev))
+ if (adev->dc_enabled)
dtn_debugfs_init(adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ad4e78728733..b2b1c66bfe39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1916,6 +1916,16 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
+void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
+ adev->mode_info.num_crtc = 1;
+ adev->enable_virtual_display = true;
+ DRM_INFO("virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
+ }
+}
+
/**
* amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
*
@@ -3348,8 +3358,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev) ||
- adev->enable_virtual_display ||
+ if (adev->enable_virtual_display ||
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
@@ -4216,25 +4225,27 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
amdgpu_ras_resume(adev);
- /*
- * Most of the connector probing functions try to acquire runtime pm
- * refs to ensure that the GPU is powered on when connector polling is
- * performed. Since we're calling this from a runtime PM callback,
- * trying to acquire rpm refs will cause us to deadlock.
- *
- * Since we're guaranteed to be holding the rpm lock, it's safe to
- * temporarily disable the rpm helpers so this doesn't deadlock us.
- */
+ if (adev->mode_info.num_crtc) {
+ /*
+ * Most of the connector probing functions try to acquire runtime pm
+ * refs to ensure that the GPU is powered on when connector polling is
+ * performed. Since we're calling this from a runtime PM callback,
+ * trying to acquire rpm refs will cause us to deadlock.
+ *
+ * Since we're guaranteed to be holding the rpm lock, it's safe to
+ * temporarily disable the rpm helpers so this doesn't deadlock us.
+ */
#ifdef CONFIG_PM
- dev->dev->power.disable_depth++;
+ dev->dev->power.disable_depth++;
#endif
- if (!amdgpu_device_has_dc_support(adev))
- drm_helper_hpd_irq_event(dev);
- else
- drm_kms_helper_hotplug_event(dev);
+ if (!adev->dc_enabled)
+ drm_helper_hpd_irq_event(dev);
+ else
+ drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
- dev->dev->power.disable_depth--;
+ dev->dev->power.disable_depth--;
#endif
+ }
adev->in_suspend = false;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
@@ -4583,6 +4594,10 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (amdgpu_gpu_recovery == 0)
goto disabled;
+ /* Skip soft reset check in fatal error mode */
+ if (!amdgpu_ras_is_poison_mode_supported(adev))
+ return true;
+
if (!amdgpu_device_ip_check_soft_reset(adev)) {
dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
return false;
@@ -5078,94 +5093,6 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
-static void amdgpu_device_recheck_guilty_jobs(
- struct amdgpu_device *adev, struct list_head *device_list_handle,
- struct amdgpu_reset_context *reset_context)
-{
- int i, r = 0;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- int ret = 0;
- struct drm_sched_job *s_job;
-
- if (!ring || !ring->sched.thread)
- continue;
-
- s_job = list_first_entry_or_null(&ring->sched.pending_list,
- struct drm_sched_job, list);
- if (s_job == NULL)
- continue;
-
- /* clear job's guilty and depend the folowing step to decide the real one */
- drm_sched_reset_karma(s_job);
- drm_sched_resubmit_jobs_ext(&ring->sched, 1);
-
- if (!s_job->s_fence->parent) {
- DRM_WARN("Failed to get a HW fence for job!");
- continue;
- }
-
- ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
- if (ret == 0) { /* timeout */
- DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
- ring->sched.name, s_job->id);
-
-
- amdgpu_fence_driver_isr_toggle(adev, true);
-
- /* Clear this failed job from fence array */
- amdgpu_fence_driver_clear_job_fences(ring);
-
- amdgpu_fence_driver_isr_toggle(adev, false);
-
- /* Since the job won't signal and we go for
- * another resubmit drop this parent pointer
- */
- dma_fence_put(s_job->s_fence->parent);
- s_job->s_fence->parent = NULL;
-
- /* set guilty */
- drm_sched_increase_karma(s_job);
- amdgpu_reset_prepare_hwcontext(adev, reset_context);
-retry:
- /* do hw reset */
- if (amdgpu_sriov_vf(adev)) {
- amdgpu_virt_fini_data_exchange(adev);
- r = amdgpu_device_reset_sriov(adev, false);
- if (r)
- adev->asic_reset_res = r;
- } else {
- clear_bit(AMDGPU_SKIP_HW_RESET,
- &reset_context->flags);
- r = amdgpu_do_asic_reset(device_list_handle,
- reset_context);
- if (r && r == -EAGAIN)
- goto retry;
- }
-
- /*
- * add reset counter so that the following
- * resubmitted job could flush vmid
- */
- atomic_inc(&adev->gpu_reset_counter);
- continue;
- }
-
- /* got the hw fence, signal finished fence */
- atomic_dec(ring->sched.score);
- dma_fence_get(&s_job->s_fence->finished);
- dma_fence_signal(&s_job->s_fence->finished);
- dma_fence_put(&s_job->s_fence->finished);
-
- /* remove node from list and free the job */
- spin_lock(&ring->sched.job_list_lock);
- list_del_init(&s_job->list);
- spin_unlock(&ring->sched.job_list_lock);
- ring->sched.ops->free_job(s_job);
- }
-}
-
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -5186,7 +5113,6 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
}
-
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -5209,7 +5135,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
- int tmp_vram_lost_counter;
bool gpu_reset_for_dev_remove = false;
gpu_reset_for_dev_remove =
@@ -5355,7 +5280,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
- tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
@@ -5380,29 +5304,13 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- /*
- * Sometimes a later bad compute job can block a good gfx job as gfx
- * and compute ring share internal GC HW mutually. We add an additional
- * guilty jobs recheck step to find the real guilty job, it synchronously
- * submits and pends for the first job being signaled. If it gets timeout,
- * we identify it as a real guilty job.
- */
- if (amdgpu_gpu_recovery == 2 &&
- !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
- amdgpu_device_recheck_guilty_jobs(
- tmp_adev, device_list_handle, reset_context);
-
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
if (!ring || !ring->sched.thread)
continue;
- /* No point to resubmit jobs if we didn't HW reset*/
- if (!tmp_adev->asic_reset_res && !job_signaled)
- drm_sched_resubmit_jobs(&ring->sched);
-
- drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
+ drm_sched_start(&ring->sched, true);
}
if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
@@ -5444,6 +5352,8 @@ skip_sched_resume:
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
+
+ amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
recover_end:
@@ -5855,8 +5765,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
if (!ring || !ring->sched.thread)
continue;
-
- drm_sched_resubmit_jobs(&ring->sched);
drm_sched_start(&ring->sched, true);
}
@@ -6047,3 +5955,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
dma_fence_put(old);
return NULL;
}
+
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ case CHIP_TOPAZ:
+ /* chips with no display hardware */
+ return false;
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ /* chips with display hardware */
+ return true;
+ default:
+ /* IP discovery */
+ if (!adev->ip_versions[DCE_HWIP][0] ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
+ return false;
+ return true;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 3993e6134914..6b48178455bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -305,8 +305,13 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
- dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) {
+ /* ignore the discovery binary from vram if discovery=2 in kernel module parameter */
+ if (amdgpu_discovery == 2)
+ dev_info(adev->dev,"force read ip discovery binary from file");
+ else
+ dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
+
/* retry read ip discovery binary from file */
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
if (r) {
@@ -1697,9 +1702,15 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
+{
+ amdgpu_device_set_sriov_virtual_display(adev);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+}
+
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
+ if (adev->enable_virtual_display) {
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
return 0;
}
@@ -1727,7 +1738,10 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 6):
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
dev_err(adev->dev,
@@ -1740,7 +1754,10 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
case IP_VERSION(12, 1, 0):
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
dev_err(adev->dev,
@@ -2161,6 +2178,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(10, 3, 1):
adev->family = AMDGPU_FAMILY_VGH;
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
break;
case IP_VERSION(10, 3, 3):
adev->family = AMDGPU_FAMILY_YC;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 311a8ea6f065..b22471b3bd63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -44,6 +44,41 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
+/**
+ * amdgpu_display_hotplug_work_func - work handler for display hotplug event
+ *
+ * @work: work struct pointer
+ *
+ * This is the hotplug event work handler (all ASICs).
+ * The work gets scheduled from the IRQ handler if there
+ * was a hotplug interrupt. It walks through the connector table
+ * and calls hotplug handler for each connector. After this, it sends
+ * a DRM hotplug event to alert userspace.
+ *
+ * This design approach is required in order to defer hotplug event handling
+ * from the IRQ handler to a work handler because hotplug handler has to use
+ * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
+ * sleep).
+ */
+void amdgpu_display_hotplug_work_func(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ hotplug_work);
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+
+ mutex_lock(&mode_config->mutex);
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ amdgpu_connector_hotplug(connector);
+ drm_connector_list_iter_end(&iter);
+ mutex_unlock(&mode_config->mutex);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
@@ -514,7 +549,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
*/
if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
- amdgpu_device_asic_has_dc_support(adev->asic_type) &&
+ adev->dc_enabled &&
adev->mode_info.gpu_vm_support)
domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif
@@ -1280,7 +1315,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
- if (amdgpu_device_has_dc_support(adev)) {
+ if (adev->dc_enabled) {
adev->mode_info.abm_level_property =
drm_property_create_range(adev_to_drm(adev), 0,
"abm level", 0, 4);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 560352f7c317..9d19940f73c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -35,6 +35,7 @@
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+void amdgpu_display_hotplug_work_func(struct work_struct *work);
void amdgpu_display_update_priority(struct amdgpu_device *adev);
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 7bd8e33b14be..271e30e34d93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -328,7 +328,9 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
- flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_UNCACHED);
}
ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 6b35bb948f96..5697d456d7d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -534,7 +534,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* DOC: gpu_recovery (int)
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
-MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
@@ -1925,9 +1925,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
- /* Van Gogh */
- {0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},
-
/* Yellow Carp */
{0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
{0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
@@ -2472,7 +2469,7 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
if (ret)
return ret;
- if (amdgpu_device_has_dc_support(adev)) {
+ if (adev->dc_enabled) {
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, drm_dev) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
index 4d9eb0137f8c..7d2a908438e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
@@ -79,13 +79,15 @@
* That is, for an I2C EEPROM driver everything is controlled by
* the "eeprom_addr".
*
+ * See also top of amdgpu_ras_eeprom.c.
+ *
* P.S. If you need to write, lock and read the Identification Page,
* (M24M02-DR device only, which we do not use), change the "7" to
* "0xF" in the macro below, and let the client set bit 20 to 1 in
* "eeprom_addr", and set A10 to 0 to write into it, and A10 and A1 to
* 1 to lock it permanently.
*/
-#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 7))
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
u8 *eeprom_buf, u16 buf_size, bool read)
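Widening the mask matters once the FRU EEPROM lives at "memory address" 0x80000 (see FRU_EEPROM_MADDR_8 in the next hunk): bits 16 and up of eeprom_addr select the device address on the I2C bus. A worked comparison:

	/* old: (0xA << 3) | ((0x80000 >> 16) & 7)   == 0x50 | 0 == 0x50 (bit 19 lost) */
	/* new: (0xA << 3) | ((0x80000 >> 16) & 0xF) == 0x50 | 8 == 0x58 */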
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index e325150879df..2c38ac7bc643 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -29,9 +29,10 @@
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_eeprom.h"
-#define FRU_EEPROM_MADDR 0x60000
+#define FRU_EEPROM_MADDR_6 0x60000
+#define FRU_EEPROM_MADDR_8 0x80000
-static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
+static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
{
/* Only server cards have the FRU EEPROM
* TODO: See if we can figure this out dynamically instead of
@@ -45,6 +46,11 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return false;
+ /* The default I2C EEPROM address of the FRU.
+ */
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_8;
+
/* VBIOS is of the format ###-DXXXYYYY-##. For SKU identification,
* we can use just the "DXXX" portion. If there were more models, we
* could convert the 3 characters to a hex integer and use a switch
@@ -57,21 +63,29 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
if (strnstr(atom_ctx->vbios_version, "D161",
sizeof(atom_ctx->vbios_version)) ||
strnstr(atom_ctx->vbios_version, "D163",
- sizeof(atom_ctx->vbios_version)))
+ sizeof(atom_ctx->vbios_version))) {
+ *fru_addr = FRU_EEPROM_MADDR_6;
return true;
- else
+ } else {
return false;
+ }
case CHIP_ALDEBARAN:
- /* All Aldebaran SKUs have the FRU */
+ /* All Aldebaran SKUs have an FRU */
+ if (!strnstr(atom_ctx->vbios_version, "D673",
+ sizeof(atom_ctx->vbios_version)))
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_6;
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
- sizeof(atom_ctx->vbios_version))) {
+ sizeof(atom_ctx->vbios_version))) {
if (strnstr(atom_ctx->vbios_version, "D603GLXE",
- sizeof(atom_ctx->vbios_version)))
+ sizeof(atom_ctx->vbios_version))) {
return false;
- else
+ } else {
+ *fru_addr = FRU_EEPROM_MADDR_6;
return true;
+ }
} else {
return false;
}
@@ -80,41 +94,14 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
}
}
-static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
- unsigned char *buf, size_t buf_size)
-{
- int ret;
- u8 size;
-
- ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr, buf, 1);
- if (ret < 1) {
- DRM_WARN("FRU: Failed to get size field");
- return ret;
- }
-
- /* The size returned by the i2c requires subtraction of 0xC0 since the
- * size apparently always reports as 0xC0+actual size.
- */
- size = buf[0] & 0x3F;
- size = min_t(size_t, size, buf_size);
-
- ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr + 1,
- buf, size);
- if (ret < 1) {
- DRM_WARN("FRU: Failed to get data field");
- return ret;
- }
-
- return size;
-}
-
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
- unsigned char buf[AMDGPU_PRODUCT_NAME_LEN];
- u32 addrptr;
+ unsigned char buf[8], *pia;
+ u32 addr, fru_addr;
int size, len;
+ u8 csum;
- if (!is_fru_eeprom_supported(adev))
+ if (!is_fru_eeprom_supported(adev, &fru_addr))
return 0;
/* If algo exists, it means that the i2c_adapter's initialized */
@@ -123,88 +110,102 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
return -ENODEV;
}
- /* There's a lot of repetition here. This is due to the FRU having
- * variable-length fields. To get the information, we have to find the
- * size of each field, and then keep reading along and reading along
- * until we get all of the data that we want. We use addrptr to track
- * the address as we go
- */
-
- /* The first fields are all of size 1-byte, from 0-7 are offsets that
- * contain information that isn't useful to us.
- * Bytes 8-a are all 1-byte and refer to the size of the entire struct,
- * and the language field, so just start from 0xb, manufacturer size
- */
- addrptr = FRU_EEPROM_MADDR + 0xb;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
- if (size < 1) {
- DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
- return -EINVAL;
+ /* Read the IPMI Common header */
+ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
+ sizeof(buf));
+ if (len != 8) {
+ DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
+ return len < 0 ? len : -EIO;
}
- /* Increment the addrptr by the size of the field, and 1 due to the
- * size field being 1 byte. This pattern continues below.
- */
- addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
- if (size < 1) {
- DRM_ERROR("Failed to read FRU product name, ret:%d", size);
- return -EINVAL;
+ if (buf[0] != 1) {
+ DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
+ return -EIO;
}
- len = size;
- if (len >= AMDGPU_PRODUCT_NAME_LEN) {
- DRM_WARN("FRU Product Name is larger than %d characters. This is likely a mistake",
- AMDGPU_PRODUCT_NAME_LEN);
- len = AMDGPU_PRODUCT_NAME_LEN - 1;
- }
- memcpy(adev->product_name, buf, len);
- adev->product_name[len] = '\0';
-
- addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
- if (size < 1) {
- DRM_ERROR("Failed to read FRU product number, ret:%d", size);
- return -EINVAL;
+ for (csum = 0; len > 0; len--)
+ csum += buf[len - 1];
+ if (csum) {
+ DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
+ return -EIO;
}
- len = size;
- /* Product number should only be 16 characters. Any more,
- * and something could be wrong. Cap it at 16 to be safe
- */
- if (len >= sizeof(adev->product_number)) {
- DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
- len = sizeof(adev->product_number) - 1;
- }
- memcpy(adev->product_number, buf, len);
- adev->product_number[len] = '\0';
+ /* Get the offset to the Product Info Area (PIA). */
+ addr = buf[4] * 8;
+ if (!addr)
+ return 0;
- addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
+ /* Get the absolute address to the PIA. */
+ addr += fru_addr;
- if (size < 1) {
- DRM_ERROR("Failed to read FRU product version, ret:%d", size);
- return -EINVAL;
+ /* Read the header of the PIA. */
+ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
+ if (len != 3) {
+ DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
+ return len < 0 ? len : -EIO;
}
- addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
+ if (buf[0] != 1) {
+ DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
+ return -EIO;
+ }
- if (size < 1) {
- DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
- return -EINVAL;
+ size = buf[1] * 8;
+ pia = kzalloc(size, GFP_KERNEL);
+ if (!pia)
+ return -ENOMEM;
+
+ /* Read the whole PIA. */
+ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
+ if (len != size) {
+ kfree(pia);
+ DRM_ERROR("Couldn't read the Product Info Area: %d", len);
+ return len < 0 ? len : -EIO;
}
- len = size;
- /* Serial number should only be 16 characters. Any more,
- * and something could be wrong. Cap it at 16 to be safe
- */
- if (len >= sizeof(adev->serial)) {
- DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
- len = sizeof(adev->serial) - 1;
+ for (csum = 0; size > 0; size--)
+ csum += pia[size - 1];
+ if (csum) {
+ DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ return -EIO;
}
- memcpy(adev->serial, buf, len);
- adev->serial[len] = '\0';
+ /* Now extract useful information from the PIA.
+ *
+ * Skip the Manufacturer Name at [3] and go directly to
+ * the Product Name field.
+ */
+ addr = 3 + 1 + (pia[3] & 0x3F);
+ if (addr + 1 >= len)
+ goto Out;
+ memcpy(adev->product_name, pia + addr + 1,
+ min_t(size_t,
+ sizeof(adev->product_name),
+ pia[addr] & 0x3F));
+ adev->product_name[sizeof(adev->product_name) - 1] = '\0';
+
+ /* Go to the Product Part/Model Number field. */
+ addr += 1 + (pia[addr] & 0x3F);
+ if (addr + 1 >= len)
+ goto Out;
+ memcpy(adev->product_number, pia + addr + 1,
+ min_t(size_t,
+ sizeof(adev->product_number),
+ pia[addr] & 0x3F));
+ adev->product_number[sizeof(adev->product_number) - 1] = '\0';
+
+ /* Go to the Product Version field. */
+ addr += 1 + (pia[addr] & 0x3F);
+
+ /* Go to the Product Serial Number field. */
+ addr += 1 + (pia[addr] & 0x3F);
+ if (addr + 1 >= len)
+ goto Out;
+ memcpy(adev->serial, pia + addr + 1, min_t(size_t,
+ sizeof(adev->serial),
+ pia[addr] & 0x3F));
+ adev->serial[sizeof(adev->serial) - 1] = '\0';
+Out:
+ kfree(pia);
return 0;
}
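
The rewritten parser above follows the IPMI FRU conventions: areas are located and sized in multiples of 8 bytes, each string field begins with a type/length byte whose low 6 bits carry the length (hence the recurring pia[addr] & 0x3F), and an area is valid when all of its bytes, including the trailing checksum byte, sum to zero modulo 256. A minimal, self-contained userspace sketch of that arithmetic; the sample header bytes are hypothetical:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* An IPMI FRU area is valid when all of its bytes, including the
 * trailing checksum byte, sum to zero modulo 256. */
static int area_checksum_ok(const uint8_t *area, size_t len)
{
	uint8_t csum = 0;

	while (len--)
		csum += area[len];
	return csum == 0;
}

int main(void)
{
	/* Hypothetical 8-byte Common Header: version 1, Product Info
	 * Area at offset 5 * 8 = 40 bytes; last byte balances the sum. */
	uint8_t hdr[8] = { 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0xFA };
	/* Type/length byte: bits 7:6 are the type code, bits 5:0 the length. */
	uint8_t tl = 0xC9;	/* 11b = ASCII, length 9 */

	printf("header checksum %s\n",
	       area_checksum_ok(hdr, sizeof(hdr)) ? "ok" : "bad");
	printf("PIA offset: %u bytes\n", hdr[4] * 8);
	printf("field type %u, length %u\n", tl >> 6, tl & 0x3F);
	return 0;
}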
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8ef31d687ef3..a0780a4e3e61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -38,6 +38,7 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
+#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
@@ -87,7 +88,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
if (robj) {
- amdgpu_mn_unregister(robj);
+ amdgpu_hmm_unregister(robj);
amdgpu_bo_unref(&robj);
}
}
@@ -378,6 +379,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
+ struct hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@@ -413,14 +415,13 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto release_object;
- if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
- r = amdgpu_mn_register(bo, args->addr);
- if (r)
- goto release_object;
- }
+ r = amdgpu_hmm_register(bo, args->addr);
+ if (r)
+ goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+ r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+ &range);
if (r)
goto release_object;
@@ -443,7 +444,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
release_object:
drm_gem_object_put(gobj);
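
For context, the userptr flow now threads a struct hmm_range handle from the page-fault stage to the completion check, instead of stashing it in the TT as before. A condensed sketch of the pairing, reusing the signatures introduced by this patch; this is a fragment for illustration, not a compilable unit:

	struct hmm_range *range;
	int r;

	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
	if (r)
		goto release_object;

	/* ... validate the BO / set up the GPU mapping ... */

	/* Returns true if the pages stayed valid; a concurrent
	 * invalidation means the whole sequence must be retried. */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
		r = -EAGAIN;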
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9546adc8a76f..23692e5d4d13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -583,10 +583,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
- if (adev->in_s0ix)
- delay = GFX_OFF_NO_DELAY;
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ if (adev->in_s0ix) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GFX, true))
+ adev->gfx.gfx_off_state = true;
+ } else {
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
+ }
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 9c0d9baab4e2..4365ede42855 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -657,7 +657,7 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev) ||
- !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
+ !amdgpu_device_has_display_hardware(adev)) {
size = 0;
} else {
size = amdgpu_gmc_get_vbios_fb_size(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index b86c0b8252a5..a48ea62b12b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -49,9 +49,10 @@
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_hmm.h"
/**
- * amdgpu_mn_invalidate_gfx - callback to notify about mm change
+ * amdgpu_hmm_invalidate_gfx - callback to notify about mm change
*
* @mni: the range (mm) is about to update
* @range: details on the invalidation
@@ -60,9 +61,9 @@
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
-static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
+static bool amdgpu_hmm_invalidate_gfx(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -83,12 +84,12 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
return true;
}
-static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
- .invalidate = amdgpu_mn_invalidate_gfx,
+static const struct mmu_interval_notifier_ops amdgpu_hmm_gfx_ops = {
+ .invalidate = amdgpu_hmm_invalidate_gfx,
};
/**
- * amdgpu_mn_invalidate_hsa - callback to notify about mm change
+ * amdgpu_hmm_invalidate_hsa - callback to notify about mm change
*
* @mni: the range (mm) is about to update
* @range: details on the invalidation
@@ -97,9 +98,9 @@ static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
* We temporarily evict the BO attached to this range. This necessitates
* evicting all user-mode queues of the process.
*/
-static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
+static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -117,12 +118,12 @@ static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
return true;
}
-static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
- .invalidate = amdgpu_mn_invalidate_hsa,
+static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
+ .invalidate = amdgpu_hmm_invalidate_hsa,
};
/**
- * amdgpu_mn_register - register a BO for notifier updates
+ * amdgpu_hmm_register - register a BO for notifier updates
*
* @bo: amdgpu buffer object
* @addr: userptr addr we should monitor
@@ -130,25 +131,25 @@ static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
* Registers a mmu_notifier for the given BO at the specified address.
* Returns 0 on success, -ERRNO if anything goes wrong.
*/
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
if (bo->kfd_bo)
return mmu_interval_notifier_insert(&bo->notifier, current->mm,
addr, amdgpu_bo_size(bo),
- &amdgpu_mn_hsa_ops);
+ &amdgpu_hmm_hsa_ops);
return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
amdgpu_bo_size(bo),
- &amdgpu_mn_gfx_ops);
+ &amdgpu_hmm_gfx_ops);
}
/**
- * amdgpu_mn_unregister - unregister a BO for notifier updates
+ * amdgpu_hmm_unregister - unregister a BO for notifier updates
*
* @bo: amdgpu buffer object
*
* Remove any registration of mmu notifier updates from the buffer object.
*/
-void amdgpu_mn_unregister(struct amdgpu_bo *bo)
+void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
{
if (!bo->notifier.mm)
return;
@@ -157,10 +158,9 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
}
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
- struct mm_struct *mm, struct page **pages,
- uint64_t start, uint64_t npages,
- struct hmm_range **phmm_range, bool readonly,
- bool mmap_locked, void *owner)
+ uint64_t start, uint64_t npages, bool readonly,
+ void *owner, struct page **pages,
+ struct hmm_range **phmm_range)
{
struct hmm_range *hmm_range;
unsigned long timeout;
@@ -193,14 +193,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
retry:
hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
-
- if (likely(!mmap_locked))
- mmap_read_lock(mm);
-
r = hmm_range_fault(hmm_range);
-
- if (likely(!mmap_locked))
- mmap_read_unlock(mm);
if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index 14a3c1864085..13ed94d3b01b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -31,23 +31,22 @@
#include <linux/interval_tree.h>
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
- struct mm_struct *mm, struct page **pages,
- uint64_t start, uint64_t npages,
- struct hmm_range **phmm_range, bool readonly,
- bool mmap_locked, void *owner);
+ uint64_t start, uint64_t npages, bool readonly,
+ void *owner, struct page **pages,
+ struct hmm_range **phmm_range);
int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
#if defined(CONFIG_HMM_MIRROR)
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
-void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
+void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
-static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
-static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
#endif
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 89011bae7588..a6aef488a822 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -101,41 +101,6 @@ const char *soc15_ih_clientid_name[] = {
};
/**
- * amdgpu_hotplug_work_func - work handler for display hotplug event
- *
- * @work: work struct pointer
- *
- * This is the hotplug event work handler (all ASICs).
- * The work gets scheduled from the IRQ handler if there
- * was a hotplug interrupt. It walks through the connector table
- * and calls hotplug handler for each connector. After this, it sends
- * a DRM hotplug event to alert userspace.
- *
- * This design approach is required in order to defer hotplug event handling
- * from the IRQ handler to a work handler because hotplug handler has to use
- * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
- * sleep).
- */
-static void amdgpu_hotplug_work_func(struct work_struct *work)
-{
- struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- hotplug_work);
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
-
- mutex_lock(&mode_config->mutex);
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter)
- amdgpu_connector_hotplug(connector);
- drm_connector_list_iter_end(&iter);
- mutex_unlock(&mode_config->mutex);
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(dev);
-}
-
-/**
* amdgpu_irq_disable_all - disable *all* interrupts
*
* @adev: amdgpu device pointer
@@ -317,21 +282,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
- if (!amdgpu_device_has_dc_support(adev)) {
- if (!adev->enable_virtual_display)
- /* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
- if (r)
- return r;
-
- /* Pre-DCE11 */
- INIT_WORK(&adev->hotplug_work,
- amdgpu_hotplug_work_func);
- }
-
INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
@@ -345,11 +295,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
/* PCI devices require shared interrupts. */
r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
adev_to_drm(adev));
- if (r) {
- if (!amdgpu_device_has_dc_support(adev))
- flush_work(&adev->hotplug_work);
+ if (r)
return r;
- }
adev->irq.installed = true;
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
@@ -366,9 +313,6 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
adev->irq.installed = false;
if (adev->irq.msi_enabled)
pci_free_irq_vectors(adev->pdev);
-
- if (!amdgpu_device_has_dc_support(adev))
- flush_work(&adev->hotplug_work);
}
amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index de182bfcf85f..6f81ed4fb0d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -235,3 +235,20 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
+
+void jpeg_set_ras_funcs(struct amdgpu_device *adev)
+{
+ if (!adev->jpeg.ras)
+ return;
+
+ amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
+
+ strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
+ adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
+ adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
+
+	/* If no special ras_late_init function is defined, use the default */
+ if (!adev->jpeg.ras->ras_block.ras_late_init)
+ adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 635dca59a70a..e8ca3e32ad52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -72,5 +72,6 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+void jpeg_set_ras_funcs(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 2fcb5bfbef89..98dbf4e5aae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -52,6 +52,32 @@ static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);
+static int psp_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ring = &psp->km_ring;
+
+ ring->ring_type = ring_type;
+
+	/* allocate a 4K page of local frame buffer memory for the ring */
+ ring->ring_size = 0x1000;
+ ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+ if (ret) {
+ ring->ring_size = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Due to DF Cstate management centralized to PMFW, the firmware
* loading sequence will be updated as below:
@@ -1526,11 +1552,6 @@ int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(adev))
return 0;
- if (psp->ras_context.context.initialized) {
- dev_warn(adev->dev, "RAS WARN: TA has already been loaded\n");
- return 0;
- }
-
if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
!adev->psp.ras_context.context.bin_desc.start_addr) {
dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
@@ -1602,6 +1623,9 @@ int psp_ras_initialize(struct psp_context *psp)
else {
if (ras_cmd->ras_status)
dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+
+		/* failed to load the RAS TA */
+ psp->ras_context.context.initialized = false;
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index cbd4194a2883..cf4f60c66122 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -118,7 +118,6 @@ struct psp_funcs
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
- int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*ring_stop)(struct psp_context *psp,
@@ -396,7 +395,6 @@ struct amdgpu_psp_funcs {
};
-#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
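
The ring_init callback removed here was effectively identical across the PSP v10/v11/v12/v13 backends (v12's extra IH reroute moves into ring_create below), so it becomes one static psp_ring_init() in amdgpu_psp.c while the hooks that really differ stay behind the funcs table. A small self-contained illustration of that refactoring pattern, with hypothetical names:

#include <stdio.h>

struct dev;

/* Per-variant ops keep only the hooks that actually differ. */
struct ops {
	int (*ring_create)(struct dev *d);
};

struct dev {
	const struct ops *funcs;
};

/* Shared implementation, hoisted out of the per-variant tables. */
static int ring_init_common(struct dev *d)
{
	(void)d;
	printf("common ring init\n");
	return 0;
}

static int ring_create_v10(struct dev *d)
{
	(void)d;
	printf("v10 ring create\n");
	return 0;
}

int main(void)
{
	static const struct ops v10 = { .ring_create = ring_create_v10 };
	struct dev d = { .funcs = &v10 };

	ring_init_common(&d);		/* direct call, no indirection */
	return d.funcs->ring_create(&d);
}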
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 693bce07eb46..077404a9c935 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1948,7 +1948,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
- clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ /* Perform full reset in fatal error mode */
+ if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ else
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
@@ -2343,7 +2348,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
- if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
+ adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
1 << AMDGPU_RAS_BLOCK__JPEG);
else
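
The mask arithmetic in this hunk is compact: ORing in ~(1 << UMC | 1 << DF) sets every block bit except UMC and DF (those two are decided by separate checks in this function), and the VCN/JPEG bits are then set or cleared depending on whether the VCN IP version has RAS support, with 4.0.0 newly added. A small sketch with hypothetical bit positions:

#include <stdio.h>

/* Hypothetical block bit positions, for illustration only. */
enum { BLK_UMC = 0, BLK_DF = 5, BLK_VCN = 13, BLK_JPEG = 14 };

int main(void)
{
	unsigned long hw_enabled = 0;
	int vcn_ras_supported = 1;	/* e.g. VCN 2.6.0 or 4.0.0 */

	/* Everything except UMC and DF ... */
	hw_enabled |= ~(1UL << BLK_UMC | 1UL << BLK_DF);
	/* ... then VCN/JPEG only where the IP version supports RAS. */
	if (vcn_ras_supported)
		hw_enabled |= (1UL << BLK_VCN | 1UL << BLK_JPEG);
	else
		hw_enabled &= ~(1UL << BLK_VCN | 1UL << BLK_JPEG);

	printf("UMC bit: %lu, VCN bit: %lu\n",
	       (hw_enabled >> BLK_UMC) & 1, (hw_enabled >> BLK_VCN) & 1);
	return 0;
}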
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 7268ae65c140..2d9f3f4cd79e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -33,12 +33,29 @@
#include "amdgpu_reset.h"
-#define EEPROM_I2C_MADDR_VEGA20 0x0
-#define EEPROM_I2C_MADDR_ARCTURUS 0x40000
-#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0
-#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
-#define EEPROM_I2C_MADDR_ALDEBARAN 0x0
-#define EEPROM_I2C_MADDR_54H (0x54UL << 16)
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s) and the wiring of
+ * pins 1-3, bits 18:16 either address memory within a single device or
+ * select one of several devices on the I2C bus. See top of amdgpu_eeprom.c.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
/*
 * The 2 macros below represent the actual size in bytes that
@@ -117,9 +134,9 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
if (strnstr(atom_ctx->vbios_version,
"D342",
sizeof(atom_ctx->vbios_version)))
- control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS_D342;
+ control->i2c_address = EEPROM_I2C_MADDR_0;
else
- control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS;
+ control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
}
@@ -130,7 +147,7 @@ static bool __get_eeprom_i2c_addr_ip_discovery(struct amdgpu_device *adev,
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 10):
- control->i2c_address = EEPROM_I2C_MADDR_54H;
+ control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
default:
return false;
@@ -140,6 +157,7 @@ static bool __get_eeprom_i2c_addr_ip_discovery(struct amdgpu_device *adev,
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
struct amdgpu_ras_eeprom_control *control)
{
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
u8 i2c_addr;
if (!control)
@@ -162,18 +180,22 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_VEGA20:
- control->i2c_address = EEPROM_I2C_MADDR_VEGA20;
+ control->i2c_address = EEPROM_I2C_MADDR_0;
break;
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, control);
case CHIP_SIENNA_CICHLID:
- control->i2c_address = EEPROM_I2C_MADDR_SIENNA_CICHLID;
+ control->i2c_address = EEPROM_I2C_MADDR_0;
break;
case CHIP_ALDEBARAN:
- control->i2c_address = EEPROM_I2C_MADDR_ALDEBARAN;
+ if (strnstr(atom_ctx->vbios_version, "D673",
+ sizeof(atom_ctx->vbios_version)))
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ else
+ control->i2c_address = EEPROM_I2C_MADDR_0;
break;
case CHIP_IP_DISCOVERY:
@@ -185,7 +207,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
- control->i2c_address = EEPROM_I2C_MADDR_54H;
+ control->i2c_address = EEPROM_I2C_MADDR_4;
break;
default:
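
The address arithmetic described in the new comment can be checked directly: the 7-bit I2C device address is the 1010b EEPROM type code (0x50) with bits 18:16 of the 19-bit memory address appended, which is why the old 54h define collapses into EEPROM_I2C_MADDR_4. A self-contained sketch:

#include <stdio.h>
#include <stdint.h>

/* 7-bit I2C address = 1010b type code (0x50) | bits 18:16 of the
 * 19-bit EEPROM memory address. */
static unsigned int eeprom_dev_addr(uint32_t maddr)
{
	return 0x50 | ((maddr >> 16) & 0x7);
}

int main(void)
{
	printf("maddr 0x00000 -> 0x%02x\n", eeprom_dev_addr(0x00000)); /* 0x50 */
	printf("maddr 0x40000 -> 0x%02x\n", eeprom_dev_addr(0x40000)); /* 0x54 */
	printf("maddr 0x60000 -> 0x%02x\n", eeprom_dev_addr(0x60000)); /* 0x56 */
	return 0;
}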
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0ba87ca6f318..7b5074e776f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -58,6 +58,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
@@ -634,9 +635,6 @@ struct amdgpu_ttm_tt {
struct task_struct *usertask;
uint32_t userflags;
bool bound;
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
- struct hmm_range *range;
-#endif
};
#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
@@ -649,7 +647,8 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+ struct hmm_range **range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -659,16 +658,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
bool readonly;
int r = 0;
+	/* Make sure get_user_pages_done() can clean up gracefully */
+ *range = NULL;
+
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
return -EFAULT;
}
- /* Another get_user_pages is running at the same time?? */
- if (WARN_ON(gtt->range))
- return -EFAULT;
-
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
return -ESRCH;
@@ -685,9 +683,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
}
readonly = amdgpu_ttm_tt_is_readonly(ttm);
- r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
- ttm->num_pages, &gtt->range, readonly,
- true, NULL);
+ r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
+ readonly, NULL, pages, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@@ -704,30 +701,24 @@ out_unlock:
*
* Returns: true if pages are still valid
*/
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range)
{
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
- bool r = false;
- if (!gtt || !gtt->userptr)
+ if (!gtt || !gtt->userptr || !range)
return false;
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
gtt->userptr, ttm->num_pages);
- WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
- "No user pages to check\n");
-
- if (gtt->range) {
- /*
- * FIXME: Must always hold notifier_lock for this, and must
- * not ignore the return code.
- */
- r = amdgpu_hmm_range_get_pages_done(gtt->range);
- gtt->range = NULL;
- }
+ WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
- return !r;
+ /*
+ * FIXME: Must always hold notifier_lock for this, and must
+ * not ignore the return code.
+ */
+ return !amdgpu_hmm_range_get_pages_done(range);
}
#endif
@@ -804,20 +795,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
/* unmap the pages mapped to the device */
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
-
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
- if (gtt->range) {
- unsigned long i;
-
- for (i = 0; i < ttm->num_pages; i++) {
- if (ttm->pages[i] !=
- hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
- break;
- }
-
- WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
- }
-#endif
}
static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
@@ -1168,8 +1145,9 @@ int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
* @addr: The address in the current tasks VM space to use
* @flags: Requirements of userptr object.
*
- * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
- * to current task
+ * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
+ * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
+ * initialize GPU VM for a KFD process.
*/
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
uint64_t addr, uint32_t flags)
@@ -1553,6 +1531,23 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
NULL, &adev->mman.fw_vram_usage_va);
}
+/*
+ * Driver Reservation functions
+ */
+/**
+ * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free driver-reserved VRAM if it has been reserved.
+ */
+static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
+ NULL,
+ NULL);
+}
+
/**
* amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
*
@@ -1579,6 +1574,31 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
&adev->mman.fw_vram_usage_va);
}
+/**
+ * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Create a BO VRAM reservation from the driver.
+ */
+static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
+{
+ uint64_t vram_size = adev->gmc.visible_vram_size;
+
+ adev->mman.drv_vram_usage_reserved_bo = NULL;
+
+ if (adev->mman.drv_vram_usage_size == 0 ||
+ adev->mman.drv_vram_usage_size > vram_size)
+ return 0;
+
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->mman.drv_vram_usage_start_offset,
+ adev->mman.drv_vram_usage_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mman.drv_vram_usage_reserved_bo,
+ NULL);
+}
+
/*
 * Memory training reservation functions
*/
@@ -1747,6 +1767,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
/*
+	 * The reserved VRAM for the driver must be pinned to the specified
+	 * place in VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_drv_reserve_vram_init(adev);
+ if (r)
+ return r;
+
+ /*
	 * Only NAVI10 and onward ASICs support IP discovery.
	 * If IP discovery is enabled, a block of memory should be
	 * reserved for IP discovery.
@@ -1871,6 +1899,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
amdgpu_ttm_fw_reserve_vram_fini(adev);
+ amdgpu_ttm_drv_reserve_vram_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6a70818039dd..b391c8d076ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -39,6 +39,8 @@
#define AMDGPU_POISON 0xd0bed0be
+struct hmm_range;
+
struct amdgpu_gtt_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
@@ -84,6 +86,11 @@ struct amdgpu_mman {
struct amdgpu_bo *fw_vram_usage_reserved_bo;
void *fw_vram_usage_va;
+ /* driver VRAM reservation */
+ u64 drv_vram_usage_start_offset;
+ u64 drv_vram_usage_size;
+ struct amdgpu_bo *drv_vram_usage_reserved_bo;
+
/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
struct amdgpu_bo *sdma_access_bo;
void *sdma_access_ptr;
@@ -149,15 +156,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+ struct hmm_range **range);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages)
+ struct page **pages,
+ struct hmm_range **range)
{
return -EPERM;
}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range)
{
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3449145ab2bc..33f3415096f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1252,3 +1252,20 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
+
+void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
+{
+ if (!adev->vcn.ras)
+ return;
+
+ amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
+
+ strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
+ adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
+ adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
+
+	/* If no special ras_late_init function is defined, use the default */
+ if (!adev->vcn.ras->ras_block.ras_late_init)
+ adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 253ea6b159df..dbb8d68a30c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -399,5 +399,6 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 9b81b6867db3..7cf99f752e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -511,6 +511,10 @@ static int amdgpu_vkms_sw_init(void *handle)
return r;
}
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2291aa14d888..003aa9e47085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,32 +143,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
}
-/*
- * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
- * happens while holding this lock anywhere to prevent deadlocks when
- * an MMU notifier runs in reclaim-FS context.
- */
-static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
-{
- mutex_lock(&vm->eviction_lock);
- vm->saved_flags = memalloc_noreclaim_save();
-}
-
-static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
-{
- if (mutex_trylock(&vm->eviction_lock)) {
- vm->saved_flags = memalloc_noreclaim_save();
- return 1;
- }
- return 0;
-}
-
-static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
-{
- memalloc_noreclaim_restore(vm->saved_flags);
- mutex_unlock(&vm->eviction_lock);
-}
-
/**
* amdgpu_vm_bo_evicted - vm_bo is evicted
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 83acb7bd80fe..6546e786bf00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -492,7 +492,48 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
*/
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
+ unsigned long flags;
+ spinlock_t *lock;
+
+ /*
+ * Workaround to stop racing between the fence signaling and handling
+	 * the cb. The lock is static after it is initially set up; just make
+	 * sure that the dma_fence structure isn't freed.
+ */
+ rcu_read_lock();
+ lock = vm->last_tlb_flush->lock;
+ rcu_read_unlock();
+
+ spin_lock_irqsave(lock, flags);
+ spin_unlock_irqrestore(lock, flags);
+
return atomic64_read(&vm->tlb_seq);
}
+/*
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+ mutex_lock(&vm->eviction_lock);
+ vm->saved_flags = memalloc_noreclaim_save();
+}
+
+static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+ if (mutex_trylock(&vm->eviction_lock)) {
+ vm->saved_flags = memalloc_noreclaim_save();
+ return true;
+ }
+ return false;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+ memalloc_noreclaim_restore(vm->saved_flags);
+ mutex_unlock(&vm->eviction_lock);
+}
+
#endif
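
The lock/unlock pair above with no code in between is a lock handoff: once the reader can take and drop the fence spinlock, any signaling callback that started earlier must have completed, so the following atomic64_read() observes its update. A userspace analogue of the same ordering trick, assuming pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long seq;

/* Stands in for the callback that runs with the lock held. */
static void *signaler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	seq++;			/* the update we must not miss */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static unsigned long read_seq(void)
{
	/* Lock handoff: once we can take and drop the lock, any
	 * critical section that began before us has completed. */
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
	return seq;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, signaler, NULL);
	pthread_join(t, NULL);
	printf("seq = %lu\n", read_seq());
	return 0;
}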
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 358b91243e37..b5f3bba851db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -597,7 +597,9 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
if (entry->bo)
return 0;
+ amdgpu_vm_eviction_unlock(vm);
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+ amdgpu_vm_eviction_lock(vm);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 90f87b2d985b..248f1a4e915f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2829,6 +2829,17 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
+ /* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_display_hotplug_work_func);
+
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
@@ -2891,6 +2902,8 @@ static int dce_v10_0_hw_fini(void *handle)
dce_v10_0_pageflip_interrupt_fini(adev);
+ flush_work(&adev->hotplug_work);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 0352de72c886..cd9c19060d89 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2948,6 +2948,17 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
+ /* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_display_hotplug_work_func);
+
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
@@ -3021,6 +3032,8 @@ static int dce_v11_0_hw_fini(void *handle)
dce_v11_0_pageflip_interrupt_fini(adev);
+ flush_work(&adev->hotplug_work);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 07bd16e82046..76323deecc58 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2706,6 +2706,18 @@ static int dce_v6_0_sw_init(void *handle)
if (r)
return r;
+ /* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* Pre-DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_display_hotplug_work_func);
+
drm_kms_helper_poll_init(adev_to_drm(adev));
return r;
@@ -2764,6 +2776,8 @@ static int dce_v6_0_hw_fini(void *handle)
dce_v6_0_pageflip_interrupt_fini(adev);
+ flush_work(&adev->hotplug_work);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index d73df100f2b3..01cf3ab111cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2730,6 +2730,18 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
+ /* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* Pre-DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_display_hotplug_work_func);
+
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
@@ -2790,6 +2802,8 @@ static int dce_v8_0_hw_fini(void *handle)
dce_v8_0_pageflip_interrupt_fini(adev);
+ flush_work(&adev->hotplug_work);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 9447999a3a48..9d2c6523f546 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4392,7 +4392,6 @@ static int gfx_v11_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4411,15 +4410,14 @@ static int gfx_v11_0_hw_fini(void *handle)
amdgpu_mes_kiq_hw_fini(adev);
}
- if (amdgpu_sriov_vf(adev)) {
- gfx_v11_0_cp_gfx_enable(adev, false);
- /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
-
+ if (amdgpu_sriov_vf(adev))
+		/* Skip disabling CPG and clearing the KIQ position here, so
+		 * that the CP can perform IDLE-SAVE during the switch. Those
+		 * steps are needed to avoid a DMAR error on gfx9, but the
+		 * error is not reproduced on gfx11.
+		 */
return 0;
- }
+
gfx_v11_0_cp_enable(adev, false);
gfx_v11_0_enable_gui_idle_interrupt(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
index 716ae6f2aefe..080ff11ca305 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -357,18 +357,6 @@ static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
gfxhub_v3_0_3_init_gart_aperture_regs(adev);
gfxhub_v3_0_3_init_system_aperture_regs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 657e53708248..21e46817d82d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -608,6 +608,8 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
+ struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -624,6 +626,11 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
*flags |= AMDGPU_PTE_SYSTEM;
*flags &= ~AMDGPU_PTE_VALID;
}
+
+ if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_UNCACHED))
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
+ AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
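
The PTE update added here (and repeated for gmc_v11_0 below) is a plain read-modify-write of a bit field: clear the MTYPE bits with the mask, then OR in the encoded UC value. A generic sketch with hypothetical mask/shift values standing in for the real AMDGPU_PTE_MTYPE_NV10 encoding:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout: MTYPE in bits 49:48 of a 64-bit PTE. */
#define PTE_MTYPE_SHIFT	48
#define PTE_MTYPE_MASK	(3ULL << PTE_MTYPE_SHIFT)
#define PTE_MTYPE(x)	(((uint64_t)(x) << PTE_MTYPE_SHIFT) & PTE_MTYPE_MASK)

enum { MTYPE_NC = 0, MTYPE_UC = 3 };	/* illustrative values */

int main(void)
{
	uint64_t flags = 0x1ULL | PTE_MTYPE(MTYPE_NC);

	/* Read-modify-write: clear the field, then set the new value. */
	flags = (flags & ~PTE_MTYPE_MASK) | PTE_MTYPE(MTYPE_UC);

	printf("flags = 0x%016llx\n", (unsigned long long)flags);
	return 0;
}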
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 66dfb574cc7d..96e52ec0fb69 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -503,6 +503,8 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
+ struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -519,6 +521,11 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
*flags |= AMDGPU_PTE_SYSTEM;
*flags &= ~AMDGPU_PTE_VALID;
}
+
+ if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_UNCACHED))
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
+ AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}
static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
@@ -551,7 +558,10 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.node_inst_num = adev->gmc.num_umc;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
- adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
+ if (adev->umc.node_inst_num == 4)
+ adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
+ else
+ adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
adev->umc.ras = &umc_v8_10_ras;
break;
case IP_VERSION(8, 11, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 67ca16a8027c..50386eb2eec8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1113,6 +1113,74 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
+ bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
+ bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
+ unsigned int mtype;
+ bool snoop = false;
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ if (is_vram) {
+ if (bo_adev == adev) {
+ if (uncached)
+ mtype = MTYPE_UC;
+ else if (coherent)
+ mtype = MTYPE_CC;
+ else
+ mtype = MTYPE_RW;
+ /* FIXME: is this still needed? Or does
+ * amdgpu_ttm_tt_pde_flags already handle this?
+ */
+ if (adev->ip_versions[GC_HWIP][0] ==
+ IP_VERSION(9, 4, 2) &&
+ adev->gmc.xgmi.connected_to_cpu)
+ snoop = true;
+ } else {
+ if (uncached || coherent)
+ mtype = MTYPE_UC;
+ else
+ mtype = MTYPE_NC;
+ if (mapping->bo_va->is_xgmi)
+ snoop = true;
+ }
+ } else {
+ if (uncached || coherent)
+ mtype = MTYPE_UC;
+ else
+ mtype = MTYPE_NC;
+ /* FIXME: is this still needed? Or does
+ * amdgpu_ttm_tt_pde_flags already handle this?
+ */
+ snoop = true;
+ }
+ break;
+ default:
+ if (uncached || coherent)
+ mtype = MTYPE_UC;
+ else
+ mtype = MTYPE_NC;
+
+ /* FIXME: is this still needed? Or does
+ * amdgpu_ttm_tt_pde_flags already handle this?
+ */
+ if (!is_vram)
+ snoop = true;
+ }
+
+ if (mtype != MTYPE_NC)
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+ AMDGPU_PTE_MTYPE_VG10(mtype);
+ *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
+}
+
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
@@ -1128,14 +1196,9 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
- !(*flags & AMDGPU_PTE_SYSTEM) &&
- mapping->bo_va->is_xgmi)
- *flags |= AMDGPU_PTE_SNOOPED;
-
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
- *flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
+ if (mapping->bo_va->base.bo)
+ gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
+ mapping, flags);
}
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index f87d0f6ffc93..f2b743a93915 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -807,16 +807,5 @@ static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
break;
}
- if (adev->jpeg.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
-
- strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
- adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
- adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->jpeg.ras->ras_block.ras_late_init)
- adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
- }
+ jpeg_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 63b0d0b810ec..3beb731b2ce5 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -27,6 +27,7 @@
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
+#include "jpeg_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@@ -38,6 +39,7 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
+static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
* jpeg_v4_0_early_init - set function pointers
@@ -55,6 +57,7 @@ static int jpeg_v4_0_early_init(void *handle)
jpeg_v4_0_set_dec_ring_funcs(adev);
jpeg_v4_0_set_irq_funcs(adev);
+ jpeg_v4_0_set_ras_funcs(adev);
return 0;
}
@@ -78,6 +81,18 @@ static int jpeg_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
@@ -167,6 +182,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
+
return 0;
}
@@ -524,6 +541,10 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
case VCN_4_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
break;
+ case VCN_4_0__SRCID_DJPEG0_POISON:
+ case VCN_4_0__SRCID_EJPEG0_POISON:
+ amdgpu_jpeg_process_poison_irq(adev, source, entry);
+ break;
default:
DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -607,3 +628,63 @@ const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v4_0_ip_funcs,
};
+
+static uint32_t jpeg_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V4_0_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V4_0_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V4_0_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v4_0_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
+ .query_poison_status = jpeg_v4_0_query_ras_poison_status,
+};
+
+static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
+ .ras_block = {
+ .hw_ops = &jpeg_v4_0_ras_hw_ops,
+ },
+};
+
+static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[JPEG_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ adev->jpeg.ras = &jpeg_v4_0_ras;
+ break;
+ default:
+ break;
+ }
+
+ jpeg_set_ras_funcs(adev);
+}
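
jpeg_v4_0_query_poison_by_instance() above leans on the driver's REG_GET_FIELD idiom: mask the register readback, then shift right to isolate the named field. A self-contained sketch of the idiom with hypothetical mask/shift constants:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field: POISONED_PF at bit 0 of a status register. */
#define STATUS__POISONED_PF_MASK	0x00000001U
#define STATUS__POISONED_PF_SHIFT	0

#define GET_FIELD(val, field) \
	(((val) & field##_MASK) >> field##_SHIFT)

int main(void)
{
	uint32_t reg = 0x00000001;	/* pretend register readback */
	uint32_t poisoned = GET_FIELD(reg, STATUS__POISONED_PF);

	printf("poisoned = %u\n", poisoned);
	return 0;
}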
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
index f1ed6ccfedca..07d36c2abd6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
@@ -24,6 +24,13 @@
#ifndef __JPEG_V4_0_H__
#define __JPEG_V4_0_H__
+enum amdgpu_jpeg_v4_0_sub_block {
+ AMDGPU_JPEG_V4_0_JPEG0 = 0,
+ AMDGPU_JPEG_V4_0_JPEG1,
+
+ AMDGPU_JPEG_V4_0_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version jpeg_v4_0_ip_block;
#endif /* __JPEG_V4_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 1395453a0662..8d9c1e841353 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1253,7 +1253,9 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
if (adev->mes.ring.sched.ready)
mes_v11_0_kiq_dequeue_sched(adev);
- mes_v11_0_enable(adev, false);
+ if (!amdgpu_sriov_vf(adev))
+ mes_v11_0_enable(adev, false);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index ed2293686f0d..9de46fa8f46c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -126,32 +126,6 @@ out:
return err;
}
-static int psp_v10_0_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct psp_ring *ring;
- struct amdgpu_device *adev = psp->adev;
-
- ring = &psp->km_ring;
-
- ring->ring_type = ring_type;
-
- /* allocate 4k Page of Local Frame Buffer memory for ring */
- ring->ring_size = 0x1000;
- ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
- if (ret) {
- ring->ring_size = 0;
- return ret;
- }
-
- return 0;
-}
-
static int psp_v10_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -245,7 +219,6 @@ static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
static const struct psp_funcs psp_v10_0_funcs = {
.init_microcode = psp_v10_0_init_microcode,
- .ring_init = psp_v10_0_ring_init,
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
.ring_destroy = psp_v10_0_ring_destroy,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 9518b4394a6e..bd3e3e23a939 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -360,32 +360,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static int psp_v11_0_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct psp_ring *ring;
- struct amdgpu_device *adev = psp->adev;
-
- ring = &psp->km_ring;
-
- ring->ring_type = ring_type;
-
- /* allocate 4k Page of Local Frame Buffer memory for ring */
- ring->ring_size = 0x1000;
- ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
- if (ret) {
- ring->ring_size = 0;
- return ret;
- }
-
- return 0;
-}
-
static int psp_v11_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -779,7 +753,6 @@ static const struct psp_funcs psp_v11_0_funcs = {
.bootloader_load_spl = psp_v11_0_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v11_0_bootloader_load_sos,
- .ring_init = psp_v11_0_ring_init,
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
index ff13e1beb49b..5697760a819b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
@@ -28,32 +28,6 @@
#include "mp/mp_11_0_8_offset.h"
-static int psp_v11_0_8_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct psp_ring *ring;
- struct amdgpu_device *adev = psp->adev;
-
- ring = &psp->km_ring;
-
- ring->ring_type = ring_type;
-
- /* allocate 4k Page of Local Frame Buffer memory for ring */
- ring->ring_size = 0x1000;
- ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
- if (ret) {
- ring->ring_size = 0;
- return ret;
- }
-
- return 0;
-}
-
static int psp_v11_0_8_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -194,7 +168,6 @@ static void psp_v11_0_8_ring_set_wptr(struct psp_context *psp, uint32_t value)
}
static const struct psp_funcs psp_v11_0_8_funcs = {
- .ring_init = psp_v11_0_8_ring_init,
.ring_create = psp_v11_0_8_ring_create,
.ring_stop = psp_v11_0_8_ring_stop,
.ring_destroy = psp_v11_0_8_ring_destroy,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 0b2ac418e4ac..8ed2281b6557 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -236,34 +236,6 @@ static void psp_v12_0_reroute_ih(struct psp_context *psp)
0x80000000, 0x8000FFFF, false);
}
-static int psp_v12_0_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct psp_ring *ring;
- struct amdgpu_device *adev = psp->adev;
-
- psp_v12_0_reroute_ih(psp);
-
- ring = &psp->km_ring;
-
- ring->ring_type = ring_type;
-
- /* allocate 4k Page of Local Frame Buffer memory for ring */
- ring->ring_size = 0x1000;
- ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
- if (ret) {
- ring->ring_size = 0;
- return ret;
- }
-
- return 0;
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -272,6 +244,8 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
+ psp_v12_0_reroute_ih(psp);
+
if (amdgpu_sriov_vf(psp->adev)) {
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
@@ -425,7 +399,6 @@ static const struct psp_funcs psp_v12_0_funcs = {
.init_microcode = psp_v12_0_init_microcode,
.bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v12_0_bootloader_load_sos,
- .ring_init = psp_v12_0_ring_init,
.ring_create = psp_v12_0_ring_create,
.ring_stop = psp_v12_0_ring_stop,
.ring_destroy = psp_v12_0_ring_destroy,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 21d822b1d589..ee27bfaba6fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -267,32 +268,6 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static int psp_v13_0_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct psp_ring *ring;
- struct amdgpu_device *adev = psp->adev;
-
- ring = &psp->km_ring;
-
- ring->ring_type = ring_type;
-
- /* allocate 4k Page of Local Frame Buffer memory for ring */
- ring->ring_size = 0x1000;
- ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
- if (ret) {
- ring->ring_size = 0;
- return ret;
- }
-
- return 0;
-}
-
static int psp_v13_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -728,7 +703,6 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
- .ring_init = psp_v13_0_ring_init,
.ring_create = psp_v13_0_ring_create,
.ring_stop = psp_v13_0_ring_stop,
.ring_destroy = psp_v13_0_ring_destroy,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index 321089dfa7db..9d4e24e518e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -199,32 +199,6 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static int psp_v13_0_4_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct psp_ring *ring;
- struct amdgpu_device *adev = psp->adev;
-
- ring = &psp->km_ring;
-
- ring->ring_type = ring_type;
-
- /* allocate 4k Page of Local Frame Buffer memory for ring */
- ring->ring_size = 0x1000;
- ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
- if (ret) {
- ring->ring_size = 0;
- return ret;
- }
-
- return 0;
-}
-
static int psp_v13_0_4_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -373,7 +347,6 @@ static const struct psp_funcs psp_v13_0_4_funcs = {
.bootloader_load_intf_drv = psp_v13_0_4_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_4_bootloader_load_dbg_drv,
.bootloader_load_sos = psp_v13_0_4_bootloader_load_sos,
- .ring_init = psp_v13_0_4_ring_init,
.ring_create = psp_v13_0_4_ring_create,
.ring_stop = psp_v13_0_4_ring_stop,
.ring_destroy = psp_v13_0_4_ring_destroy,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 01f3bcc62a6c..157147c6c94e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -160,32 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static int psp_v3_1_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct psp_ring *ring;
- struct amdgpu_device *adev = psp->adev;
-
- ring = &psp->km_ring;
-
- ring->ring_type = ring_type;
-
- /* allocate 4k Page of Local Frame Buffer memory for ring */
- ring->ring_size = 0x1000;
- ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
- if (ret) {
- ring->ring_size = 0;
- return ret;
- }
-
- return 0;
-}
-
static void psp_v3_1_reroute_ih(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -401,7 +375,6 @@ static const struct psp_funcs psp_v3_1_funcs = {
.init_microcode = psp_v3_1_init_microcode,
.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v3_1_bootloader_load_sos,
- .ring_init = psp_v3_1_ring_init,
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
.ring_destroy = psp_v3_1_ring_destroy,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 1d4013ed0d10..b258e9aa0558 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -322,6 +322,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
return AMD_RESET_METHOD_MODE1;
case IP_VERSION(13, 0, 4):
return AMD_RESET_METHOD_MODE2;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index 91235df54e22..b7da4528cf0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -46,6 +46,16 @@ const struct channelnum_map_colbit umc_v8_10_channelnum_map_colbit_table[] = {
};
const uint32_t
+ umc_v8_10_channel_idx_tbl_ext0[]
+ [UMC_V8_10_UMC_INSTANCE_NUM]
+ [UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
+ {{1, 5}, {7, 3}},
+ {{14, 15}, {13, 12}},
+ {{10, 11}, {9, 8}},
+ {{6, 2}, {0, 4}}
+ };
+
+const uint32_t
umc_v8_10_channel_idx_tbl[]
[UMC_V8_10_UMC_INSTANCE_NUM]
[UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
index 849ede88e111..25eaf4af5fcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
@@ -66,5 +66,9 @@ extern const uint32_t
[UMC_V8_10_UMC_INSTANCE_NUM]
[UMC_V8_10_CHANNEL_INSTANCE_NUM];
+extern const uint32_t
+ umc_v8_10_channel_idx_tbl_ext0[]
+ [UMC_V8_10_UMC_INSTANCE_NUM]
+ [UMC_V8_10_CHANNEL_INSTANCE_NUM];
#endif
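
Both tables share the same three-level shape: the first index is the memory node/die, then the UMC instance on that node, then the channel instance within the UMC. A hedged illustration of the lookup (the helper name is hypothetical, not part of the patch):

    /* Hypothetical lookup: [node][umc_instance][channel_instance] ->
     * physical channel index, using the new ext0 table.
     */
    static uint32_t umc_v8_10_channel_index(uint32_t node, uint32_t umc_inst,
                                            uint32_t ch_inst)
    {
            return umc_v8_10_channel_idx_tbl_ext0[node][umc_inst][ch_inst];
    }
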
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 8a7006d62a87..ce8374ee824d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -2002,16 +2002,5 @@ static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
break;
}
- if (adev->vcn.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
-
- strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
- adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
- adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->vcn.ras->ras_block.ras_late_init)
- adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
- }
+ amdgpu_vcn_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 897a5ce9c9da..403d054cf51b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -31,6 +31,7 @@
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0.h"
+#include "vcn_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@@ -64,6 +65,7 @@ static int vcn_v4_0_set_powergating_state(void *handle,
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
+static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v4_0_early_init - set function pointers
@@ -84,6 +86,7 @@ static int vcn_v4_0_early_init(void *handle)
vcn_v4_0_set_unified_ring_funcs(adev);
vcn_v4_0_set_irq_funcs(adev);
+ vcn_v4_0_set_ras_funcs(adev);
return 0;
}
@@ -132,6 +135,12 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ if (r)
+ return r;
+
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
@@ -296,6 +305,7 @@ static int vcn_v4_0_hw_fini(void *handle)
}
}
+ amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
}
return 0;
@@ -1939,6 +1949,9 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
+ case VCN_4_0__SRCID_UVD_POISON:
+ amdgpu_vcn_process_poison_irq(adev, source, entry);
+ break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -2001,3 +2014,60 @@ const struct amdgpu_ip_block_version vcn_v4_0_ip_block =
.rev = 0,
.funcs = &vcn_v4_0_ip_funcs,
};
+
+static uint32_t vcn_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V4_0_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v4_0_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
+ .query_poison_status = vcn_v4_0_query_ras_poison_status,
+};
+
+static struct amdgpu_vcn_ras vcn_v4_0_ras = {
+ .ras_block = {
+ .hw_ops = &vcn_v4_0_ras_hw_ops,
+ },
+};
+
+static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[VCN_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ adev->vcn.ras = &vcn_v4_0_ras;
+ break;
+ default:
+ break;
+ }
+
+ amdgpu_vcn_set_ras_funcs(adev);
+}
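
vcn_v4_0_query_ras_poison_status() ORs the per-instance, per-sub-block results together, so a poisoned page fault anywhere in the VCN block reports as true. The register read itself is a plain mask-and-shift; REG_GET_FIELD in amdgpu expands roughly as below (an illustrative restatement under the usual mask/shift macro naming convention, which is an assumption here):

    uint32_t reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
    /* extract the POISONED_PF bit by mask and shift */
    uint32_t stat = (reg_value & UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK)
                    >> UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT;
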
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.h
index 7c5c9d91bb52..7d3d11f40f27 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.h
@@ -24,6 +24,12 @@
#ifndef __VCN_V4_0_H__
#define __VCN_V4_0_H__
+enum amdgpu_vcn_v4_0_sub_block {
+ AMDGPU_VCN_V4_0_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V4_0_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v4_0_ip_block;
#endif /* __VCN_V4_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f6ffd7c96ff9..12ef782eb478 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -2111,6 +2111,8 @@ void vi_set_virt_ops(struct amdgpu_device *adev)
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
+ amdgpu_device_set_sriov_virtual_display(adev);
+
switch (adev->asic_type) {
case CHIP_TOPAZ:
/* topaz has no DCE, UVD, VCE */
@@ -2130,7 +2132,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
@@ -2150,7 +2152,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5feaba6a77de..6d291aa6386b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1950,7 +1950,7 @@ static int criu_checkpoint(struct file *filep,
{
int ret;
uint32_t num_devices, num_bos, num_objects;
- uint64_t priv_size, priv_offset = 0;
+ uint64_t priv_size, priv_offset = 0, bo_priv_offset;
if (!args->devices || !args->bos || !args->priv_data)
return -EINVAL;
@@ -1994,38 +1994,34 @@ static int criu_checkpoint(struct file *filep,
if (ret)
goto exit_unlock;
- ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
- (uint8_t __user *)args->priv_data, &priv_offset);
- if (ret)
- goto exit_unlock;
+ /* Leave room for BOs in the private data. They need to be restored
+ * before events, but we checkpoint them last to simplify the error
+ * handling.
+ */
+ bo_priv_offset = priv_offset;
+ priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
if (num_objects) {
ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
- goto close_bo_fds;
+ goto exit_unlock;
ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
- goto close_bo_fds;
+ goto exit_unlock;
ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
if (ret)
- goto close_bo_fds;
+ goto exit_unlock;
}
-close_bo_fds:
- if (ret) {
- /* If IOCTL returns err, user assumes all FDs opened in criu_dump_bos are closed */
- uint32_t i;
- struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos;
-
- for (i = 0; i < num_bos; i++) {
- if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
- close_fd(bo_buckets[i].dmabuf_fd);
- }
- }
+ /* This must be the last thing in this function that can fail.
+ * Otherwise we leak dmabuf file descriptors.
+ */
+ ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
+ (uint8_t __user *)args->priv_data, &bo_priv_offset);
exit_unlock:
mutex_unlock(&p->mutex);
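
The reordering works by reserving the BO region of the private-data blob up front and filling it last: queues, events and SVM data are written after the reserved window, and only then are the BOs dumped, so the one step that opens dmabuf FDs is also the last step that can fail. The pattern in isolation (a simplified sketch; user_bos and user_priv stand in for the __user pointers above):

    uint64_t priv_offset = 0, bo_priv_offset;

    bo_priv_offset = priv_offset;        /* BO records will land here */
    priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);

    /* ... checkpoint queues, events and SVM state at priv_offset ... */

    /* Last fallible step: dumping BOs opens dmabuf FDs, so nothing
     * after this call is allowed to fail.
     */
    ret = criu_checkpoint_bos(p, num_bos, user_bos, user_priv,
                              &bo_priv_offset);
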
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 267dd69737fa..af01ba061e1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1111,7 +1111,7 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
props->cache_latency = cache->cache_latency;
memcpy(props->sibling_map, cache->sibling_map,
- sizeof(props->sibling_map));
+ CRAT_SIBLINGMAP_SIZE);
/* set the sibling_map_size as 32 for CRAT from ACPI */
props->sibling_map_size = CRAT_SIBLINGMAP_SIZE;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index ee8e5f8b007d..c8dd948966c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -495,7 +495,10 @@ static int kfd_gws_init(struct kfd_dev *kfd)
(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
&& kfd->mec2_fw_version >= 0x30) ||
(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
- && kfd->mec2_fw_version >= 0x28))))
+ && kfd->mec2_fw_version >= 0x28) ||
+ (KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0)
+ && KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)
+ && kfd->mec2_fw_version >= 0x6b))))
ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
kfd->adev->gds.gws_size, &kfd->gws);
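
The new clause admits the whole gfx 10.3.x range (GC IP >= 10.3.0 and < 11.0.0) once MEC2 firmware 0x6b or newer is present, which is what enables cooperative launch there. The range comparison is valid because IP_VERSION packs major/minor/revision into one monotonically ordered integer; its presumed shape (quoted from memory, treat as an assumption):

    /* Packs e.g. 10.3.0 as 0x0a0300, so < and >= order versions correctly. */
    #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
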
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 83e3ce9f6049..729d26d648af 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -506,6 +506,7 @@ int kfd_criu_restore_event(struct file *devkfd,
ret = create_other_event(p, ev, &ev_priv->event_id);
break;
}
+ mutex_unlock(&p->event_mutex);
exit:
if (ret)
@@ -513,8 +514,6 @@ exit:
kfree(ev_priv);
- mutex_unlock(&p->event_mutex);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index c9a95a816912..10048ce16aea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -28,7 +28,6 @@
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
-#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index afe7c4998676..814f99888ab1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -26,7 +26,7 @@
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
-#include "amdgpu_mn.h"
+#include "amdgpu_hmm.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
@@ -1596,9 +1596,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
WRITE_ONCE(p->svms.faulting_task, current);
- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
- addr, npages, &hmm_range,
- readonly, true, owner);
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+ readonly, owner, NULL,
+ &hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 1d9b90d979c4..ef9c6fdfb88d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1723,7 +1723,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
/* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info
* tables
*/
-void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev)
+static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
int i, j, k;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 441810277cb5..a8772082883e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -146,6 +146,14 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
+/*
+ * DMUB Async to Sync Mechanism Status
+ */
+#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
+#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
+#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
+#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
+
/**
* DOC: overview
*
@@ -1635,12 +1643,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
}
- if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
- goto error;
- }
-
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
* It is expected that DMUB will resend any pending notifications at this point, for
* example HPD from DPIA.
@@ -1648,6 +1650,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc))
dc_enable_dmub_outbox(adev->dm.dc);
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -4590,6 +4598,7 @@ static int dm_early_init(void *handle)
adev_to_drm(adev)->dev,
&dev_attr_s3_debug);
#endif
+ adev->dc_enabled = true;
return 0;
}
@@ -5682,7 +5691,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_connector_state *con_state =
dm_state ? &dm_state->base : NULL;
struct dc_stream_state *stream = NULL;
- struct drm_display_mode mode = *drm_mode;
+ struct drm_display_mode mode;
struct drm_display_mode saved_mode;
struct drm_display_mode *freesync_mode = NULL;
bool native_mode_found = false;
@@ -5697,6 +5706,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink = NULL;
+ drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
if (aconnector == NULL) {
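
The switch from a struct copy to drm_mode_init() is not cosmetic: struct drm_display_mode embeds a list_head, and a plain assignment duplicates the source mode's list pointers, whereas drm_mode_init() gives the copy a fresh list head and duplicates only the mode data. In isolation:

    struct drm_display_mode mode;

    /* Safe duplication: fresh list head, same timing data. */
    drm_mode_init(&mode, drm_mode);
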
@@ -6527,7 +6537,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
- int i, j;
+ int i, j, ret;
int vcpi, pbn_div, pbn, slot_num = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6574,8 +6584,11 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
dm_conn_state->pbn = pbn;
dm_conn_state->vcpi_slots = slot_num;
- drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
- false);
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+ dm_conn_state->pbn, false);
+ if (ret < 0)
+ return ret;
+
continue;
}
@@ -7682,9 +7695,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
- fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
- new_crtc_state,
- &bundle->flip_addrs[planes_count]);
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
+ fill_dc_dirty_rects(plane, old_plane_state,
+ new_plane_state, new_crtc_state,
+ &bundle->flip_addrs[planes_count]);
/*
* Only allow immediate flips for fast updates that don't
@@ -9591,10 +9605,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
- if (!pre_validate_dsc(state, &dm_state, vars)) {
- ret = -EINVAL;
+ ret = pre_validate_dsc(state, &dm_state, vars);
+ if (ret != 0)
goto fail;
- }
}
#endif
@@ -9689,9 +9702,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
goto fail;
}
@@ -10171,6 +10184,8 @@ static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
*operation_result = AUX_RET_ERROR_TIMEOUT;
} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
} else {
*operation_result = AUX_RET_ERROR_UNKNOWN;
}
@@ -10218,6 +10233,16 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
+
+ if (payload->length != adev->dm.dmub_notify->aux_reply.length) {
+ DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n",
+ payload->address, payload->length,
+ adev->dm.dmub_notify->aux_reply.length);
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx,
+ DMUB_ASYNC_TO_SYNC_ACCESS_INVALID,
+ (uint32_t *)operation_result);
+ }
+
memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
adev->dm.dmub_notify->aux_reply.length);
}
@@ -10238,8 +10263,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
*/
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
if (!adev->mman.keep_stolen_vga_memory)
return true;
break;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b618b2586e7b..83436ef3b26b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -51,12 +51,6 @@
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
/*
- * DMUB Async to Sync Mechanism Status
- */
-#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
-#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
-#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
-/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 3675b39e297a..22125daf9dcf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -415,7 +415,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
-
+ bool is_dcn;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -453,8 +453,14 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
acrtc->otg_inst = -1;
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
+
+ /* Don't enable DRM CRTC degamma property for DCE since it doesn't
+ * support programmable degamma anywhere.
+ */
+ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
+ drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
+
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
return 0;
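
drm_crtc_enable_color_mgmt() takes the degamma LUT size as its second argument, and passing 0 simply omits the DEGAMMA_LUT/DEGAMMA_LUT_SIZE properties on the CRTC; that is how the DCE path opts out here while DCN keeps the full pipeline. The call in isolation:

    /* DCN exposes degamma; on DCE a size of 0 hides the property. */
    drm_crtc_enable_color_mgmt(&acrtc->base,
                               is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, /* degamma LUT */
                               true,                    /* CTM property */
                               MAX_COLOR_LUT_ENTRIES);  /* gamma LUT */
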
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index f72c013d3a5b..e47098fa5aac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -971,3 +971,11 @@ void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
/* TODO: add periodic detection implementation */
}
+
+void dm_helpers_dp_mst_update_branch_bandwidth(
+ struct dc_context *ctx,
+ struct dc_link *link)
+{
+ // TODO
+}
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d7907974f25a..24d859ad4712 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -710,13 +710,13 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
return dsc_config.bits_per_pixel;
}
-static bool increase_dsc_bpp(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_state *mst_state,
- struct dc_link *dc_link,
- struct dsc_mst_fairness_params *params,
- struct dsc_mst_fairness_vars *vars,
- int count,
- int k)
+static int increase_dsc_bpp(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count,
+ int k)
{
int i;
bool bpp_increased[MAX_PIPES];
@@ -726,6 +726,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
int remaining_to_increase = 0;
int link_timeslots_used;
int fair_pbn_alloc;
+ int ret = 0;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
@@ -764,52 +765,60 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
if (initial_slack[next_index] > fair_pbn_alloc) {
vars[next_index].pbn += fair_pbn_alloc;
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
- if (!drm_dp_mst_atomic_check(state)) {
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
} else {
vars[next_index].pbn -= fair_pbn_alloc;
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
}
} else {
vars[next_index].pbn += initial_slack[next_index];
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
- if (!drm_dp_mst_atomic_check(state)) {
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
} else {
vars[next_index].pbn -= initial_slack[next_index];
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
}
}
bpp_increased[next_index] = true;
remaining_to_increase--;
}
- return true;
+ return 0;
}
-static bool try_disable_dsc(struct drm_atomic_state *state,
- struct dc_link *dc_link,
- struct dsc_mst_fairness_params *params,
- struct dsc_mst_fairness_vars *vars,
- int count,
- int k)
+static int try_disable_dsc(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count,
+ int k)
{
int i;
bool tried[MAX_PIPES];
@@ -817,6 +826,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
int max_kbps_increase;
int next_index;
int remaining_to_try = 0;
+ int ret;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -847,49 +857,52 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
- if (!drm_dp_mst_atomic_check(state)) {
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0) {
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
- if (drm_dp_atomic_find_time_slots(state,
- params[next_index].port->mgr,
- params[next_index].port,
- vars[next_index].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn);
+ if (ret < 0)
+ return ret;
}
tried[next_index] = true;
remaining_to_try--;
}
- return true;
+ return 0;
}
-static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dc_link *dc_link,
- struct dsc_mst_fairness_vars *vars,
- struct drm_dp_mst_topology_mgr *mgr,
- int *link_vars_start_index)
+static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_vars *vars,
+ struct drm_dp_mst_topology_mgr *mgr,
+ int *link_vars_start_index)
{
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
int count = 0;
- int i, k;
+ int i, k, ret;
bool debugfs_overwrite = false;
memset(params, 0, sizeof(params));
if (IS_ERR(mst_state))
- return false;
+ return PTR_ERR(mst_state);
mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -940,7 +953,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (count == 0) {
ASSERT(0);
- return true;
+ return 0;
}
/* k is start index of vars for current phy link used by mst hub */
@@ -954,13 +967,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
- vars[i + k].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+ vars[i + k].pbn);
+ if (ret < 0)
+ return ret;
}
- if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret == 0 && !debugfs_overwrite) {
set_dsc_configs_from_fairness_vars(params, vars, count, k);
- return true;
+ return 0;
+ } else if (ret != -ENOSPC) {
+ return ret;
}
/* Try max compression */
@@ -969,31 +986,36 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
- if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
- params[i].port, vars[i + k].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn);
+ if (ret < 0)
+ return ret;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
- params[i].port, vars[i + k].pbn) < 0)
- return false;
+ ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn);
+ if (ret < 0)
+ return ret;
}
}
- if (drm_dp_mst_atomic_check(state))
- return false;
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret != 0)
+ return ret;
/* Optimize degree of compression */
- if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k))
- return false;
+ ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
+ if (ret < 0)
+ return ret;
- if (!try_disable_dsc(state, dc_link, params, vars, count, k))
- return false;
+ ret = try_disable_dsc(state, dc_link, params, vars, count, k);
+ if (ret < 0)
+ return ret;
set_dsc_configs_from_fairness_vars(params, vars, count, k);
- return true;
+ return 0;
}
static bool is_dsc_need_re_compute(
@@ -1094,15 +1116,17 @@ static bool is_dsc_need_re_compute(
return is_dsc_need_re_compute;
}
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dsc_mst_fairness_vars *vars)
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
int link_vars_start_index = 0;
+ int ret = 0;
for (i = 0; i < dc_state->stream_count; i++)
computed_streams[i] = false;
@@ -1115,7 +1139,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1125,19 +1149,16 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
continue;
if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
- return false;
+ return -EINVAL;
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
- &aconnector->mst_mgr,
- &link_vars_start_index)) {
- mutex_unlock(&aconnector->mst_mgr.lock);
- return false;
- }
- mutex_unlock(&aconnector->mst_mgr.lock);
+ mst_mgr = aconnector->port->mgr;
+ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+ &link_vars_start_index);
+ if (ret != 0)
+ return ret;
for (j = 0; j < dc_state->stream_count; j++) {
if (dc_state->streams[j]->link == stream->link)
@@ -1150,22 +1171,23 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (stream->timing.flags.DSC == 1)
if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
- return false;
+ return -EINVAL;
}
- return true;
+ return ret;
}
-static bool
- pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dsc_mst_fairness_vars *vars)
+static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
int link_vars_start_index = 0;
+ int ret;
for (i = 0; i < dc_state->stream_count; i++)
computed_streams[i] = false;
@@ -1178,7 +1200,7 @@ static bool
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1190,14 +1212,11 @@ static bool
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
- &aconnector->mst_mgr,
- &link_vars_start_index)) {
- mutex_unlock(&aconnector->mst_mgr.lock);
- return false;
- }
- mutex_unlock(&aconnector->mst_mgr.lock);
+ mst_mgr = aconnector->port->mgr;
+ ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+ &link_vars_start_index);
+ if (ret != 0)
+ return ret;
for (j = 0; j < dc_state->stream_count; j++) {
if (dc_state->streams[j]->link == stream->link)
@@ -1205,7 +1224,7 @@ static bool
}
}
- return true;
+ return ret;
}
static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
@@ -1260,9 +1279,9 @@ static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
return ret;
}
-bool pre_validate_dsc(struct drm_atomic_state *state,
- struct dm_atomic_state **dm_state_ptr,
- struct dsc_mst_fairness_vars *vars)
+int pre_validate_dsc(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state_ptr,
+ struct dsc_mst_fairness_vars *vars)
{
int i;
struct dm_atomic_state *dm_state;
@@ -1271,11 +1290,12 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
if (!is_dsc_precompute_needed(state)) {
DRM_INFO_ONCE("DSC precompute is not needed.\n");
- return true;
+ return 0;
}
- if (dm_atomic_get_state(state, dm_state_ptr)) {
+ ret = dm_atomic_get_state(state, dm_state_ptr);
+ if (ret != 0) {
DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
- return false;
+ return ret;
}
dm_state = *dm_state_ptr;
@@ -1287,7 +1307,7 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
if (!local_dc_state)
- return false;
+ return -ENOMEM;
for (i = 0; i < local_dc_state->stream_count; i++) {
struct dc_stream_state *stream = dm_state->context->streams[i];
@@ -1323,9 +1343,9 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
if (ret != 0)
goto clean_exit;
- if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
+ ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
+ if (ret != 0) {
DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
goto clean_exit;
}
@@ -1356,7 +1376,7 @@ clean_exit:
kfree(local_dc_state);
- return (ret == 0);
+ return ret;
}
static unsigned int kbps_from_pbn(unsigned int pbn)
@@ -1399,6 +1419,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
unsigned int max_compressed_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
+ struct drm_dp_mst_topology_mgr *mst_mgr;
/*
* check if the mode could be supported if DSC pass-through is supported
@@ -1407,7 +1428,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
*/
if (is_dsc_common_config_possible(stream, &bw_range) &&
aconnector->port->passthrough_aux) {
- mutex_lock(&aconnector->mst_mgr.lock);
+ mst_mgr = aconnector->port->mgr;
+ mutex_lock(&mst_mgr->lock);
cur_link_settings = stream->link->verified_link_cap;
@@ -1420,7 +1442,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
down_link_bw_in_kbps);
- mutex_unlock(&aconnector->mst_mgr.lock);
+ mutex_unlock(&mst_mgr->lock);
/*
* use the maximum dsc compression bandwidth as the required
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index b92a7c5671aa..97fd70df531b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -53,15 +53,15 @@ struct dsc_mst_fairness_vars {
struct amdgpu_dm_connector *aconnector;
};
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct dsc_mst_fairness_vars *vars);
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars);
bool needs_dsc_aux_workaround(struct dc_link *link);
-bool pre_validate_dsc(struct drm_atomic_state *state,
- struct dm_atomic_state **dm_state_ptr,
- struct dsc_mst_fairness_vars *vars);
+int pre_validate_dsc(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state_ptr,
+ struct dsc_mst_fairness_vars *vars);
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
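
Converting this helper chain from bool to int lets the errno from drm_dp_atomic_find_time_slots() and drm_dp_mst_atomic_check() (notably -EDEADLK, which the atomic machinery uses to trigger a retry, and -ENOSPC for genuine bandwidth exhaustion) reach amdgpu_dm_atomic_check() intact instead of being flattened to -EINVAL. The calling convention after the change, as the amdgpu_dm.c hunk earlier in this series applies it:

    ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
    if (ret) {
            /* ret may be -EDEADLK (retry), -ENOSPC, -EINVAL, ... */
            goto fail;
    }
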
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 9b8ea6e9a2b9..a1a00f432168 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -138,7 +138,9 @@ static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
- table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
+ table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
+ object_table_offset,
+ struct_size(table, asObjects, 1)));
if (!table)
return 0;
@@ -166,8 +168,9 @@ static struct graphics_object_id bios_parser_get_connector_id(
uint32_t connector_table_offset = bp->object_info_tbl_offset
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
- ATOM_OBJECT_TABLE *tbl =
- GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
+ ATOM_OBJECT_TABLE *tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
+ connector_table_offset,
+ struct_size(tbl, asObjects, 1)));
if (!tbl) {
dm_error("Can't get connector table from atom bios.\n");
@@ -662,8 +665,9 @@ static enum bp_result get_ss_info_v3_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return BP_RESULT_UNSUPPORTED;
- ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
- DATA_TABLES(ASIC_InternalSS_Info));
+ ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
table_size =
(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
- sizeof(ATOM_COMMON_TABLE_HEADER))
@@ -1029,8 +1033,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return result;
- header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
- DATA_TABLES(ASIC_InternalSS_Info));
+ header = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image(
+ &bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(header, asSpreadSpectrum, 1)));
memset(info, 0, sizeof(struct spread_spectrum_info));
@@ -1709,8 +1715,10 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return 0;
- header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
- DATA_TABLES(ASIC_InternalSS_Info));
+ header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image(
+ &bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(header_include, asSpreadSpectrum, 1)));
size = (le16_to_cpu(header_include->sHeader.usStructureSize)
- sizeof(ATOM_COMMON_TABLE_HEADER))
@@ -1746,8 +1754,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return number;
- header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
- DATA_TABLES(ASIC_InternalSS_Info));
+ header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(header_include, asSpreadSpectrum, 1)));
size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
@@ -1789,11 +1798,13 @@ static enum bp_result bios_parser_get_gpio_pin_info(
if (!DATA_TABLES(GPIO_Pin_LUT))
return BP_RESULT_BADBIOSTABLE;
- header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
+ header = ((ATOM_GPIO_PIN_LUT *) bios_get_image(&bp->base,
+ DATA_TABLES(GPIO_Pin_LUT),
+ struct_size(header, asGPIO_Pin, 1)));
if (!header)
return BP_RESULT_BADBIOSTABLE;
- if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
+ if (sizeof(ATOM_COMMON_TABLE_HEADER) + struct_size(header, asGPIO_Pin, 1)
> le16_to_cpu(header->sHeader.usStructureSize))
return BP_RESULT_BADBIOSTABLE;
@@ -1978,7 +1989,8 @@ static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
offset += bp->object_info_tbl_offset;
- tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+ tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base, offset,
+ struct_size(tbl, asObjects, 1)));
if (!tbl)
return NULL;
@@ -2600,8 +2612,7 @@ static enum bp_result update_slot_layout_info(
for (;;) {
- record_header = (ATOM_COMMON_RECORD_HEADER *)
- GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
+ record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
if (record_header == NULL) {
result = BP_RESULT_BADBIOSTABLE;
break;
@@ -2615,7 +2626,7 @@ static enum bp_result update_slot_layout_info(
if (record_header->ucRecordType ==
ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
- sizeof(ATOM_BRACKET_LAYOUT_RECORD)
+ struct_size(record, asConnInfo, 1)
<= record_header->ucRecordSize) {
record = (ATOM_BRACKET_LAYOUT_RECORD *)
(record_header);
@@ -2709,8 +2720,9 @@ static enum bp_result get_bracket_layout_record(
genericTableOffset = bp->object_info_tbl_offset +
bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
- object_table = (ATOM_OBJECT_TABLE *)
- GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
+ object_table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
+ genericTableOffset,
+ struct_size(object_table, asObjects, 1)));
if (!object_table)
return BP_RESULT_FAILURE;
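
These bios_parser conversions replace fixed sizeof() reads with struct_size() from <linux/overflow.h>, which computes the size of a header plus n trailing flexible-array elements and saturates on overflow; asking bios_get_image() for struct_size(table, asObjects, 1) guarantees the returned image actually covers one valid entry. Its semantics in one line:

    /* struct_size(p, member, n) == sizeof(*p) + n * sizeof(p->member[0]),
     * saturating to SIZE_MAX if the multiplication or addition overflows.
     */
    size_t len = struct_size(table, asObjects, 1);
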
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ee0456b5e14e..074e70a5c458 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -462,6 +462,7 @@ static enum bp_result get_gpio_i2c_info(
uint32_t count = 0;
unsigned int table_index = 0;
bool find_valid = false;
+ struct atom_gpio_pin_assignment *pin;
if (!info)
return BP_RESULT_BADINPUT;
@@ -489,20 +490,17 @@ static enum bp_result get_gpio_i2c_info(
- sizeof(struct atom_common_table_header))
/ sizeof(struct atom_gpio_pin_assignment);
+ pin = (struct atom_gpio_pin_assignment *) header->gpio_pin;
+
for (table_index = 0; table_index < count; table_index++) {
- if (((record->i2c_id & I2C_HW_CAP) == (
- header->gpio_pin[table_index].gpio_id &
- I2C_HW_CAP)) &&
- ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
- (header->gpio_pin[table_index].gpio_id &
- I2C_HW_ENGINE_ID_MASK)) &&
- ((record->i2c_id & I2C_HW_LANE_MUX) ==
- (header->gpio_pin[table_index].gpio_id &
- I2C_HW_LANE_MUX))) {
+ if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) &&
+ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) &&
+ ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) {
/* still valid */
find_valid = true;
break;
}
+ pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment));
}
/* If we don't find the entry that we are looking for then
@@ -2393,6 +2391,26 @@ static enum bp_result get_vram_info_v25(
return result;
}
+static enum bp_result get_vram_info_v30(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v3_0 *info_v30;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0,
+ DATA_TABLES(vram_info));
+
+ if (info_v30 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v30->channel_num;
+ info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
+
+ return result;
+}
+
+
/*
* get_integrated_info_v11
*
@@ -3060,6 +3078,16 @@ static enum bp_result bios_parser_get_vram_info(
}
break;
+ case 3:
+ switch (revision.minor) {
+ case 0:
+ result = get_vram_info_v30(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
default:
return result;
}
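
In the v3.0 VRAM table the channel width field is evidently stored as log2 of the bit width, hence the (1 << channel_width) / 8 conversion to bytes. Worked examples:

    /* channel_width == 5: (1 << 5) / 8 == 4 bytes (32-bit channels)
     * channel_width == 6: (1 << 6) / 8 == 8 bytes (64-bit channels)
     */
    info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
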
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index c1eaf571407a..1c0569b1dc8f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -609,8 +609,10 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
bw_params->vram_type = bios_info->memory_type;
- bw_params->num_channels = bios_info->ma_channel_number;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
+ //bw_params->dram_channel_width_bytes = dc->ctx->asic_id.vram_width;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 1131c6d73f6c..20a06c04e4a1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -363,32 +363,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
@@ -400,32 +400,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index ef0795b14a1f..2db595672a46 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -123,9 +123,10 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
uint32_t result;
result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000);
- ASSERT(result == VBIOSSMC_Result_OK);
- smu_print("SMU response after wait: %d\n", result);
+ if (result != VBIOSSMC_Result_OK)
+ smu_print("SMU response was not OK; response received after wait: %d\n",
+ result);
if (result == VBIOSSMC_Status_BUSY)
return -1;
@@ -216,6 +217,12 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
+#ifdef DBG
+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
+ actual_dcfclk_set_mhz,
+ actual_dcfclk_set_mhz * 1000);
+#endif
+
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 187f5b27fdc8..3edc81e2d417 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -553,6 +553,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d446e6098948..1c3de3a1671e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1054,6 +1054,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
int i, j;
struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
if (dangling_context == NULL)
return;
@@ -1096,6 +1098,18 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+ /* When disabling plane for a phantom pipe, we must turn on the
+ * phantom OTG so the disable programming gets the double buffer
+ * update. Otherwise the pipe will be left in a partially disabled
+ * state that can result in underflow or hang when enabling it
+ * again for different use.
+ */
+ if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+ }
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1111,6 +1125,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
dc->hwss.post_unlock_program_front_end(dc, dangling_context);
}
+ /* We need to put the phantom OTG back into its default (disabled) state or we
+ * can get corruption when transitioning from one SubVP config to a different one.
+ * The OTG is set to disable on the falling edge of VUPDATE so the plane disable
+ * will still get its double buffer update.
+ */
+ if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+ }
}
}
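The two hunks above bracket the plane teardown: wake the phantom OTG so the disable double-buffers, then park it again once programming is done. A condensed sketch of that bracket, with stand-in names for the timing-generator hooks:

	#include <stdbool.h>

	struct tg;				/* opaque timing generator */
	void tg_enable(struct tg *tg);		/* stand-ins for the enable_crtc /   */
	void tg_disable(struct tg *tg);		/* disable_phantom_crtc hooks above  */
	void remove_and_program_planes(void);

	static void disable_dangling_phantom(struct tg *tg, bool is_phantom)
	{
		if (is_phantom)
			tg_enable(tg);	/* OTG on: disable gets its double buffer update */
		remove_and_program_planes();
		if (is_phantom)
			tg_disable(tg);	/* back to the default (disabled) state */
	}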
@@ -1169,7 +1192,7 @@ static void disable_vbios_mode_if_required(
if (pix_clk_100hz != requested_pix_clk_100hz) {
core_link_disable_stream(pipe);
- pipe->stream->dpms_off = false;
+ pipe->stream->dpms_off = true;
}
}
}
@@ -1749,6 +1772,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ /* When SubVP is active, all HW programming must be done while
+ * the SubVP lock is held
+ */
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, false);
@@ -1776,9 +1805,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
-
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) {
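This hunk, together with the removal just above it, moves the SubVP lock acquisition ahead of all HW programming instead of just ahead of apply_ctx_to_hw. A sketch of the resulting ordering, with stand-in names (the matching unlock happens later in the function and is not shown in these hunks):

	#include <stdbool.h>

	void subvp_lock(bool lock);	/* stand-in for hwss.subvp_pipe_control_lock */
	void program_hw(void);		/* DSC PG update through apply_ctx_to_hw */

	static void commit_no_check_locked(void)
	{
		subvp_lock(true);	/* now taken before ANY HW writes */
		program_hw();
		subvp_lock(false);	/* released once programming completes */
	}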
@@ -3382,22 +3408,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
}
- if (update_type != UPDATE_TYPE_FAST) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
- subvp_prev_use) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
- }
- }
- }
-
dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
if (update_type != UPDATE_TYPE_FAST) {
@@ -3455,6 +3465,24 @@ static void commit_planes_for_stream(struct dc *dc,
return;
}
+ if (update_type != UPDATE_TYPE_FAST) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP &&
+ pipe_ctx->stream && pipe_ctx->plane_state) {
+ /* Only update visual confirm for SUBVP here.
+ * The bar appears on all pipes, so we need to update it on all displays
+ * to keep the information from going stale.
+ */
+ struct mpcc_blnd_cfg blnd_cfg = { 0 };
+
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color,
+ pipe_ctx->plane_res.hubp->inst);
+ }
+ }
+ }
+
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -3572,7 +3600,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
-
}
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
@@ -3609,6 +3636,44 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
+ /* For phantom pipe OTG enable, it has to be done after any previous pipe
+ * that was in use has already been programmed and gotten its double buffer
+ * update for "disable".
+ */
+ if (update_type != UPDATE_TYPE_FAST) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* If an active, non-phantom pipe is being transitioned into a phantom
+ * pipe, wait for the double buffer update to complete first before we do
+ * ANY phantom pipe programming.
+ */
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
+ old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ }
+ }
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
+ subvp_prev_use) {
+ // If old context or new context has phantom pipes, apply
+ // the phantom timings now. We can't change the phantom
+ // pipe configuration safely without the driver acquiring
+ // the DMCUB lock first.
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ break;
+ }
+ }
+ }
+
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
if (update_type != UPDATE_TYPE_FAST)
@@ -3675,7 +3740,6 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
bool force_minimal_pipe_splitting = false;
- uint32_t i;
*is_plane_addition = false;
@@ -3707,33 +3771,17 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
}
}
- /* For SubVP pipe split case when adding MPO video
- * we need to add a minimal transition. In this case
- * there will be 2 streams (1 main stream, 1 phantom
- * stream).
+ /* For SubVP, when adding or removing planes, we need to add a minimal transition
+ * (even when disabling all planes). Whenever disabling a phantom pipe, we
+ * must use the minimal transition path to disable the pipe correctly.
*/
- if (cur_stream_status &&
- dc->current_state->stream_count == 2 &&
- stream->mall_stream_config.type == SUBVP_MAIN) {
- bool is_pipe_split = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
- (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
- dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
- is_pipe_split = true;
- break;
- }
- }
-
+ if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
/* determine if minimal transition is required due to SubVP*/
- if (surface_count > 0 && is_pipe_split) {
- if (cur_stream_status->plane_count > surface_count) {
- force_minimal_pipe_splitting = true;
- } else if (cur_stream_status->plane_count < surface_count) {
- force_minimal_pipe_splitting = true;
- *is_plane_addition = true;
- }
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
}
}
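The rewritten check drops the pipe-split scan entirely: any plane-count change on a SUBVP_MAIN stream now forces the minimal transition. The decision reduces to a comparison, sketched standalone:

	#include <stdbool.h>

	/* Sketch of the simplified SubVP decision above: any plane-count change
	 * forces a minimal transition; additions are flagged separately. */
	static bool subvp_needs_minimal_transition(int cur_planes, int new_planes,
						   bool *is_plane_addition)
	{
		*is_plane_addition = new_planes > cur_planes;
		return cur_planes != new_planes;
	}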
@@ -3768,6 +3816,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
+ bool subvp_in_use = false;
if (!transition_context)
return false;
@@ -3784,6 +3833,18 @@ static bool commit_minimal_transition_state(struct dc *dc,
pipe_in_use++;
}
+ /* If SubVP is enabled and we are adding or removing planes from any main subvp
+ * pipe, we must use the minimal transition.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ subvp_in_use = true;
+ break;
+ }
+ }
+
/* When the OS adds a new surface while all of the pipes are already in use by
* the ODM combine and MPC split features, it needs to use
* commit_minimal_transition_state to transition safely. After the OS exits
* MPO, it will go back to using ODM and MPC split with all of the pipes; we need
@@ -3792,7 +3853,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios that use dc_commit_state_no_check in the flip stage, especially
* entering/exiting MPO when DCN still has enough resources.
*/
- if (pipe_in_use != dc->res_pool->pipe_count) {
+ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
dc_release_state(transition_context);
return true;
}
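With the new scan, a phantom stream anywhere in the current state defeats the fast-exit even when pipes are still free. Inverted into "must we actually run the minimal transition", the gate is just:

	#include <stdbool.h>

	/* Sketch: the early 'return true' above is skipped (i.e. the minimal
	 * transition actually runs) exactly when this evaluates true. */
	static bool must_run_minimal_transition(int pipes_in_use, int pipe_count,
						bool subvp_in_use)
	{
		return pipes_in_use == pipe_count || subvp_in_use;
	}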
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 24ed057414e1..5304e9daf90a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -4663,6 +4663,10 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
}
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->type == dc_connection_mst_branch)
+ dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link);
+
/* Retrain now, or wait until next stream update to apply */
if (skip_immediate_retrain == false)
dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 651231387043..ce8d6a54ca54 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -82,6 +82,7 @@ struct dp_hdmi_dongle_signature_data {
#define HDMI_SCDC_STATUS_FLAGS 0x40
#define HDMI_SCDC_ERR_DETECT 0x50
#define HDMI_SCDC_TEST_CONFIG 0xC0
+#define HDMI_SCDC_DEVICE_ID 0xD3
union hdmi_scdc_update_read_data {
uint8_t byte[2];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index e5ab751a5ca1..dedd1246ce58 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1912,7 +1912,7 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
return status;
}
-static void dpcd_exit_training_mode(struct dc_link *link)
+static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
{
uint8_t sink_status = 0;
uint8_t i;
@@ -1920,12 +1920,14 @@ static void dpcd_exit_training_mode(struct dc_link *link)
/* clear training pattern set */
dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
- /* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
- if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
- (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
- break;
- udelay(1000);
+ if (encoding == DP_128b_132b_ENCODING) {
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ udelay(1000);
+ }
}
}
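Intra-hop auxiliary replies are a 128b/132b (DP2) concept, so the poll is now skipped entirely on 8b/10b links rather than burning up to 10 ms. A standalone sketch with stand-in helpers:

	#include <stdbool.h>

	bool intra_hop_cleared(void);	/* stand-in for the DP_SINK_STATUS read */
	void sleep_ms(int ms);		/* stand-in for udelay(1000) */

	static void exit_training_mode(bool is_128b_132b)
	{
		int i;

		if (!is_128b_132b)
			return;		/* nothing to poll on 8b/10b links */
		for (i = 0; i < 10; i++) {
			if (intra_hop_cleared())
				break;
			sleep_ms(1);
		}
	}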
@@ -2649,7 +2651,7 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings);
/* reset previous training states */
- dpcd_exit_training_mode(link);
+ dpcd_exit_training_mode(link, encoding);
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, &lt_settings);
@@ -2670,7 +2672,7 @@ enum link_training_result dc_link_dp_perform_link_training(
ASSERT(0);
/* exit training mode */
- dpcd_exit_training_mode(link);
+ dpcd_exit_training_mode(link, encoding);
/* switch to video idle */
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
@@ -2771,8 +2773,11 @@ bool perform_link_training_with_retries(
/* Update verified link settings to current one
* Because DPIA LT might fallback to lower link setting.
*/
- link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
- link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
+ }
}
} else {
status = dc_link_dp_perform_link_training(link,
@@ -3020,7 +3025,7 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
{
- enum dc_link_rate cable_max_link_rate = LINK_RATE_HIGH3;
+ enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
cable_max_link_rate = LINK_RATE_UHBR20;
@@ -3083,15 +3088,29 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
max_link_cap.link_spread =
link->reported_link_cap.link_spread;
- /* Lower link settings based on cable attributes */
+ /* Lower link settings based on cable attributes.
+ * Cable ID is a DP2 feature to identify the max certified link rate
+ * that a cable can carry. The cable identification method requires both
+ * cable and display hardware support. Since the spec came late, it is
+ * anticipated that the first round of DP2 cables and displays may not
+ * be fully able to reliably return cable ID data. Our cable ID policy
+ * is therefore: if the cable returns non-zero cable ID data, we take
+ * the cable's link rate capability into account; if we get zero data,
+ * the cable's link rate capability is considered inconclusive and we
+ * ignore it, to avoid over-limiting the hardware capability seen by
+ * users. The overall max link rate capability is still determined
+ * after actual DP pre-training. Cable ID is considered an auxiliary
+ * method of determining max link bandwidth capability.
+ */
cable_max_link_rate = get_cable_max_link_rate(link);
if (!link->dc->debug.ignore_cable_id &&
+ cable_max_link_rate != LINK_RATE_UNKNOWN &&
cable_max_link_rate < max_link_cap.link_rate)
max_link_cap.link_rate = cable_max_link_rate;
- /*
- * account for lttpr repeaters cap
+ /* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
if (dp_is_lttpr_present(link)) {
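The policy described above reduces to a guarded clamp: the cable's rate caps the link only when the reported ID is conclusive. Sketched standalone, treating 0 as LINK_RATE_UNKNOWN:

	#include <stdbool.h>

	/* Sketch of the cable-ID clamp: an inconclusive (zero/UNKNOWN) cable
	 * rate never limits the link. */
	static int clamp_link_rate(int max_rate, int cable_rate, bool ignore_cable_id)
	{
		if (!ignore_cable_id && cable_rate != 0 && cable_rate < max_rate)
			return cable_rate;
		return max_rate;
	}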
@@ -4540,9 +4559,19 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off
+ && pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ // Always use max settings here for DP 1.4a LL Compliance CTS
+ if (link->is_automated) {
+ pipe_ctx->link_config.dp_link_settings.lane_count =
+ link->verified_link_cap.lane_count;
+ pipe_ctx->link_config.dp_link_settings.link_rate =
+ link->verified_link_cap.link_rate;
+ pipe_ctx->link_config.dp_link_settings.link_spread =
+ link->verified_link_cap.link_spread;
+ }
core_link_enable_stream(link->dc->current_state, pipe_ctx);
+ }
}
}
@@ -4583,6 +4612,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
}
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ // Workaround for DP 1.4a LL Compliance CTS, as USB4 has to share encoders, unlike DP and USB-C
+ link->is_automated = true;
device_service_clear.bits.AUTOMATED_TEST = 1;
core_link_write_dpcd(
link,
@@ -7226,6 +7257,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
struct pipe_ctx *pipes =
&link->dc->current_state->res_ctx.pipe_ctx[0];
unsigned int i;
+ bool do_fallback = false;
for (i = 0; i < MAX_PIPES; i++) {
@@ -7258,13 +7290,16 @@ void dp_retrain_link_dp_test(struct dc_link *link,
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ do_fallback = true;
+
perform_link_training_with_retries(
link_setting,
skip_video_pattern,
LINK_TRAINING_ATTEMPTS,
&pipes[i],
SIGNAL_TYPE_DISPLAY_PORT,
- false);
+ do_fallback);
link->dc->hwss.enable_stream(&pipes[i]);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index 74e36b34d3f7..d130d58ac08e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -791,10 +791,14 @@ static enum link_training_result dpia_training_eq_transparent(
}
if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
- result = LINK_TRAINING_SUCCESS;
- break;
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status)) {
+ /* Take into consideration the corner case for DP 1.4a LL Compliance CTS,
+ * as USB4 has to share encoders, unlike DP and USB-C
+ */
+ if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
}
/* Update VS/PE. */
@@ -1008,7 +1012,8 @@ enum link_training_result dc_link_dpia_perform_link_training(
*/
if (result == LINK_TRAINING_SUCCESS) {
msleep(5);
- result = dp_check_link_loss_status(link, &lt_settings);
+ if (!link->is_automated)
+ result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT) {
dpia_training_abort(link, &lt_settings, repeater_id);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 84c82d3a6761..57fc9193c770 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.210"
+#define DC_VER "3.2.212"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -261,6 +261,7 @@ struct dc_caps {
uint32_t cache_line_size;
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
+ uint8_t subvp_drr_max_vblank_margin_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
@@ -407,6 +408,7 @@ struct dc_config {
bool use_default_clock_table;
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
+ int sdpif_request_limit_words_per_umc;
};
@@ -456,15 +458,15 @@ enum pipe_split_policy {
MPC_SPLIT_DYNAMIC = 0,
/**
- * @MPC_SPLIT_DYNAMIC: Avoid pipe split, which means that DC will not
+ * @MPC_SPLIT_AVOID: Avoid pipe split, which means that DC will not
* try any sort of split optimization.
*/
MPC_SPLIT_AVOID = 1,
/**
- * @MPC_SPLIT_DYNAMIC: With this option, DC will only try to optimize
- * the pipe utilization when using a single display; if the user
- * connects to a second display, DC will avoid pipe split.
+ * @MPC_SPLIT_AVOID_MULT_DISP: With this option, DC will only try to
+ * optimize the pipe utilization when using a single display; if the
+ * user connects to a second display, DC will avoid pipe split.
*/
MPC_SPLIT_AVOID_MULT_DISP = 2,
};
@@ -495,7 +497,7 @@ enum dcn_zstate_support_state {
};
/**
- * dc_clocks - DC pipe clocks
+ * struct dc_clocks - DC pipe clocks
*
* For any clocks that may differ per pipe only the max is stored in this
* structure
@@ -526,7 +528,7 @@ struct dc_clocks {
bool fclk_prev_p_state_change_support;
int num_ways;
- /**
+ /*
* @fw_based_mclk_switching
*
* DC has a mechanism that leverage the variable refresh rate to switch
@@ -864,6 +866,7 @@ struct dc_debug_options {
bool enable_dp_dig_pixel_rate_div_policy;
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
+ unsigned int min_prefetch_in_strobe_ns;
};
struct gpu_info_soc_bounding_box_v1_0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 67eef5beab95..097556f7b32c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -477,12 +477,20 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
(((uint64_t)main_timing->pix_clk_100hz * 100)));
drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
(((uint64_t)drr_timing->pix_clk_100hz * 100)));
- max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
- max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
+ max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
+ dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
+ max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
(((uint64_t)drr_timing->h_total * 1000000)));
+ /* When calculating the max vtotal supported for SubVP + DRR cases, add
+ * margin due to possible rounding errors (being off by 1 line in the
+ * FW calculation can incorrectly push the P-State switch to wait 1 frame
+ * longer).
+ */
+ max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;
+
pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
}
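Budgeting dc->caps.subvp_fw_processing_delay_us into both bounds shrinks the vblank bound by half the delay and the MALL-region bound by the full delay. A worked example under assumed numbers (illustrative only):

	subvp_active_us = 8000, prefetch_us = 600, drr_active_us = 5000,
	mall_region_us = 900, fw_delay_us = 100

	max_drr_vblank_us     = (8000 - 600 - 100 - 5000) / 2 + 5000 = 6150
	max_drr_mallregion_us =  8000 - 600 - 900 - 100             = 6400
	max_drr_supported_us  = max(6150, 6400)                     = 6400

Without the delay term the two bounds would be 6200 and 6500: 50 us (fw_delay/2) and 100 us (fw_delay) of margin the firmware would not have had.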
@@ -859,11 +867,59 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
diag_data.is_cw6_enabled);
}
+static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *test_pipe, *split_pipe;
+ const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
+ struct rect r1 = scl_data->recout, r2, r2_half;
+ int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
+ int cur_layer = pipe_ctx->plane_state->layer_index;
+
+ /**
+ * Disable the cursor if there's another pipe above this one with a
+ * plane that contains this pipe's viewport to prevent double cursor
+ * and incorrect scaling artifacts.
+ */
+ for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ test_pipe = test_pipe->top_pipe) {
+ // Skip invisible layer and pipe-split plane on same layer
+ if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
+ continue;
+
+ r2 = test_pipe->plane_res.scl_data.recout;
+ r2_r = r2.x + r2.width;
+ r2_b = r2.y + r2.height;
+ split_pipe = test_pipe;
+
+ /**
+ * If there is another half plane on the same layer because of a
+ * pipe split, merge the two halves (they share the same height).
+ */
+ for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+ split_pipe = split_pipe->top_pipe)
+ if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
+ r2_half = split_pipe->plane_res.scl_data.recout;
+ r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
+ r2.width = r2.width + r2_half.width;
+ r2_r = r2.x + r2.width;
+ break;
+ }
+
+ if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
+ return true;
+ }
+
+ return false;
+}
+
static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state != NULL) {
if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
return false;
+
+ if (dc_can_pipe_disable_cursor(pipe_ctx))
+ return false;
}
if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
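The covering test at the heart of dc_can_pipe_disable_cursor is plain rectangle containment once the split halves are merged. Extracted as a standalone sketch:

	#include <stdbool.h>

	struct rect { int x, y, width, height; };

	/* Sketch: r1 (the cursor pipe's recout) must lie fully inside r2 (the
	 * covering plane, after merging pipe-split halves) for the cursor to
	 * be safely disabled on this pipe. */
	static bool rect_inside(struct rect r1, struct rect r2)
	{
		return r1.x >= r2.x && r1.y >= r2.y &&
		       r1.x + r1.width  <= r2.x + r2.width &&
		       r1.y + r1.height <= r2.y + r2.height;
	}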
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index edb4532eaa39..dc6afe33bca2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -184,6 +184,7 @@ struct dc_link {
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
bool is_hpd_pending; /* Indicates a new received hpd */
+ bool is_automated; /* Indicates automated testing */
bool edp_sink_present;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index cda1592c3a5b..2d3201b77d6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -413,6 +413,11 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
else
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
+ // WA for PSR1 on a specific TCON; it requires a frame delay for frame re-lock
+ copy_settings_data->relock_delay_frame_cnt = 0;
+ if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
+ copy_settings_data->relock_delay_frame_cnt = 2;
+
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index e48fd044f572..ba1c0621f0f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -171,6 +171,7 @@ struct dcn_hubbub_registers {
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B;
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D;
+ uint32_t SDPIF_REQUEST_RATE_LIMIT;
};
#define HUBBUB_REG_FIELD_LIST_DCN32(type) \
@@ -360,7 +361,8 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\
- type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\
+ type SDPIF_REQUEST_RATE_LIMIT
struct dcn_hubbub_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 11e4c4e46947..587b04b8712d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -97,10 +97,12 @@ void dcn10_lock_all_pipes(struct dc *dc,
bool lock)
{
struct pipe_ctx *pipe_ctx;
+ struct pipe_ctx *old_pipe_ctx;
struct timing_generator *tg;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
@@ -110,7 +112,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
- !pipe_ctx->plane_state ||
+ (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
!tg->funcs->is_tg_enabled(tg))
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h
index a85ed228dfc2..a9dd9ae23ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h
@@ -27,204 +27,177 @@
#define TO_DCN20_DWBC(dwbc_base) \
container_of(dwbc_base, struct dcn20_dwbc, base)
-/* DCN */
-#define BASE_INNER(seg) \
- DCE_BASE__INST0_SEG ## seg
-
-#define BASE(seg) \
- BASE_INNER(seg)
-
-#define SR(reg_name)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SRI2(reg_name, block, id)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
-
#define DWBC_COMMON_REG_LIST_DCN2_0(inst) \
- SRI2(WB_ENABLE, CNV, inst),\
- SRI2(WB_EC_CONFIG, CNV, inst),\
- SRI2(CNV_MODE, CNV, inst),\
- SRI2(CNV_WINDOW_START, CNV, inst),\
- SRI2(CNV_WINDOW_SIZE, CNV, inst),\
- SRI2(CNV_UPDATE, CNV, inst),\
- SRI2(CNV_SOURCE_SIZE, CNV, inst),\
- SRI2(CNV_TEST_CNTL, CNV, inst),\
- SRI2(CNV_TEST_CRC_RED, CNV, inst),\
- SRI2(CNV_TEST_CRC_GREEN, CNV, inst),\
- SRI2(CNV_TEST_CRC_BLUE, CNV, inst),\
- SRI2(WBSCL_COEF_RAM_SELECT, WBSCL, inst),\
- SRI2(WBSCL_COEF_RAM_TAP_DATA, WBSCL, inst),\
- SRI2(WBSCL_MODE, WBSCL, inst),\
- SRI2(WBSCL_TAP_CONTROL, WBSCL, inst),\
- SRI2(WBSCL_DEST_SIZE, WBSCL, inst),\
- SRI2(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL, inst),\
- SRI2(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL, inst),\
- SRI2(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL, inst),\
- SRI2(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL, inst),\
- SRI2(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL, inst),\
- SRI2(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL, inst),\
- SRI2(WBSCL_ROUND_OFFSET, WBSCL, inst),\
- SRI2(WBSCL_OVERFLOW_STATUS, WBSCL, inst),\
- SRI2(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL, inst),\
- SRI2(WBSCL_TEST_CNTL, WBSCL, inst),\
- SRI2(WBSCL_TEST_CRC_RED, WBSCL, inst),\
- SRI2(WBSCL_TEST_CRC_GREEN, WBSCL, inst),\
- SRI2(WBSCL_TEST_CRC_BLUE, WBSCL, inst),\
- SRI2(WBSCL_BACKPRESSURE_CNT_EN, WBSCL, inst),\
- SRI2(WB_MCIF_BACKPRESSURE_CNT, WBSCL, inst),\
- SRI2(WBSCL_CLAMP_Y_RGB, WBSCL, inst),\
- SRI2(WBSCL_CLAMP_CBCR, WBSCL, inst),\
- SRI2(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL, inst),\
- SRI2(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL, inst),\
- SRI2(WBSCL_DEBUG, WBSCL, inst),\
- SRI2(WBSCL_TEST_DEBUG_INDEX, WBSCL, inst),\
- SRI2(WBSCL_TEST_DEBUG_DATA, WBSCL, inst),\
- SRI2(WB_DEBUG_CTRL, CNV, inst),\
- SRI2(WB_DBG_MODE, CNV, inst),\
- SRI2(WB_HW_DEBUG, CNV, inst),\
- SRI2(CNV_TEST_DEBUG_INDEX, CNV, inst),\
- SRI2(CNV_TEST_DEBUG_DATA, CNV, inst),\
- SRI2(WB_SOFT_RESET, CNV, inst),\
- SRI2(WB_WARM_UP_MODE_CTL1, CNV, inst),\
- SRI2(WB_WARM_UP_MODE_CTL2, CNV, inst)
+ SRI2_DWB(WB_ENABLE, CNV, inst),\
+ SRI2_DWB(WB_EC_CONFIG, CNV, inst),\
+ SRI2_DWB(CNV_MODE, CNV, inst),\
+ SRI2_DWB(CNV_WINDOW_START, CNV, inst),\
+ SRI2_DWB(CNV_WINDOW_SIZE, CNV, inst),\
+ SRI2_DWB(CNV_UPDATE, CNV, inst),\
+ SRI2_DWB(CNV_SOURCE_SIZE, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CNTL, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CRC_RED, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CRC_GREEN, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CRC_BLUE, CNV, inst),\
+ SRI2_DWB(WBSCL_COEF_RAM_SELECT, WBSCL, inst),\
+ SRI2_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL, inst),\
+ SRI2_DWB(WBSCL_MODE, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TAP_CONTROL, WBSCL, inst),\
+ SRI2_DWB(WBSCL_DEST_SIZE, WBSCL, inst),\
+ SRI2_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL, inst),\
+ SRI2_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL, inst),\
+ SRI2_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL, inst),\
+ SRI2_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL, inst),\
+ SRI2_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_ROUND_OFFSET, WBSCL, inst),\
+ SRI2_DWB(WBSCL_OVERFLOW_STATUS, WBSCL, inst),\
+ SRI2_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CNTL, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CRC_RED, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CRC_GREEN, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CRC_BLUE, WBSCL, inst),\
+ SRI2_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL, inst),\
+ SRI2_DWB(WB_MCIF_BACKPRESSURE_CNT, WBSCL, inst),\
+ SRI2_DWB(WBSCL_CLAMP_Y_RGB, WBSCL, inst),\
+ SRI2_DWB(WBSCL_CLAMP_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL, inst),\
+ SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_DEBUG, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL, inst),\
+ SRI2_DWB(WB_DEBUG_CTRL, CNV, inst),\
+ SRI2_DWB(WB_DBG_MODE, CNV, inst),\
+ SRI2_DWB(WB_HW_DEBUG, CNV, inst),\
+ SRI2_DWB(CNV_TEST_DEBUG_INDEX, CNV, inst),\
+ SRI2_DWB(CNV_TEST_DEBUG_DATA, CNV, inst),\
+ SRI2_DWB(WB_SOFT_RESET, CNV, inst),\
+ SRI2_DWB(WB_WARM_UP_MODE_CTL1, CNV, inst),\
+ SRI2_DWB(WB_WARM_UP_MODE_CTL2, CNV, inst)
#define DWBC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \
- SF(WB_ENABLE, WB_ENABLE, mask_sh),\
- SF(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, mask_sh),\
- SF(WB_EC_CONFIG, DISPCLK_G_WB_GATE_DIS, mask_sh),\
- SF(WB_EC_CONFIG, DISPCLK_G_WBSCL_GATE_DIS, mask_sh),\
- SF(WB_EC_CONFIG, WB_TEST_CLK_SEL, mask_sh),\
- SF(WB_EC_CONFIG, WB_LB_LS_DIS, mask_sh),\
- SF(WB_EC_CONFIG, WB_LB_SD_DIS, mask_sh),\
- SF(WB_EC_CONFIG, WB_LUT_LS_DIS, mask_sh),\
- SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_MODE_SEL, mask_sh),\
- SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_DIS, mask_sh),\
- SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_FORCE, mask_sh),\
- SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_STATE, mask_sh),\
- SF(WB_EC_CONFIG, WB_RAM_PW_SAVE_MODE, mask_sh),\
- SF(WB_EC_CONFIG, WBSCL_LUT_MEM_PWR_STATE, mask_sh),\
- SF(CNV_MODE, CNV_OUT_BPC, mask_sh),\
- SF(CNV_MODE, CNV_FRAME_CAPTURE_RATE, mask_sh),\
- SF(CNV_MODE, CNV_WINDOW_CROP_EN, mask_sh),\
- SF(CNV_MODE, CNV_STEREO_TYPE, mask_sh),\
- SF(CNV_MODE, CNV_INTERLACED_MODE, mask_sh),\
- SF(CNV_MODE, CNV_EYE_SELECTION, mask_sh),\
- SF(CNV_MODE, CNV_STEREO_POLARITY, mask_sh),\
- SF(CNV_MODE, CNV_INTERLACED_FIELD_ORDER, mask_sh),\
- SF(CNV_MODE, CNV_STEREO_SPLIT, mask_sh),\
- SF(CNV_MODE, CNV_NEW_CONTENT, mask_sh),\
- SF(CNV_MODE, CNV_FRAME_CAPTURE_EN_CURRENT, mask_sh),\
- SF(CNV_MODE, CNV_FRAME_CAPTURE_EN, mask_sh),\
- SF(CNV_WINDOW_START, CNV_WINDOW_START_X, mask_sh),\
- SF(CNV_WINDOW_START, CNV_WINDOW_START_Y, mask_sh),\
- SF(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, mask_sh),\
- SF(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, mask_sh),\
- SF(CNV_UPDATE, CNV_UPDATE_PENDING, mask_sh),\
- SF(CNV_UPDATE, CNV_UPDATE_TAKEN, mask_sh),\
- SF(CNV_UPDATE, CNV_UPDATE_LOCK, mask_sh),\
- SF(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, mask_sh),\
- SF(CNV_SOURCE_SIZE, CNV_SOURCE_HEIGHT, mask_sh),\
- SF(CNV_TEST_CNTL, CNV_TEST_CRC_EN, mask_sh),\
- SF(CNV_TEST_CNTL, CNV_TEST_CRC_CONT_EN, mask_sh),\
- SF(CNV_TEST_CRC_RED, CNV_TEST_CRC_RED_MASK, mask_sh),\
- SF(CNV_TEST_CRC_RED, CNV_TEST_CRC_SIG_RED, mask_sh),\
- SF(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_GREEN_MASK, mask_sh),\
- SF(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_SIG_GREEN, mask_sh),\
- SF(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_BLUE_MASK, mask_sh),\
- SF(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_SIG_BLUE, mask_sh),\
- SF(WB_DEBUG_CTRL, WB_DEBUG_EN, mask_sh),\
- SF(WB_DEBUG_CTRL, WB_DEBUG_SEL, mask_sh),\
- SF(WB_DBG_MODE, WB_DBG_MODE_EN, mask_sh),\
- SF(WB_DBG_MODE, WB_DBG_DIN_FMT, mask_sh),\
- SF(WB_DBG_MODE, WB_DBG_36MODE, mask_sh),\
- SF(WB_DBG_MODE, WB_DBG_CMAP, mask_sh),\
- SF(WB_DBG_MODE, WB_DBG_PXLRATE_ERROR, mask_sh),\
- SF(WB_DBG_MODE, WB_DBG_SOURCE_WIDTH, mask_sh),\
- SF(WB_HW_DEBUG, WB_HW_DEBUG, mask_sh),\
- SF(WB_SOFT_RESET, WB_SOFT_RESET, mask_sh),\
- SF(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_INDEX, mask_sh),\
- SF(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_WRITE_EN, mask_sh),\
- SF(CNV_TEST_DEBUG_DATA, CNV_TEST_DEBUG_DATA, mask_sh),\
- SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
- SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_PHASE, mask_sh),\
- SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_FILTER_TYPE, mask_sh),\
- SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
- SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
- SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
- SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
- SF(WBSCL_MODE, WBSCL_MODE, mask_sh),\
- SF(WBSCL_MODE, WBSCL_OUT_BIT_DEPTH, mask_sh),\
- SF(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, mask_sh),\
- SF(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, mask_sh),\
- SF(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, mask_sh),\
- SF(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, mask_sh),\
- SF(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, mask_sh),\
- SF(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, mask_sh),\
- SF(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, mask_sh),\
- SF(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, mask_sh),\
- SF(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, mask_sh),\
- SF(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, mask_sh),\
- SF(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, mask_sh),\
- SF(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, mask_sh),\
- SF(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, mask_sh),\
- SF(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, mask_sh),\
- SF(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, mask_sh),\
- SF(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, mask_sh),\
- SF(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, mask_sh),\
- SF(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, mask_sh),\
- SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_FLAG, mask_sh),\
- SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_ACK, mask_sh),\
- SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_MASK, mask_sh),\
- SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_STATUS, mask_sh),\
- SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_TYPE, mask_sh),\
- SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_FLAG, mask_sh),\
- SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_ACK, mask_sh),\
- SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_MASK, mask_sh),\
- SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_STATUS, mask_sh),\
- SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_TYPE, mask_sh),\
- SF(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_EN, mask_sh),\
- SF(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_CONT_EN, mask_sh),\
- SF(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_RED_MASK, mask_sh),\
- SF(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_SIG_RED, mask_sh),\
- SF(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_GREEN_MASK, mask_sh),\
- SF(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_SIG_GREEN, mask_sh),\
- SF(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_BLUE_MASK, mask_sh),\
- SF(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_SIG_BLUE, mask_sh),\
- SF(WBSCL_BACKPRESSURE_CNT_EN, WBSCL_BACKPRESSURE_CNT_EN, mask_sh),\
- SF(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_Y_MAX_BACKPRESSURE, mask_sh),\
- SF(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_C_MAX_BACKPRESSURE, mask_sh),\
- SF(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, mask_sh),\
- SF(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, mask_sh),\
- SF(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, mask_sh),\
- SF(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, mask_sh),\
- SF(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, mask_sh),\
- SF(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_BLACK_COLOR_G_Y, mask_sh),\
- SF(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_B_CB, mask_sh),\
- SF(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_R_CR, mask_sh),\
- SF(WBSCL_DEBUG, WBSCL_DEBUG, mask_sh),\
- SF(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_INDEX, mask_sh),\
- SF(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_WRITE_EN, mask_sh),\
- SF(WBSCL_TEST_DEBUG_DATA, WBSCL_TEST_DEBUG_DATA, mask_sh),\
- SF(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, mask_sh),\
- SF(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, mask_sh),\
- SF(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, mask_sh),\
- SF(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, mask_sh),\
- SF(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, mask_sh),\
- SF(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, mask_sh)
+ SF_DWB(WB_ENABLE, WB_ENABLE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, DISPCLK_G_WB_GATE_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, DISPCLK_G_WBSCL_GATE_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_TEST_CLK_SEL, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_LB_LS_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_LB_SD_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_LUT_LS_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_MODE_SEL, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_FORCE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_STATE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_RAM_PW_SAVE_MODE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LUT_MEM_PWR_STATE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_OUT_BPC, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_RATE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_WINDOW_CROP_EN, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_STEREO_TYPE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_INTERLACED_MODE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_EYE_SELECTION, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_STEREO_POLARITY, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_INTERLACED_FIELD_ORDER, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_STEREO_SPLIT, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_NEW_CONTENT, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN_CURRENT, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN, mask_sh),\
+ SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_X, mask_sh),\
+ SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_Y, mask_sh),\
+ SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, mask_sh),\
+ SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, mask_sh),\
+ SF_DWB(CNV_UPDATE, CNV_UPDATE_PENDING, mask_sh),\
+ SF_DWB(CNV_UPDATE, CNV_UPDATE_TAKEN, mask_sh),\
+ SF_DWB(CNV_UPDATE, CNV_UPDATE_LOCK, mask_sh),\
+ SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, mask_sh),\
+ SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_HEIGHT, mask_sh),\
+ SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_EN, mask_sh),\
+ SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_CONT_EN, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_RED_MASK, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_SIG_RED, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_GREEN_MASK, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_SIG_GREEN, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_BLUE_MASK, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_SIG_BLUE, mask_sh),\
+ SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_EN, mask_sh),\
+ SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_SEL, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_MODE_EN, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_DIN_FMT, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_36MODE, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_CMAP, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_PXLRATE_ERROR, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_SOURCE_WIDTH, mask_sh),\
+ SF_DWB(WB_HW_DEBUG, WB_HW_DEBUG, mask_sh),\
+ SF_DWB(WB_SOFT_RESET, WB_SOFT_RESET, mask_sh),\
+ SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_INDEX, mask_sh),\
+ SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_WRITE_EN, mask_sh),\
+ SF_DWB(CNV_TEST_DEBUG_DATA, CNV_TEST_DEBUG_DATA, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_PHASE, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_FILTER_TYPE, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
+ SF_DWB(WBSCL_MODE, WBSCL_MODE, mask_sh),\
+ SF_DWB(WBSCL_MODE, WBSCL_OUT_BIT_DEPTH, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, mask_sh),\
+ SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, mask_sh),\
+ SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, mask_sh),\
+ SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_FLAG, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_ACK, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_MASK, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_STATUS, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_TYPE, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_FLAG, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_ACK, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_MASK, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_STATUS, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_TYPE, mask_sh),\
+ SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_EN, mask_sh),\
+ SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_CONT_EN, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_RED_MASK, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_SIG_RED, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_GREEN_MASK, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_SIG_GREEN, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_BLUE_MASK, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_SIG_BLUE, mask_sh),\
+ SF_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL_BACKPRESSURE_CNT_EN, mask_sh),\
+ SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_Y_MAX_BACKPRESSURE, mask_sh),\
+ SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_C_MAX_BACKPRESSURE, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_BLACK_COLOR_G_Y, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_B_CB, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_R_CR, mask_sh),\
+ SF_DWB(WBSCL_DEBUG, WBSCL_DEBUG, mask_sh),\
+ SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_INDEX, mask_sh),\
+ SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_WRITE_EN, mask_sh),\
+ SF_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL_TEST_DEBUG_DATA, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, mask_sh)
#define DWBC_REG_FIELD_LIST_DCN2_0(type) \
type WB_ENABLE;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index f3334f513eb4..3f3d4daa6294 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1310,6 +1310,19 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
{
new_pipe->update_flags.raw = 0;
+ /* If a non-phantom pipe is being transitioned to a phantom pipe,
+ * set the disable flag and return immediately. This is because the pipe
+ * that was previously in use must be fully disabled before we
+ * can "enable" it as a phantom pipe (since the OTG will certainly
+ * be different). The post_unlock sequence will set the correct
+ * update flags to enable the phantom pipe.
+ */
+ if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
+ new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
@@ -1663,6 +1676,7 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_width);
if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
}
@@ -1833,6 +1847,17 @@ void dcn20_program_front_end_for_ctx(
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
+
+ /* when dynamic ODM is active, pipes must be reconfigured when all planes are
+ * disabled, as some transitions will leave software and hardware state
+ * mismatched.
+ */
+ if (dc->debug.enable_single_display_2to1_odm_policy &&
+ pipe->stream &&
+ pipe->update_flags.bits.disable &&
+ !pipe->prev_odm_pipe &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
}
}
@@ -1872,26 +1897,6 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- /* If an active, non-phantom pipe is being transitioned into a phantom
- * pipe, wait for the double buffer update to complete first before we do
- * phantom pipe programming (HUBP_VTG_SEL updates right away so that can
- * cause issues).
- */
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
- old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
@@ -1901,6 +1906,11 @@ void dcn20_post_unlock_program_front_end(
*/
while (pipe) {
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ /* When turning on the phantom pipe we want to run through the
+ * entire enable sequence, so apply all the "enable" flags.
+ */
+ if (dc->hwss.apply_update_flags_for_phantom)
+ dc->hwss.apply_update_flags_for_phantom(pipe);
if (dc->hwss.update_phantom_vp_position)
dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
index 7bcee5894d2e..5ab32aa51e13 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -29,13 +29,6 @@
#define TO_DCN20_MMHUBBUB(mcif_wb_base) \
container_of(mcif_wb_base, struct dcn20_mmhubbub, base)
-/* DCN */
-#define BASE_INNER(seg) \
- DCE_BASE__INST0_SEG ## seg
-
-#define BASE(seg) \
- BASE_INNER(seg)
-
#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d0199ec045cb..8a0dd0d7134b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -124,8 +124,6 @@ enum dcn20_clk_src_array_id {
* macros to expand register list macros defined in HW object header files */
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -138,6 +136,15 @@ enum dcn20_clk_src_array_id {
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define SRI2_DWB(reg_name, block, id)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+#define SF_DWB(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
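These DWB-specific variants pin the writeback registers to this resource file's own BASE() while reusing the usual token-pasting shape. How one entry expands, using the SF_DWB definition added above:

	#define SF_DWB(reg_name, field_name, post_fix) \
		.field_name = reg_name ## __ ## field_name ## post_fix

	/* SF_DWB(WB_ENABLE, WB_ENABLE, __SHIFT) pastes tokens into:
	 *	.WB_ENABLE = WB_ENABLE__WB_ENABLE__SHIFT
	 * so each field initializer is synthesized from register/field names. */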
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
index f1ef46e8da5b..e7a1b7fa2cce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -28,12 +28,6 @@
#include "vmid.h"
-#define BASE_INNER(seg) \
- DCE_BASE__INST0_SEG ## seg
-
-#define BASE(seg) \
- BASE_INNER(seg)
-
#define DCN20_VMID_REG_LIST(id)\
SRI(CNTL, DCN_VM_CONTEXT, id),\
SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index ce6c70e25703..fbcf0afeae0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -94,8 +94,6 @@
* macros to expand register list macros defined in HW object header files */
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
index 1010930cf071..fc00ec0a0881 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -27,21 +27,6 @@
#define TO_DCN30_DWBC(dwbc_base) \
container_of(dwbc_base, struct dcn30_dwbc, base)
-/* DCN */
-#define BASE_INNER(seg) \
- DCE_BASE__INST0_SEG ## seg
-
-#define BASE(seg) \
- BASE_INNER(seg)
-
-#define SF_DWB(reg_name, block, id, field_name, post_fix)\
- .field_name = block ## id ## _ ## reg_name ## __ ## field_name ## post_fix
-
- /* set field name */
-#define SF_DWB2(reg_name, block, id, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
-
#define DWBC_COMMON_REG_LIST_DCN30(inst) \
SR(DWB_ENABLE_CLK_CTRL),\
SR(DWB_MEM_PWR_CTRL),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
index 7446e54bf5aa..376620a8f02f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
@@ -31,13 +31,6 @@
#define TO_DCN30_MMHUBBUB(mcif_wb_base) \
container_of(mcif_wb_base, struct dcn30_mmhubbub, base)
-/* DCN */
-#define BASE_INNER(seg) \
- DCE_BASE__INST0_SEG ## seg
-
-#define BASE(seg) \
- BASE_INNER(seg)
-
#define MCIF_WB_COMMON_REG_LIST_DCN3_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index af4fe695535e..c18c52a60100 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -108,8 +108,6 @@ enum dcn30_clk_src_array_id {
*/
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -142,6 +140,9 @@ enum dcn30_clk_src_array_id {
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
@@ -1328,6 +1329,7 @@ static struct clock_source *dcn30_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
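The same one-line fix recurs in each DCN3x resource file below: the construct-failure path previously leaked the allocation. The pattern, sketched with userspace stand-ins for kzalloc/kfree:

	#include <stdlib.h>
	#include <stdbool.h>

	struct clock_source { int base; };
	bool construct(struct clock_source *cs); /* stand-in for *_clk_src_construct */

	static struct clock_source *clock_source_create(void)
	{
		struct clock_source *cs = calloc(1, sizeof(*cs));

		if (!cs)
			return NULL;
		if (construct(cs))
			return cs;
		free(cs);	/* previously leaked on construct failure */
		return NULL;
	}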
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index f04595b750ab..480145f09246 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -107,8 +107,6 @@ enum dcn301_clk_src_array_id {
*/
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -146,6 +144,9 @@ enum dcn301_clk_src_array_id {
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
@@ -1288,6 +1289,7 @@ static struct clock_source *dcn301_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index d3945876aced..7d11c2a43cbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -183,7 +183,6 @@ static const struct dc_plane_cap plane_cap = {
mm ## reg_name
/* DCN */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -216,6 +215,9 @@ static const struct dc_plane_cap plane_cap = {
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
@@ -464,6 +466,7 @@ static struct clock_source *dcn302_clock_source_create(struct dc_context *ctx, s
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 7e7f18bef098..92393b04cc44 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -162,7 +162,6 @@ static const struct dc_plane_cap plane_cap = {
mm ## reg_name
/* DCN */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -195,6 +194,9 @@ static const struct dc_plane_cap plane_cap = {
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
@@ -431,6 +433,7 @@ static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, s
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
index 84e1486f3d51..39a57bcd7866 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -87,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8f5e89cb9d3e..3ca517dcc82d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -119,8 +119,6 @@ enum dcn31_clk_src_array_id {
*/
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -153,6 +151,9 @@ enum dcn31_clk_src_array_id {
.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
@@ -1629,6 +1630,7 @@ static struct clock_source *dcn31_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -1638,6 +1640,31 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ uint32_t pipe_cnt;
+ int i;
+
+ dc_assert_fp_enabled();
+
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ for (i = 0; i < pipe_cnt; i++) {
+ pipes[i].pipe.src.gpuvm = 1;
+ if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE) {
+ //pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled;
+ } else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE)
+ pipes[i].pipe.src.hostvm = false;
+ else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE)
+ pipes[i].pipe.src.hostvm = true;
+ }
+ return pipe_cnt;
+}
+
int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1649,7 +1676,7 @@ int dcn31_populate_dml_pipes_from_context(
bool upscaled = false;
DC_FP_START();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1679,12 +1706,6 @@ int dcn31_populate_dml_pipes_from_context(
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
DC_FP_END();
- if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE)
- pipes[pipe_cnt].pipe.src.hostvm = false;
- else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE)
- pipes[pipe_cnt].pipe.src.hostvm = true;
if (pipes[pipe_cnt].dout.dsc_enable) {
switch (timing->display_color_depth) {
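The new dcn31x_populate_dml_pipes_from_context helper added above centralizes the GPUVM/HOSTVM setup that each DCN3.1x caller previously open-coded (dcn315/dcn316/dcn314 drop their per-pipe src.gpuvm/src.hostvm lines later in this series), and it fixes an indexing slip along the way: the override block removed just above ran after the pipe loop and wrote pipes[pipe_cnt].pipe.src.hostvm, touching only the one-past-last entry, whereas the helper applies the override to every populated pipe. The precedence it implements, as a condensed sketch (resolve_hostvm is a hypothetical name; the DML_HOSTVM_* constants are the driver's):

    static bool resolve_hostvm(int ovr, bool hw_enabled)
    {
        if (ovr == DML_HOSTVM_OVERRIDE_FALSE)
            return false;
        if (ovr == DML_HOSTVM_OVERRIDE_TRUE)
            return true;
        /* DML_HOSTVM_NO_OVERRIDE: follow the hardware/VM configuration */
        return hw_enabled;
    }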
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 7e773bf7b895..38842f938bed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -49,18 +49,30 @@
#define CTX \
enc1->base.ctx
+static void enc314_reset_fifo(struct stream_encoder *enc, bool reset)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val = reset ? 1 : 0;
+ uint32_t is_symclk_on;
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
+ REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);
+
+ if (is_symclk_on)
+ REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
+ else
+ udelay(10);
+}
static void enc314_enable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- /* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
- REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
+
+ enc314_reset_fifo(enc, true);
+ enc314_reset_fifo(enc, false);
+
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
}
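enc314_enable_fifo now delegates the reset handshake to enc314_reset_fifo, which toggles DIG_FIFO_RESET and polls DIG_FIFO_RESET_DONE for the matching value only while DIG_SYMCLK_FE_ON reports the front-end clock running; without that clock the status bit cannot update, so a fixed udelay(10) stands in. The shape of the helper, sketched with hypothetical write_field/poll_field register accessors:

    static void dig_fifo_reset(bool assert_reset, bool symclk_on)
    {
        write_field(DIG_FIFO_CTRL0, DIG_FIFO_RESET, assert_reset ? 1 : 0);

        if (symclk_on)
            /* status mirrors the request once the reset takes effect */
            poll_field(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, assert_reset ? 1 : 0);
        else
            udelay(10);    /* no clock, no feedback: fixed settle time */
    }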
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 47eb162f1a75..f246aab23050 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -237,11 +237,10 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
- .configure_crc = optc2_configure_crc,
+ .configure_crc = optc1_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
- .set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc314_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 3b3e093e9447..4fffc7bb8088 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -184,6 +184,9 @@ enum dcn31_clk_src_array_id {
.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 96a3d41febff..7887078c5f64 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -151,8 +151,6 @@ enum dcn31_clk_src_array_id {
*/
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -185,6 +183,9 @@ enum dcn31_clk_src_array_id {
.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
@@ -1627,6 +1628,7 @@ static struct clock_source *dcn31_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -1647,7 +1649,7 @@ static int dcn315_populate_dml_pipes_from_context(
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1666,7 +1668,6 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
@@ -1707,7 +1708,9 @@ static int dcn315_populate_dml_pipes_from_context(
dc->config.enable_4to1MPC = true;
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ } else if (!is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 5120
+ && pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) {
/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index 2f643cdaf59f..b4d5076e124c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -142,8 +142,6 @@ enum dcn31_clk_src_array_id {
*/
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
@@ -176,6 +174,9 @@ enum dcn31_clk_src_array_id {
.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define DCCG_SRII(reg_name, block, id)\
.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
@@ -1650,7 +1651,7 @@ static int dcn316_populate_dml_pipes_from_context(
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1669,7 +1670,6 @@ static int dcn316_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index a88a71460521..5947c2cb0f30 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -72,6 +72,23 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
+void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4;
+
+ ASSERT((request_limit & (~0xFFF)) == 0); //value must fit the 12-bit field
+ ASSERT(request_limit > 0); //the limit must be non-zero to be programmed
+
+ if (request_limit > 0xFFF)
+ request_limit = 0xFFF;
+
+ if (request_limit > 0)
+ REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit);
+}
+
+
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -949,6 +966,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.init_crb = dcn32_init_crb,
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
+ .set_request_limit = hubbub32_set_request_limit
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
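hubbub32_set_request_limit programs the SDPIF request rate limit to three quarters of the theoretical peak, memory_channel_count * words_per_channel, then clamps to the 12-bit field. A worked example using the words-per-UMC default of 16 that dcn32_resource.c applies for GC 11.0.3 later in this series (the channel count here is illustrative):

    request_limit = 3 * memory_channel_count * words_per_channel / 4
                  = 3 * 8 * 16 / 4
                  = 96    /* well under the 0xFFF clamp */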
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index cda94e0e31bf..786f9ce07f92 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -82,7 +82,8 @@
SR(DCN_VM_FAULT_ADDR_MSB),\
SR(DCN_VM_FAULT_ADDR_LSB),\
SR(DCN_VM_FAULT_CNTL),\
- SR(DCN_VM_FAULT_STATUS)
+ SR(DCN_VM_FAULT_STATUS),\
+ SR(SDPIF_REQUEST_RATE_LIMIT)
#define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
@@ -159,7 +160,8 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
- HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
+ HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh)
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
@@ -200,4 +202,6 @@ void hubbub32_construct(struct dcn20_hubbub *hubbub2,
int pixel_chunk_size_kb,
int config_return_buffer_size_kb);
+void hubbub32_set_request_limit(struct hubbub *hubbub, int umc_count, int words_per_umc);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 5e0018efe055..763311ffb967 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -283,8 +283,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
using the max for calculation */
if (hubp->curs_attr.width > 0) {
- // Round cursor width to next multiple of 64
- cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+ cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
switch (pipe->stream->cursor_attributes.color_format) {
case CURSOR_MODE_MONO:
@@ -309,9 +308,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
cursor_size > 16384) {
/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
*/
- cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
- DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+ cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
+ DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /
+ dc->caps.cache_line_size + 2;
}
break;
}
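Replacing the round-to-64 width estimate with curs_attr.pitch * curs_attr.height sizes the cursor by its real surface pitch, and the reworked expression then charges MALL in whole mblk units. A worked example, assuming the DCN3.2 constants of 64 KiB per mblk (DCN3_2_MALL_MBLK_SIZE_BYTES) and a 64-byte cache line:

    /* 256-pitch x 256 cursor at 4 bytes/pixel */
    cursor_size      = 256 * 256 * 4                 /* = 262144 bytes */
    rounded_to_mblks = ceil(262144 / 65536) * 65536  /* = 4 mblks      */
    cache_lines_used += 262144 / 64 + 2              /* = 4098 lines   */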
@@ -727,10 +726,7 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- //Round cursor width up to next multiple of 64
- int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
- int cursor_height = hubp->curs_attr.height;
- int cursor_size = cursor_width * cursor_height;
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
switch (hubp->curs_attr.color_format) {
case CURSOR_MODE_MONO:
@@ -984,6 +980,9 @@ void dcn32_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
+ if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
+ dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
+
// Get DMCUB capabilities
if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
@@ -1367,6 +1366,33 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
}
}
+/* Treat the phantom pipe as if it needs to be fully enabled.
+ * If the pipe was previously in use but not phantom, it would
+ * have been disabled earlier in the sequence so we need to run
+ * the full enable sequence.
+ */
+void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
+{
+ phantom_pipe->update_flags.raw = 0;
+ if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (phantom_pipe->stream && phantom_pipe->plane_state) {
+ phantom_pipe->update_flags.bits.enable = 1;
+ phantom_pipe->update_flags.bits.mpcc = 1;
+ phantom_pipe->update_flags.bits.dppclk = 1;
+ phantom_pipe->update_flags.bits.hubp_interdependent = 1;
+ phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ phantom_pipe->update_flags.bits.gamut_remap = 1;
+ phantom_pipe->update_flags.bits.scaler = 1;
+ phantom_pipe->update_flags.bits.viewport = 1;
+ phantom_pipe->update_flags.bits.det_size = 1;
+ if (!phantom_pipe->top_pipe && !phantom_pipe->prev_odm_pipe) {
+ phantom_pipe->update_flags.bits.odm = 1;
+ phantom_pipe->update_flags.bits.global_sync = 1;
+ }
+ }
+ }
+}
+
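dcn32_apply_update_flags_for_phantom raises the enable/mpcc/dppclk/scaler/viewport bits so a phantom pipe is driven through the full enable path even when the pipe context looks unchanged; the ODM and global-sync bits are reserved for the head of the pipe tree. The head test is the usual one, sketched here with a hypothetical helper name:

    static bool is_tree_head(const struct pipe_ctx *pipe)
    {
        /* no MPC parent and no ODM predecessor: first pipe of the tree */
        return !pipe->top_pipe && !pipe->prev_odm_pipe;
    }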
bool dcn32_dsc_pg_status(
struct dce_hwseq *hws,
unsigned int dsc_inst)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index ac3657a5b9ea..7de36529cf99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -92,6 +92,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *phantom_pipe);
+void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe);
+
bool dcn32_dsc_pg_status(
struct dce_hwseq *hws,
unsigned int dsc_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 45a949ba6f3f..dc4649458567 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
.update_dsc_pg = dcn32_update_dsc_pg,
+ .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index 2b33eeb213e2..2ee798965bc2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -167,6 +167,13 @@ static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}
+static void optc32_disable_phantom_otg(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
+}
+
static void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
@@ -260,6 +267,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.enable_crtc = optc32_enable_crtc,
.disable_crtc = optc32_disable_crtc,
.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable,
+ .disable_phantom_crtc = optc32_disable_phantom_otg,
/* used by enable_timing_synchronization. Not need for FPGA */
.is_counter_moving = optc1_is_counter_moving,
.get_position = optc1_get_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 4ba9a8662185..cdeff6de725d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -106,8 +106,6 @@ enum dcn32_clk_src_array_id {
*/
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
@@ -167,6 +165,9 @@ enum dcn32_clk_src_array_id {
REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## temp_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define DCCG_SRII(reg_name, block, id)\
REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
@@ -724,6 +725,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
+ .min_prefetch_in_strobe_ns = 60000, // 60us
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -829,6 +831,7 @@ static struct clock_source *dcn32_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -1678,7 +1681,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
/* Shadow pipe has small viewport. */
phantom_plane->clip_rect.y = 0;
- phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+ phantom_plane->clip_rect.height = phantom_stream->src.height;
phantom_plane->is_phantom = true;
@@ -2116,6 +2119,7 @@ static bool dcn32_resource_construct(
dc->caps.cache_num_ways = 16;
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
dc->caps.subvp_fw_processing_delay_us = 15;
+ dc->caps.subvp_drr_max_vblank_margin_us = 40;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
@@ -2411,6 +2415,9 @@ static bool dcn32_resource_construct(
pool->base.oem_device = NULL;
}
+ if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
+ dc->config.sdpif_request_limit_words_per_umc = 16;
+
DC_FP_END();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index f76120e67c16..f6bc9bd5da31 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -1244,7 +1244,8 @@ void dcn32_restore_mall_state(struct dc *dc,
SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C), \
SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D), \
SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
- SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS) \
+ SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \
+ SR(SDPIF_REQUEST_RATE_LIMIT) \
)
/* DCCG */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 61087f2385a9..6c79a47b6336 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -109,8 +109,6 @@ enum dcn321_clk_src_array_id {
*/
/* DCN */
-/* TODO awful hack. fixup dcn20_dwb.h */
-#undef BASE_INNER
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
@@ -174,6 +172,9 @@ enum dcn321_clk_src_array_id {
REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
#define VUPDATE_SRII(reg_name, block, id)\
REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
reg ## reg_name ## _ ## block ## id
@@ -722,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
+ .min_prefetch_in_strobe_ns = 60000, // 60us
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -828,6 +830,7 @@ static struct clock_source *dcn321_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -1703,6 +1706,7 @@ static bool dcn321_resource_construct(
dc->caps.cache_num_ways = 16;
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
+ dc->caps.subvp_drr_max_vblank_margin_us = 40;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index e3e5c39895a3..af1c50ed905a 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -116,6 +116,11 @@ bool dm_helpers_dp_mst_start_top_mgr(
bool dm_helpers_dp_mst_stop_top_mgr(
struct dc_context *ctx,
struct dc_link *link);
+
+void dm_helpers_dp_mst_update_branch_bandwidth(
+ struct dc_context *ctx,
+ struct dc_link *link);
+
/**
* OS specific aux read callback.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 602e885ed52c..75dbb7ee193b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1296,6 +1296,8 @@ int dcn20_populate_dml_pipes_from_context(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
pipes[pipe_cnt].dout.output_type = dm_dp;
+ if (is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i]))
+ pipes[pipe_cnt].dout.output_type = dm_dp2p0;
break;
case SIGNAL_TYPE_EDP:
pipes[pipe_cnt].dout.output_type = dm_edp;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 7dd0845d1bd9..12b23bd50e19 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx, active_dpp_count = 0;
+ int i, pipe_idx, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -529,7 +529,7 @@ void dcn31_calculate_wm_and_dlg_fp(
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
- active_dpp_count++;
+ active_hubp_count++;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -547,9 +547,19 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- /* For 31x apu pstate change is only supported if possible in vactive or if there are no active dpps */
+ /* For 31x apu, pstate change is only supported if possible in vactive */
context->bw_ctx.bw.dcn.clk.p_state_change_support =
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
+ /* If DCN isn't making memory requests we can allow pstate change and lower clocks */
+ if (!active_hubp_count) {
+ context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ }
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -797,3 +807,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
else
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
}
+
+int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
+{
+ return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
+}
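dcn_get_max_non_odm_pix_rate_100hz derates the state-0 dispclk by the SoC downspread to get the fastest pixel clock a single pipe (no ODM) can drive; dcn315_populate_dml_pipes_from_context uses it above to take the 192 KB DET / unbounded-request path only for such modes. Illustrative numbers (not a real SKU's limits):

    /* dispclk_mhz = 600, dcn_downspread_percent = 0.5 */
    600 * 10000.0 / (1.0 + 0.5 / 100.0) = 5970149    /* units of 100 Hz, ~597 MHz */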
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index fd58b2561ec9..687d3522cc33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -46,5 +46,10 @@ void dcn31_calculate_wm_and_dlg_fp(
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
+int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 45ab0ce50860..4e45c6d9ecdc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1056,14 +1056,12 @@ static bool CalculatePrefetchSchedule(
prefetch_bw_pr = dml_min(1, myPipe->VRatio) * prefetch_bw_pr;
max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
Tsw_oto = Lsw_oto * LineTime;
- prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC) / Tsw_oto;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: HTotal: %d\n", myPipe->HTotal);
@@ -5362,6 +5360,58 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ModeSupport[i][j] = true;
} else {
v->ModeSupport[i][j] = false;
+#ifdef __DML_VBA_DEBUG__
+ if (v->ScaleRatioAndTapsSupport == false)
+ dml_print("DML SUPPORT: ScaleRatioAndTapsSupport failed");
+ if (v->SourceFormatPixelAndScanSupport == false)
+ dml_print("DML SUPPORT: SourceFormatPixelAndScanSupport failed");
+ if (v->ViewportSizeSupport[i][j] == false)
+ dml_print("DML SUPPORT: ViewportSizeSupport failed");
+ if (v->LinkCapacitySupport[i] == false)
+ dml_print("DML SUPPORT: LinkCapacitySupport failed");
+ if (v->ODMCombine4To1SupportCheckOK[i] == false)
+ dml_print("DML SUPPORT: ODMCombine4To1SupportCheckOK failed");
+ if (v->NotEnoughDSCUnits[i] == true)
+ dml_print("DML SUPPORT: NotEnoughDSCUnits");
+ if (v->DTBCLKRequiredMoreThanSupported[i] == true)
+ dml_print("DML SUPPORT: DTBCLKRequiredMoreThanSupported");
+ if (v->ROBSupport[i][j] == false)
+ dml_print("DML SUPPORT: ROBSupport failed");
+ if (v->DISPCLK_DPPCLK_Support[i][j] == false)
+ dml_print("DML SUPPORT: DISPCLK_DPPCLK_Support failed");
+ if (v->TotalAvailablePipesSupport[i][j] == false)
+ dml_print("DML SUPPORT: TotalAvailablePipesSupport failed");
+ if (EnoughWritebackUnits == false)
+ dml_print("DML SUPPORT: EnoughWritebackUnits failed");
+ if (v->WritebackLatencySupport == false)
+ dml_print("DML SUPPORT: WritebackLatencySupport failed");
+ if (v->WritebackScaleRatioAndTapsSupport == false)
+ dml_print("DML SUPPORT: WritebackScaleRatioAndTapsSupport failed");
+ if (v->CursorSupport == false)
+ dml_print("DML SUPPORT: CursorSupport failed");
+ if (v->PitchSupport == false)
+ dml_print("DML SUPPORT: PitchSupport failed");
+ if (ViewportExceedsSurface == true)
+ dml_print("DML SUPPORT: ViewportExceedsSurface failed");
+ if (v->PrefetchSupported[i][j] == false)
+ dml_print("DML SUPPORT: PrefetchSupported failed");
+ if (v->DynamicMetadataSupported[i][j] == false)
+ dml_print("DML SUPPORT: DynamicMetadataSupported failed");
+ if (v->TotalVerticalActiveBandwidthSupport[i][j] == false)
+ dml_print("DML SUPPORT: TotalVerticalActiveBandwidthSupport failed");
+ if (v->VRatioInPrefetchSupported[i][j] == false)
+ dml_print("DML SUPPORT: VRatioInPrefetchSupported failed");
+ if (v->PTEBufferSizeNotExceeded[i][j] == false)
+ dml_print("DML SUPPORT: PTEBufferSizeNotExceeded failed");
+ if (v->NonsupportedDSCInputBPC == true)
+ dml_print("DML SUPPORT: NonsupportedDSCInputBPC failed");
+ if (!((v->HostVMEnable == false
+ && v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)
+ || v->ImmediateFlipSupportedForState[i][j] == true))
+ dml_print("DML SUPPORT: ImmediateFlipRequirement failed");
+ if (FMTBufferExceeded == true)
+ dml_print("DML SUPPORT: FMTBufferExceeded failed");
+#endif
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index cf420ad2b8dc..1dd51c4b6804 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -29,6 +29,7 @@
#include "dcn31/dcn31_hubbub.h"
#include "dcn314_fpu.h"
#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/dcn31/dcn31_fpu.h"
#include "dml/display_mode_vba.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
@@ -146,8 +147,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.sr_exit_z8_time_us = 442.0,
.sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
@@ -264,11 +265,8 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
- if ((int)(dcn3_14_soc.dram_clock_change_latency_us * 1000)
- != dc->debug.dram_clock_change_latency_ns
- && dc->debug.dram_clock_change_latency_ns) {
- dcn3_14_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
- }
+ dcn20_patch_bounding_box(dc, &dcn3_14_soc);
+
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
else
@@ -291,7 +289,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
dc_assert_fp_enabled();
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@@ -318,8 +316,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 53e3e7364ec6..41f0b4c1c72f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1078,14 +1078,12 @@ static bool CalculatePrefetchSchedule(
prefetch_bw_pr = dml_min(1, myPipe->VRatio) * prefetch_bw_pr;
max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
Tsw_oto = Lsw_oto * LineTime;
- prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC) / Tsw_oto;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: HTotal: %d\n", myPipe->HTotal);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 0d704e302d03..97b333b230d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -531,9 +531,12 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
unsigned int i, pipe_idx;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
+ unsigned int num_dpp;
unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;
unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel];
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ struct dc_stream_state *main_stream = ref_pipe->stream;
dc_assert_fp_enabled();
@@ -569,13 +572,23 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
+ // W/A for DCC corruption with certain high resolution timings.
+ // Determine whether pipe split is used. If so, add meta_row_height to the phantom vactive.
+ num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]];
+ phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0;
+
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
phantom_stream->dst.y = 0;
phantom_stream->dst.height = phantom_vactive;
+ /* When scaling, DML provides the end to end required number of lines for MALL.
+ * dst.height is always correct for this case, but src.height is not which causes a
+ * delta between main and phantom pipe scaling outputs. Need to adjust src.height on
+ * phantom for this case.
+ */
phantom_stream->src.y = 0;
- phantom_stream->src.height = phantom_vactive;
+ phantom_stream->src.height = (double)phantom_vactive * (double)main_stream->src.height / (double)main_stream->dst.height;
phantom_stream->timing.v_addressable = phantom_vactive;
phantom_stream->timing.v_front_porch = 1;
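Scaling correction: dst.height tells DML how many output lines the phantom pipe produces, but the scaler fetches source lines, so src.height must shrink (or grow) by the main pipe's scale ratio or the phantom pipe's fetch would drift from the main pipe's. A worked example with illustrative timings:

    /* main pipe upscales src.height 1080 -> dst.height 2160 */
    phantom_vactive    = 100                        /* output lines */
    phantom src.height = 100 * 1080 / 2160 = 50     /* source lines */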
@@ -1228,7 +1241,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_hubp_count = 0;
bool usr_retraining_support = false;
bool unbounded_req_enabled = false;
@@ -1273,6 +1286,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_hubp_count++;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
@@ -1298,6 +1313,16 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
pipe_idx++;
}
+ /* If DCN isn't making memory requests we can allow pstate change and lower clocks */
+ if (!active_hubp_count) {
+ context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ }
/*save a original dppclock copy*/
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -1729,6 +1754,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
}
if (repopulate_pipes) {
+ int flag_max_mpc_comb = vba->maxMpcComb;
+ int flag_vlevel = vlevel;
+
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
@@ -1742,6 +1770,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (vlevel == context->bw_ctx.dml.soc.num_states) {
/* failed after DET size changes */
goto validate_fail;
+ } else if (flag_max_mpc_comb == 0 &&
+ flag_max_mpc_comb != context->bw_ctx.dml.vba.maxMpcComb) {
+ /* check the context constructed with pipe split flags is still valid*/
+ bool flags_valid = false;
+ for (int i = flag_vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
+ if (vba->ModeSupport[i][flag_max_mpc_comb]) {
+ vba->maxMpcComb = flag_max_mpc_comb;
+ vba->VoltageLevel = i;
+ vlevel = i;
+ flags_valid = true;
+ }
+ }
+
+ /* this should never happen */
+ if (!flags_valid)
+ goto validate_fail;
}
}
*vlevel_out = vlevel;
@@ -1800,6 +1844,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
+ * prefetch is scheduled correctly to account for dummy pstate.
+ */
+ if (dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
@@ -1987,6 +2037,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+
dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
if (!pstate_en)
@@ -1994,8 +2048,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+ if (dummy_latency_index == 0)
+ context->bw_ctx.dml.soc.fclk_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
+ }
}
static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
@@ -2351,6 +2409,8 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
/* DML DSC delay factor workaround */
dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
+ dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
+
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 3d184679f129..e5c8f6a71b5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -718,6 +718,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
do {
MaxTotalRDBandwidth = 0;
+ DestinationLineTimesForPrefetchLessThan2 = false;
+ VRatioPrefetchMoreThanMax = false;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, mode_lib->vba.VStartupLines);
#endif
@@ -786,6 +788,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
+ v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
@@ -2282,7 +2286,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
|| mode_lib->vba.Output[k] == dm_edp
|| mode_lib->vba.Output[k] == dm_hdmi)
- && mode_lib->vba.OutputBppPerState[i][k] == 0) {
+ && mode_lib->vba.OutputBppPerState[i][k] == 0 && (mode_lib->vba.UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe)) {
mode_lib->vba.LinkCapacitySupport[i] = false;
}
}
@@ -3192,6 +3196,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.FCLKChangeLatency, mode_lib->vba.UrgLatency[i],
mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull, 0, sizeof(DmlPipe));
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dppclk = mode_lib->vba.RequiredDPPCLK[i][j][k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dispclk = mode_lib->vba.RequiredDISPCLK[i][j];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
@@ -3244,6 +3249,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
+ v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index c62e0991358b..c8b28c83ddf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -46,9 +46,14 @@
// Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE__ 4.0
+#define __DML_VBA_MAX_DST_Y_PRE__ 63.75
+
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define MEM_STROBE_FREQ_MHZ 1600
+#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
+
struct display_mode_lib;
void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 968924c491c1..debe46b24a3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3417,6 +3417,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
+ double TPreReq,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
@@ -3667,6 +3668,7 @@ bool dml32_CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, __DML_VBA_MAX_DST_Y_PRE__);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
@@ -3726,7 +3728,8 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1 &&
+ (Tpre_rounded >= TPreReq || dst_y_prefetch_equ == __DML_VBA_MAX_DST_Y_PRE__)) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
@@ -3872,7 +3875,11 @@ bool dml32_CalculatePrefetchSchedule(
}
if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ if (dst_y_prefetch_oto * LineTime < TPreReq) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ } else {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ }
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
@@ -4397,7 +4404,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+ - (1.0 - 1.0 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
/ v->PixelClock[k] / v->VRatio[k];
}
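The 1.0 literals are the substance of this hunk: NumberOfActiveSurfaces is an integer in the VBA state, so the old 1 - 1 / v->NumberOfActiveSurfaces was evaluated in integer arithmetic, where 1/N truncates to 0 for any N > 1 and the whole factor collapsed to 1. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int n = 2;    /* two active surfaces */
        printf("%f\n", (double)(1 - 1 / n));  /* 1.000000 -- truncated */
        printf("%f\n", 1.0 - 1.0 / n);        /* 0.500000 -- intended  */
        return 0;
    }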
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 2c3827546ac7..3989c2a28fae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,7 +30,7 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
-#include "dml/display_mode_vba.h"
+#include "../display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -743,6 +743,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
+ double TPreReq,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index ec0486efab14..432b4ecd01a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -544,6 +544,8 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
/* DML DSC delay factor workaround */
dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
+ dcn3_21_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
+
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_21_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index d7be01ac0751..64d602e6412f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -367,6 +367,7 @@ struct _vcs_dpi_ip_params_st {
/* DM workarounds */
double dsc_delay_factor_wa; // TODO: Remove after implementing root cause fix
+ double min_prefetch_in_strobe_us;
};
struct _vcs_dpi_display_xfc_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
index d635b73af46f..0ea52ba5ac82 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
@@ -108,6 +108,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
{
+ // add a dummy entry for cases where no such port exists
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ },
+ {
DDC_GPIO_VGA_REG_LIST(DATA),
.ddc_setup = 0,
.phy_aux_cntl = 0,
@@ -122,6 +129,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
{
+ // add a dummy entry for cases where no such port exists
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ },
+ {
DDC_GPIO_VGA_REG_LIST(CLK),
.ddc_setup = 0,
.phy_aux_cntl = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 6fd38cdd68c0..525bc8881950 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -94,11 +94,14 @@ static enum gpio_result set_config(
* is required for detection of AUX mode */
if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) {
if (!ddc_data_pd_en || !ddc_clk_pd_en) {
-
- REG_SET_2(gpio.MASK_reg, regval,
+ if (hw_gpio->base.en == GPIO_DDC_LINE_DDC_VGA) {
+ // bit 4 of mask has different usage in some cases
+ REG_SET(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1);
+ } else {
+ REG_SET_2(gpio.MASK_reg, regval,
DC_GPIO_DDC1DATA_PD_EN, 1,
DC_GPIO_DDC1CLK_PD_EN, 1);
-
+ }
if (config_data->type ==
GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
msleep(3);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 58f758fcbce1..f2e1fcb668fb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -186,6 +186,7 @@ struct hubbub_funcs {
void (*program_compbuf_size)(struct hubbub *hubbub, unsigned compbuf_size_kb, bool safe_to_increase);
void (*init_crb)(struct hubbub *hubbub);
void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow);
+ void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 65f18f9dad34..0e42e721dd15 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -185,6 +185,7 @@ struct timing_generator_funcs {
#ifdef CONFIG_DRM_AMD_DC_DCN
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
#endif
+ void (*disable_phantom_crtc)(struct timing_generator *tg);
bool (*immediate_disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
void (*get_position)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index d04b68dad413..c43523f9ff6d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -263,6 +263,7 @@ struct hw_sequencer_funcs {
void (*update_phantom_vp_position)(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *phantom_pipe);
+ void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 45f99351a0ab..5f4f6dd79511 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -28,20 +28,19 @@
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"
+#include "irq_service_dcn201.h"
#include "dcn/dcn_2_0_3_offset.h"
#include "dcn/dcn_2_0_3_sh_mask.h"
#include "cyan_skillfish_ip_offset.h"
#include "soc15_hw_ip.h"
-
-#include "irq_service_dcn201.h"
-
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+enum dc_irq_source to_dal_irq_source_dcn201(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
@@ -79,7 +78,6 @@ static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_servi
default:
return DC_IRQ_SOURCE_INVALID;
}
- return DC_IRQ_SOURCE_INVALID;
}
static bool hpd_ack(
@@ -138,6 +136,11 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h
index 8e27c5e219a3..0cfd2f2d62e8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 9df330c86a55..795d8811af9a 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -407,8 +407,9 @@ union dmub_fw_boot_options {
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
+ uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
- uint32_t reserved : 16; /**< reserved */
+ uint32_t reserved : 15; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -1877,9 +1878,13 @@ struct dmub_cmd_psr_copy_settings_data {
*/
uint8_t use_phy_fsm;
/**
+ * frame delay for frame re-lock
+ */
+ uint8_t relock_delay_frame_cnt;
+ /**
* Explicit padding to 2 byte boundary.
*/
- uint8_t pad3[2];
+ uint8_t pad3;
};
/**
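
Both hunks above preserve the same invariant: when a new field is added to a fixed-size firmware interface struct, the reserved bits (16 -> 15) or the padding bytes (pad3[2] -> pad3) shrink by exactly the size of the new field, so the layout seen by the DMUB firmware does not move. A compile-time size check makes the invariant explicit; a small sketch with illustrative names:

#include <assert.h>
#include <stdint.h>

union boot_options {
	struct {
		uint32_t existing_flags : 16;
		uint32_t new_flag : 1;	/* newly added bit */
		uint32_t reserved : 15;	/* was 16 before new_flag */
	} bits;
	uint32_t all;
};

/* if reserved had stayed at 16 bits the union would grow past 32 bits */
static_assert(sizeof(union boot_options) == sizeof(uint32_t),
	      "boot options must remain exactly 32 bits");

int main(void) { return 0; }
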
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 447a0ec9cbe2..f6034213c700 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -61,7 +61,7 @@ static const int32_t numerator01[] = { 31308, 180000, 0, 0, 0};
static const int32_t numerator02[] = { 12920, 4500, 0, 0, 0};
static const int32_t numerator03[] = { 55, 99, 0, 0, 0};
static const int32_t numerator04[] = { 55, 99, 0, 0, 0};
-static const int32_t numerator05[] = { 2400, 2200, 2200, 2400, 2600};
+static const int32_t numerator05[] = { 2400, 2222, 2200, 2400, 2600};
/* one-time setup of X points */
void setup_x_points_distribution(void)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 0f39ab9dc5b4..c2e00f7b8381 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -688,10 +688,10 @@ static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
if (app_tf != TRANSFER_FUNC_UNKNOWN) {
infopacket->valid = true;
- infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
-
- if (app_tf == TRANSFER_FUNC_GAMMA_22) {
- infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
+ if (app_tf != TRANSFER_FUNC_PQ2084) {
+ infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
+ if (app_tf == TRANSFER_FUNC_GAMMA_22)
+ infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
}
}
}
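
The rewritten branch keeps the infopacket valid for any known transfer function but only advertises Native Color (sb[6] bit 3) and the Gamma 2.2 EOTF (sb[9] bit 2) when the content is not PQ2084. A standalone sketch of that decision, with illustrative enum names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum tf { TF_UNKNOWN, TF_SRGB, TF_GAMMA_22, TF_PQ2084 };

static void build_fs2_bits(uint8_t *sb, bool *valid, enum tf app_tf)
{
	if (app_tf == TF_UNKNOWN)
		return;
	*valid = true;
	if (app_tf == TF_PQ2084)
		return;			/* PQ: packet valid, bits stay clear */
	sb[6] |= 0x08;			/* Native Color Active */
	if (app_tf == TF_GAMMA_22)
		sb[9] |= 0x04;		/* Gamma 2.2 EOTF Active */
}

int main(void)
{
	uint8_t sb[16] = { 0 };
	bool valid = false;

	build_fs2_bits(sb, &valid, TF_GAMMA_22);
	printf("valid=%d sb[6]=0x%02x sb[9]=0x%02x\n", valid, sb[6], sb[9]);
	return 0;
}
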
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index b5b1d073f8e2..4dc738c51771 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4386,7 +4386,7 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
typedef struct _ATOM_GPIO_PIN_LUT
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[];
}ATOM_GPIO_PIN_LUT;
/****************************************************************************/
@@ -4513,7 +4513,7 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH
USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
USHORT usConnObjectId; //Connector Object ID
USHORT usGPUObjectId; //GPU ID
- USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
+ USHORT usGraphicObjIds[]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
}ATOM_DISPLAY_OBJECT_PATH;
typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
@@ -4530,7 +4530,7 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
UCHAR ucNumOfDispPath;
UCHAR ucVersion;
UCHAR ucPadding[2];
- ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[];
}ATOM_DISPLAY_OBJECT_PATH_TABLE;
typedef struct _ATOM_OBJECT //each object has this structure
@@ -4545,7 +4545,7 @@ typedef struct _ATOM_OBJECT_TABLE //Above 4 object table
{
UCHAR ucNumberOfObjects;
UCHAR ucPadding[3];
- ATOM_OBJECT asObjects[1];
+ ATOM_OBJECT asObjects[];
}ATOM_OBJECT_TABLE;
typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
@@ -4733,7 +4733,7 @@ typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucNumberOfDevice;
UCHAR ucReserved;
- ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[]; //This ID is the same as "ATOM_DEVICE_XXX_SUPPORT"
}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
@@ -4793,7 +4793,7 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
ATOM_COMMON_RECORD_HEADER sheader;
 UCHAR ucFlags; // Future expandability
UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
- ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[]; // the real gpio pin pair determined by number of pins ucNumberOfPins
}ATOM_OBJECT_GPIO_CNTL_RECORD;
//Definitions for GPIO pin state
@@ -4982,7 +4982,7 @@ typedef struct _ATOM_BRACKET_LAYOUT_RECORD
UCHAR ucWidth;
UCHAR ucConnNum;
UCHAR ucReserved;
- ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
+ ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[];
}ATOM_BRACKET_LAYOUT_RECORD;
@@ -5146,7 +5146,7 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
UCHAR ucVoltageControlOffset;
UCHAR ucVoltageControlFlag; // Bit0: 0 - One byte data; 1 - Two byte data
UCHAR ulReserved[3];
- VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
+ VOLTAGE_LUT_ENTRY asVolI2cLut[]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
@@ -5161,7 +5161,7 @@ typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
UCHAR ucPhaseDelay; // phase delay in unit of micro second
UCHAR ucReserved;
ULONG ulGpioMaskVal; // GPIO Mask value
- VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
+ VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[];
}ATOM_GPIO_VOLTAGE_OBJECT_V3;
typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
@@ -5171,7 +5171,7 @@ typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
UCHAR ucReserved[2];
ULONG ulMaxVoltageLevel;
- LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
+ LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[];
}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
@@ -6599,7 +6599,7 @@ typedef struct _ATOM_FUSION_SYSTEM_INFO_V3
typedef struct _ATOM_I2C_DATA_RECORD
{
UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop"
- UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
+ UCHAR ucI2CData[]; //I2C data in bytes, should be less than 16 bytes usually
}ATOM_I2C_DATA_RECORD;
@@ -6610,14 +6610,14 @@ typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
UCHAR ucSSChipID; //SS chip being used
UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
UCHAR ucNumOfI2CDataRecords; //number of data block
- ATOM_I2C_DATA_RECORD asI2CData[1];
+ ATOM_I2C_DATA_RECORD asI2CData[];
}ATOM_I2C_DEVICE_SETUP_INFO;
//==========================================================================================
typedef struct _ATOM_ASIC_MVDD_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[];
}ATOM_ASIC_MVDD_INFO;
//==========================================================================================
@@ -6679,7 +6679,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only.
+ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[]; //this is pointer only.
}ATOM_ASIC_INTERNAL_SS_INFO_V2;
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
@@ -6701,7 +6701,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
{
ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only.
+ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[]; //this is pointer only.
}ATOM_ASIC_INTERNAL_SS_INFO_V3;
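
Every hunk in this header applies the same mechanical conversion: a one-element trailing array (the old pre-C99 idiom) becomes a C99 flexible array member. Besides removing the phantom element from sizeof, this lets the compiler and fortify checks flag out-of-bounds accesses instead of silently indexing past [0]. A minimal user-space sketch of the allocation change, with made-up types:

#include <stdint.h>
#include <stdlib.h>

struct pin { uint16_t id; };

struct pin_lut {
	uint16_t header;
	struct pin pins[];	/* flexible array member, formerly pins[1] */
};

static struct pin_lut *lut_alloc(size_t count)
{
	/* with pins[1], sizeof() silently included one extra element */
	return malloc(sizeof(struct pin_lut) + count * sizeof(struct pin));
}

int main(void)
{
	struct pin_lut *lut = lut_alloc(4);

	if (!lut)
		return 1;
	lut->pins[3].id = 42;	/* in bounds for the 4-entry allocation */
	free(lut);
	return 0;
}
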
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index ff855cb21d3f..bbe1337a8cee 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -705,20 +705,65 @@ struct atom_gpio_pin_lut_v2_1
};
-/*
- ***************************************************************************
- Data Table vram_usagebyfirmware structure
- ***************************************************************************
-*/
+/*
+ * VBIOS/PRE-OS always reserves an FB region at the top of the frame buffer. Drivers should not
+ * write to that region. A driver can allocate its own reservation region as long as it does not
+ * overlap the firmware's reservation region.
+ * if (pre-NV1X) atom data table firmwareInfoTable version < 3.3:
+ * in this case, atom data table vram_usagebyfirmwareTable version always <= 2.1
+ * if VBIOS/UEFI GOP is posted:
+ * VBIOS/UEFIGOP update used_by_firmware_in_kb = total reserved size by VBIOS
+ * update start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
+ * ( total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
+ * driver can allocate driver reservation region under firmware reservation,
+ * used_by_driver_in_kb = driver reservation size
+ * driver reservation start address = (start_address_in_kb - used_by_driver_in_kb)
+ * Comment1[hchan]: Under SR-IOV there is only one reservation, at the beginning of the FB,
+ * reserved by the host driver. The host driver overwrites the table as follows:
+ * used_by_firmware_in_kb = total reserved size for PF-VF info exchange,
+ * set the SRIOV_MSG_SHARE_RESERVATION mask, and start_address_in_kb = 0
+ * else there is no VBIOS reservation region:
+ * driver must allocate driver reservation region at top of FB.
+ * driver set used_by_driver_in_kb = driver reservation size
+ * driver reservation start address = (total_mem_size_in_kb - used_by_driver_in_kb)
+ * same as Comment1
+ * else (NV1X and after):
+ * if VBIOS/UEFI GOP is posted:
+ * VBIOS/UEFIGOP update:
+ * used_by_firmware_in_kb = atom_firmware_Info_v3_3.fw_reserved_size_in_kb;
+ * start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
+ * (total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
+ * if vram_usagebyfirmwareTable version <= 2.1:
+ * driver can allocate driver reservation region under firmware reservation,
+ * driver set used_by_driver_in_kb = driver reservation size
+ * driver reservation start address = start_address_in_kb - used_by_driver_in_kb
+ * same as Comment1
+ * else the driver can:
+ * allocate its reservation anywhere as long as it does not overlap the pre-OS FW reservation area
+ * set used_by_driver_region0_in_kb = driver reservation size
+ * set driver_region0_start_address_in_kb = driver reservation region start address
+ * Comment2[hchan]: The host driver can set used_by_firmware_in_kb and start_address_in_kb to
+ * zero for the VF, as the reservation does not exist there. The host driver should also
+ * update the atom_firmware_Info table to remove the same VBIOS reservation as well.
+ */
struct vram_usagebyfirmware_v2_1
{
- struct atom_common_table_header table_header;
- uint32_t start_address_in_kb;
- uint16_t used_by_firmware_in_kb;
- uint16_t used_by_driver_in_kb;
+ struct atom_common_table_header table_header;
+ uint32_t start_address_in_kb;
+ uint16_t used_by_firmware_in_kb;
+ uint16_t used_by_driver_in_kb;
};
+struct vram_usagebyfirmware_v2_2 {
+ struct atom_common_table_header table_header;
+ uint32_t fw_region_start_address_in_kb;
+ uint16_t used_by_firmware_in_kb;
+ uint16_t reserved;
+ uint32_t driver_region0_start_address_in_kb;
+ uint32_t used_by_driver_region0_in_kb;
+ uint32_t reserved32[7];
+};
/*
***************************************************************************
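
The comment above pins down a small amount of address arithmetic; a standalone sketch with illustrative numbers may help (the sizes are invented, not taken from any ASIC). The firmware region sits at the top of VRAM and, for the legacy v2.1 layout, the driver region is carved out immediately below it:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t total_mem_size_in_kb = 16u * 1024 * 1024;	/* 16 GB VRAM */
	uint32_t used_by_firmware_in_kb = 8 * 1024;		/* 8 MB FW region */
	uint32_t used_by_driver_in_kb = 4 * 1024;		/* 4 MB driver region */

	/* start_address_in_kb as written by VBIOS/UEFI GOP */
	uint32_t fw_start_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;

	/* v2.1 layout: driver reservation directly below the FW region */
	uint32_t drv_start_in_kb = fw_start_in_kb - used_by_driver_in_kb;

	printf("FW region at %u KB, driver region at %u KB\n",
	       fw_start_in_kb, drv_start_in_kb);
	return 0;
}
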
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_4_0.h
index a81138c9e491..03cfa0517df2 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_4_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_4_0.h
@@ -38,4 +38,7 @@
#define VCN_4_0__SRCID__JPEG6_DECODE 174 // 0xae JRBC6 Decode interrupt
#define VCN_4_0__SRCID__JPEG7_DECODE 175 // 0xaf JRBC7 Decode interrupt
+#define VCN_4_0__SRCID_UVD_POISON 160
+#define VCN_4_0__SRCID_DJPEG0_POISON 161
+#define VCN_4_0__SRCID_EJPEG0_POISON 162
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 1159ae114dd0..304190d5c9d2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1508,7 +1508,7 @@ static void pp_pm_compute_clocks(void *handle)
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
- if (!amdgpu_device_has_dc_support(adev)) {
+ if (!adev->dc_enabled) {
amdgpu_dpm_get_active_displays(adev);
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index dad3e3741a4e..190af79f3236 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- uint32_t current_rpm;
- uint32_t percent = 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
- if (vega10_get_current_rpm(hwmgr, &current_rpm))
- return -1;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
- if (hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM != 0)
- percent = current_rpm * 255 /
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM;
+ if (!duty100)
+ return -EINVAL;
- *speed = MIN(percent, 255);
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
return 0;
}
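
The new implementation reports PWM speed straight from the fan controller's duty-cycle registers instead of back-computing it from RPM. The 64-bit intermediate matters: duty * 255 can exceed 32 bits for large register values. A standalone sketch of the scaling (plain division stands in for the kernel's do_div()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;	/* register value meaning 100% duty */
	uint32_t duty = 128;	/* current duty reading */

	if (!duty100)
		return 1;	/* mirrors the -EINVAL guard */

	uint64_t tmp64 = (uint64_t)duty * 255;	/* avoid 32-bit overflow */
	tmp64 /= duty100;	/* kernel code: do_div(tmp64, duty100) */

	uint32_t speed = tmp64 < 255 ? (uint32_t)tmp64 : 255;
	printf("pwm speed = %u\n", speed);	/* prints 128 */
	return 0;
}
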
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 97b3ad369046..b30684c84e20 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -2961,7 +2961,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
- int32_t input_index, input_clk, input_vol, i;
+ int32_t input_clk, input_vol, i;
+ uint32_t input_index;
int od8_id;
int ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 4fe75dd2b329..20e5f66f853f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1156,22 +1156,21 @@ static int smu_smc_hw_setup(struct smu_context *smu)
uint64_t features_supported;
int ret = 0;
- if (adev->in_suspend && smu_is_dpm_running(smu)) {
- dev_info(adev->dev, "dpm has been enabled\n");
- /* this is needed specifically */
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 11):
- case IP_VERSION(11, 5, 0):
- case IP_VERSION(11, 0, 12):
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
+ if (adev->in_suspend && smu_is_dpm_running(smu)) {
+ dev_info(adev->dev, "dpm has been enabled\n");
ret = smu_system_features_control(smu, true);
if (ret)
dev_err(adev->dev, "Failed system features control!\n");
- break;
- default:
- break;
+ return ret;
}
- return ret;
+ break;
+ default:
+ break;
}
ret = smu_init_display_count(smu, 0);
@@ -1449,6 +1448,7 @@ static int smu_disable_dpms(struct smu_context *smu)
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
return 0;
default:
break;
@@ -1517,7 +1517,7 @@ static int smu_disable_dpms(struct smu_context *smu)
}
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
- adev->gfx.rlc.funcs->stop)
+ !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index e2fa3b066b96..44bbf17e4bef 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -568,6 +568,10 @@ struct smu_context
u32 param_reg;
u32 msg_reg;
u32 resp_reg;
+
+ u32 debug_param_reg;
+ u32 debug_msg_reg;
+ u32 debug_resp_reg;
};
struct i2c_adapter;
@@ -1388,6 +1392,14 @@ enum smu_cmn2asic_mapping_type {
CMN2ASIC_MAPPING_WORKLOAD,
};
+enum smu_baco_seq {
+ BACO_SEQ_BACO = 0,
+ BACO_SEQ_MSR,
+ BACO_SEQ_BAMACO,
+ BACO_SEQ_ULPS,
+ BACO_SEQ_COUNT,
+};
+
#define MSG_MAP(msg, index, valid_in_vf) \
[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h
index d9b0cd752200..f4d6c07b56ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h
@@ -54,14 +54,14 @@
#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
-#define PPSMC_MSG_EnableGfxOff 0x04 ///< Enable GFXOFF
-#define PPSMC_MSG_DisableGfxOff 0x05 ///< Disable GFXOFF
+#define PPSMC_MSG_SPARE0 0x04 ///< SPARE
+#define PPSMC_MSG_SPARE1 0x05 ///< SPARE
#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Needs update
-#define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. enter GFXOFF
+#define PPSMC_MSG_SPARE2 0x0A ///< SPARE
+#define PPSMC_MSG_SPARE3 0x0B ///< SPARE
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
@@ -73,8 +73,7 @@
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
-
-#define PPSMC_MSG_EnableGfxImu 0x16 ///< Needs update
+#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
#define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency
#define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency
@@ -102,8 +101,8 @@
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
-#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn
+#define PPSMC_MSG_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
+#define PPSMC_MSG_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
#define PPSMC_Message_Count 0x31 ///< Total number of PPSMC messages
/** @}*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index a9215494dcdd..d466db6f0ad4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -147,14 +147,6 @@ struct smu_11_5_power_context {
uint32_t max_fast_ppt_limit;
};
-enum smu_v11_0_baco_seq {
- BACO_SEQ_BACO = 0,
- BACO_SEQ_MSR,
- BACO_SEQ_BAMACO,
- BACO_SEQ_ULPS,
- BACO_SEQ_COUNT,
-};
-
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
int smu_v11_0_init_microcode(struct smu_context *smu);
@@ -257,7 +249,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu);
int smu_v11_0_baco_exit(struct smu_context *smu);
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_v11_0_baco_seq baco_seq);
+ enum smu_baco_seq baco_seq);
int smu_v11_0_mode1_reset(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 80fb583b18d9..b7f4569aff2a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -124,14 +124,6 @@ struct smu_13_0_power_context {
enum smu_13_0_power_state power_state;
};
-enum smu_v13_0_baco_seq {
- BACO_SEQ_BACO = 0,
- BACO_SEQ_MSR,
- BACO_SEQ_BAMACO,
- BACO_SEQ_ULPS,
- BACO_SEQ_COUNT,
-};
-
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
int smu_v13_0_init_microcode(struct smu_context *smu);
@@ -218,6 +210,9 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq);
+
bool smu_v13_0_baco_is_support(struct smu_context *smu);
enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 74996a8fb671..697e98a0a20a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -377,7 +377,13 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
if (((adev->pdev->device == 0x73A1) &&
(adev->pdev->revision == 0x00)) ||
((adev->pdev->device == 0x73BF) &&
- (adev->pdev->revision == 0xCF)))
+ (adev->pdev->revision == 0xCF)) ||
+ ((adev->pdev->device == 0x7422) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73A3) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73E3) &&
+ (adev->pdev->revision == 0x00)))
smu_baco->platform_support = false;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index dccbd9f70723..70b560737687 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1576,7 +1576,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
}
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_v11_0_baco_seq baco_seq)
+ enum smu_baco_seq baco_seq)
{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 43fb102a65f5..89f0f6eb19f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2230,6 +2230,15 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
return ret;
}
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+}
+
bool smu_v13_0_baco_is_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 29529328152d..5bcb61f77e41 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -70,6 +70,26 @@
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_75 0x028b
+#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_53 0x0275
+#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_54 0x0276
+#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0
+
+#define DEBUGSMC_MSG_Mode1Reset 2
+
static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -120,6 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1566,6 +1587,31 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
NULL);
}
+static int smu_v13_0_0_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_0_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for the PMFW to handle the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v13_0_baco_exit(smu);
+ }
+}
+
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1763,6 +1809,35 @@ static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
NULL);
}
+static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
+{
+ int ret;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
+static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
+ smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53);
+ smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75);
+ smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54);
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -1827,10 +1902,10 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.baco_is_support = smu_v13_0_baco_is_support,
.baco_get_state = smu_v13_0_baco_get_state,
.baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_baco_enter,
- .baco_exit = smu_v13_0_baco_exit,
+ .baco_enter = smu_v13_0_0_baco_enter,
+ .baco_exit = smu_v13_0_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
- .mode1_reset = smu_v13_0_mode1_reset,
+ .mode1_reset = smu_v13_0_0_mode1_reset,
.set_mp1_state = smu_v13_0_0_set_mp1_state,
.set_df_cstate = smu_v13_0_0_set_df_cstate,
};
@@ -1844,5 +1919,5 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_0_table_map;
smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
smu->workload_map = smu_v13_0_0_workload_map;
- smu_v13_0_set_smu_mailbox_registers(smu);
+ smu_v13_0_0_set_smu_mailbox_registers(smu);
}
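
The new enter/exit wrappers only arm a D3 sequence when the GPU is in runtime PM with the audio function enabled: BAMACO if MACO is supported, plain BACO otherwise, and ULPS on exit after a sleep that gives the PMFW time to process the D-state change. A toy sketch of the enter-side decision, with illustrative names standing in for the SMU types:

#include <stdbool.h>
#include <stdio.h>

enum baco_seq { SEQ_BACO, SEQ_MSR, SEQ_BAMACO, SEQ_ULPS };

/* returns the sequence to arm, or -1 for the plain baco_enter() path */
static int pick_enter_seq(bool in_runpm, bool audio_enabled, bool maco_support)
{
	if (in_runpm && audio_enabled)
		return maco_support ? SEQ_BAMACO : SEQ_BACO;
	return -1;
}

int main(void)
{
	printf("%d\n", pick_enter_seq(true, true, true));	/* 2: BAMACO */
	printf("%d\n", pick_enter_seq(false, true, true));	/* -1: plain path */
	return 0;
}
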
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c4102cfb734c..d74debc584f8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -122,6 +122,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1578,6 +1579,31 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_7_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ else
+ return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_7_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for the PMFW to handle the Dstate change */
+ usleep_range(10000, 11000);
+ return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v13_0_baco_exit(smu);
+ }
+}
+
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1655,8 +1681,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_is_support = smu_v13_0_baco_is_support,
.baco_get_state = smu_v13_0_baco_get_state,
.baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_baco_enter,
- .baco_exit = smu_v13_0_baco_exit,
+ .baco_enter = smu_v13_0_7_baco_enter,
+ .baco_exit = smu_v13_0_7_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index e4f8f90ac5aa..768b6e7dbd77 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -233,6 +233,18 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
WREG32(smu->msg_reg, msg);
}
+static int __smu_cmn_send_debug_msg(struct smu_context *smu,
+ u32 msg,
+ u32 param)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32(smu->debug_param_reg, param);
+ WREG32(smu->debug_msg_reg, msg);
+ WREG32(smu->debug_resp_reg, 0);
+
+ return 0;
+}
/**
* smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
* @smu: pointer to an SMU context
@@ -386,6 +398,12 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
read_arg);
}
+int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
+ uint32_t msg)
+{
+ return __smu_cmn_send_debug_msg(smu, msg, 0);
+}
+
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
enum smu_cmn2asic_mapping_type type,
uint32_t index)
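
__smu_cmn_send_debug_msg() is fire-and-forget: parameter first, then the message ID (which triggers the PMFW), then a zero write to the response register; unlike the normal message path it never polls for completion, which suits a Mode1 reset that takes the firmware down with it. A standalone sketch with a stubbed WREG32 (the real one is an MMIO write); the offsets follow the C2PMSG_53/75/54 defines above, though the real code also adds the SOC15 base:

#include <stdint.h>
#include <stdio.h>

static void WREG32(uint32_t reg, uint32_t val)
{
	printf("write reg 0x%04x = 0x%08x\n", reg, val);
}

struct smu_ctx { uint32_t debug_param_reg, debug_msg_reg, debug_resp_reg; };

static int send_debug_msg(struct smu_ctx *smu, uint32_t msg, uint32_t param)
{
	WREG32(smu->debug_param_reg, param);	/* argument first */
	WREG32(smu->debug_msg_reg, msg);	/* message ID triggers PMFW */
	WREG32(smu->debug_resp_reg, 0);		/* clear response, no polling */
	return 0;
}

int main(void)
{
	/* C2PMSG_53 (param), C2PMSG_75 (msg), C2PMSG_54 (resp) */
	struct smu_ctx smu = { 0x0275, 0x028b, 0x0276 };

	return send_debug_msg(&smu, 2 /* DEBUGSMC_MSG_Mode1Reset */, 0);
}
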
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 1526ce09c399..f82cf76dd3a4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -42,6 +42,9 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
enum smu_message_type msg,
uint32_t *read_arg);
+int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
+ uint32_t msg);
+
int smu_cmn_wait_for_response(struct smu_context *smu);
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index ecd22c038c8c..51a46689cda7 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -5186,7 +5186,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
mst_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(mst_state))
- return -EINVAL;
+ return PTR_ERR(mst_state);
list_for_each_entry(pos, &mst_state->payloads, next) {
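
This one-liner preserves the errno encoded in the ERR_PTR instead of flattening every failure to -EINVAL, so callers can distinguish e.g. -ENOMEM from a genuine invalid-argument case. A generic sketch with stubbed IS_ERR/PTR_ERR (the kernel versions live in <linux/err.h>):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(e) ((void *)(intptr_t)(e))

static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static inline long PTR_ERR(const void *p)
{
	return (long)p;
}

static void *get_state(void)
{
	return ERR_PTR(-ENOMEM);	/* simulate an allocation failure */
}

static int use_state(void)
{
	void *state = get_state();

	if (IS_ERR(state))
		return PTR_ERR(state);	/* was: return -EINVAL */
	return 0;
}

int main(void)
{
	printf("use_state() = %d\n", use_state());	/* -12 (ENOMEM) on Linux */
	return 0;
}
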
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 52819e7f1fca..97a277f9a25e 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,4 +1,34 @@
# SPDX-License-Identifier: MIT
+
+config DRM_RADEON
+ tristate "ATI Radeon"
+ depends on DRM && PCI && MMU
+ depends on AGP || !AGP
+ select FW_LOADER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
+ select SND_HDA_COMPONENT if SND_HDA_CORE
+ select POWER_SUPPLY
+ select HWMON
+ select BACKLIGHT_CLASS_DEVICE
+ select INTERVAL_TREE
+ # radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ # ACPI_VIDEO's dependencies must also be selected.
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ # On x86 ACPI_VIDEO also needs ACPI_WMI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
+ help
+ Choose this option if you have an ATI Radeon graphics card. There
+ are both PCI and AGP versions. You don't need to choose this to
+ run the Radeon in plain VGA mode.
+
+ If M is selected, the module will be called radeon.
+
config DRM_RADEON_USERPTR
bool "Always enable userptr support"
depends on DRM_RADEON
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 166c18d62f6d..2e7161acd443 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -79,6 +79,7 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/drm_gem.h>
+#include <drm/drm_audio_component.h>
#include "radeon_family.h"
#include "radeon_mode.h"
@@ -1796,6 +1797,9 @@ struct r600_audio {
struct radeon_audio_funcs *hdmi_funcs;
struct radeon_audio_funcs *dp_funcs;
struct radeon_audio_basic_funcs *funcs;
+ struct drm_audio_component *component;
+ bool component_registered;
+ struct mutex component_mutex;
};
/*
@@ -2994,6 +2998,10 @@ void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
bool enable, const char *name,
unsigned n);
+/* Audio component binding */
+void radeon_audio_component_init(struct radeon_device *rdev);
+void radeon_audio_component_fini(struct radeon_device *rdev);
+
#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 7c5e80d03fc9..d6ccaf24ee0c 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -23,6 +23,7 @@
*/
#include <linux/gcd.h>
+#include <linux/component.h>
#include <drm/drm_crtc.h>
#include "dce6_afmt.h"
@@ -180,6 +181,8 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
.dpms = evergreen_dp_enable,
};
+static void radeon_audio_component_notify(struct radeon_device *rdev, int port);
+
static void radeon_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin, u8 enable_mask)
{
@@ -207,6 +210,8 @@ static void radeon_audio_enable(struct radeon_device *rdev,
if (rdev->audio.funcs->enable)
rdev->audio.funcs->enable(rdev, pin, enable_mask);
+
+ radeon_audio_component_notify(rdev, pin->id);
}
static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -721,3 +726,115 @@ unsigned int radeon_audio_decode_dfs_div(unsigned int div)
else
return 0;
}
+
+/*
+ * Audio component support
+ */
+static void radeon_audio_component_notify(struct radeon_device *rdev, int port)
+{
+ struct drm_audio_component *acomp;
+
+ mutex_lock(&rdev->audio.component_mutex);
+ acomp = rdev->audio.component;
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ port, -1);
+ mutex_unlock(&rdev->audio.component_mutex);
+}
+
+static int radeon_audio_component_get_eld(struct device *kdev, int port,
+ int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ *enabled = false;
+ if (!rdev->audio.enabled || !rdev->mode_info.mode_config_initialized)
+ return 0;
+
+ list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ if (!radeon_encoder_is_digital(encoder))
+ continue;
+ radeon_encoder = to_radeon_encoder(encoder);
+ dig = radeon_encoder->enc_priv;
+ if (!dig->pin || dig->pin->id != port)
+ continue;
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ continue;
+ *enabled = true;
+ ret = drm_eld_size(connector->eld);
+ memcpy(buf, connector->eld, min(max_bytes, ret));
+ break;
+ }
+
+ return ret;
+}
+
+static const struct drm_audio_component_ops radeon_audio_component_ops = {
+ .get_eld = radeon_audio_component_get_eld,
+};
+
+static int radeon_audio_component_bind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_audio_component *acomp = data;
+
+ if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
+ mutex_lock(&rdev->audio.component_mutex);
+ acomp->ops = &radeon_audio_component_ops;
+ acomp->dev = kdev;
+ rdev->audio.component = acomp;
+ mutex_unlock(&rdev->audio.component_mutex);
+
+ return 0;
+}
+
+static void radeon_audio_component_unbind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_audio_component *acomp = data;
+
+ device_link_remove(hda_kdev, kdev);
+
+ mutex_lock(&rdev->audio.component_mutex);
+ rdev->audio.component = NULL;
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ mutex_unlock(&rdev->audio.component_mutex);
+}
+
+static const struct component_ops radeon_audio_component_bind_ops = {
+ .bind = radeon_audio_component_bind,
+ .unbind = radeon_audio_component_unbind,
+};
+
+void radeon_audio_component_init(struct radeon_device *rdev)
+{
+ if (rdev->audio.component_registered ||
+ !radeon_audio || !radeon_audio_chipset_supported(rdev))
+ return;
+
+ if (!component_add(rdev->dev, &radeon_audio_component_bind_ops))
+ rdev->audio.component_registered = true;
+}
+
+void radeon_audio_component_fini(struct radeon_device *rdev)
+{
+ if (rdev->audio.component_registered) {
+ component_del(rdev->dev, &radeon_audio_component_bind_ops);
+ rdev->audio.component_registered = false;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 33121655d50b..1d99c9a2b56e 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -612,13 +612,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
unsigned offset;
+ bool r = false;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
- return false;
+ goto out;
}
vfct = (UEFI_ACPI_VFCT *)hdr;
@@ -631,13 +632,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
offset += sizeof(VFCT_IMAGE_HEADER);
if (offset > tbl_size) {
DRM_ERROR("ACPI VFCT image header truncated\n");
- return false;
+ goto out;
}
offset += vhdr->ImageLength;
if (offset > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
- return false;
+ goto out;
}
if (vhdr->ImageLength &&
@@ -649,15 +650,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
rdev->bios = kmemdup(&vbios->VbiosContent,
vhdr->ImageLength,
GFP_KERNEL);
+ if (rdev->bios)
+ r = true;
- if (!rdev->bios)
- return false;
- return true;
+ goto out;
}
}
DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
- return false;
+
+out:
+ acpi_put_table(hdr);
+ return r;
}
#else
static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
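
Every early return between acpi_get_table() and the end of the function previously leaked the mapped table; funneling all exits through a single out: label guarantees acpi_put_table() runs exactly once. A standalone sketch of the pattern, with stubbed acquire/release standing in for the ACPI calls:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *acquire_table(void) { return malloc(16); }
static void put_table(void *h) { free(h); puts("table released"); }

static bool parse_table(bool valid)
{
	bool r = false;
	void *h = acquire_table();

	if (!h)
		return false;	/* nothing acquired yet, plain return is fine */

	if (!valid)
		goto out;	/* was: return false -> leaked the table */

	r = true;		/* success path funnels through out: too */
out:
	put_table(h);
	return r;
}

int main(void)
{
	printf("broken table: %d\n", parse_table(false));
	printf("good table: %d\n", parse_table(true));
	return 0;
}
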
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 262e2bcb30c3..6344454a7721 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1312,6 +1312,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
+ mutex_init(&rdev->audio.component_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1451,6 +1452,8 @@ int radeon_device_init(struct radeon_device *rdev,
goto failed;
}
+ radeon_audio_component_init(rdev);
+
r = radeon_ib_ring_tests(rdev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1513,6 +1516,7 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->shutdown = true;
/* evict vram memory */
radeon_bo_evict_vram(rdev);
+ radeon_audio_component_fini(rdev);
radeon_fini(rdev);
if (!pci_is_thunderbolt_attached(rdev->pdev))
vga_switcheroo_unregister_client(rdev->pdev);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 6ce04c2e90c0..31f3a1267be4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -417,27 +417,6 @@ static void drm_sched_job_timedout(struct work_struct *work)
}
}
- /**
- * drm_sched_increase_karma - Update sched_entity guilty flag
- *
- * @bad: The job guilty of time out
- *
- * Increment on every hang caused by the 'bad' job. If this exceeds the hang
- * limit of the scheduler then the respective sched entity is marked guilty and
- * jobs from it will not be scheduled further
- */
-void drm_sched_increase_karma(struct drm_sched_job *bad)
-{
- drm_sched_increase_karma_ext(bad, 1);
-}
-EXPORT_SYMBOL(drm_sched_increase_karma);
-
-void drm_sched_reset_karma(struct drm_sched_job *bad)
-{
- drm_sched_increase_karma_ext(bad, 0);
-}
-EXPORT_SYMBOL(drm_sched_reset_karma);
-
/**
* drm_sched_stop - stop the scheduler
*
@@ -579,31 +558,14 @@ EXPORT_SYMBOL(drm_sched_start);
*/
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
- drm_sched_resubmit_jobs_ext(sched, INT_MAX);
-}
-EXPORT_SYMBOL(drm_sched_resubmit_jobs);
-
-/**
- * drm_sched_resubmit_jobs_ext - helper to relunch certain number of jobs from mirror ring list
- *
- * @sched: scheduler instance
- * @max: job numbers to relaunch
- *
- */
-void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
-{
struct drm_sched_job *s_job, *tmp;
uint64_t guilty_context;
bool found_guilty = false;
struct dma_fence *fence;
- int i = 0;
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
- if (i >= max)
- break;
-
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
found_guilty = true;
guilty_context = s_job->s_fence->scheduled.context;
@@ -613,7 +575,6 @@ void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
fence = sched->ops->run_job(s_job);
- i++;
if (IS_ERR_OR_NULL(fence)) {
if (IS_ERR(fence))
@@ -629,7 +590,7 @@ void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
}
}
}
-EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
* drm_sched_job_init - init a scheduler job
@@ -1165,13 +1126,15 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
EXPORT_SYMBOL(drm_sched_fini);
/**
- * drm_sched_increase_karma_ext - Update sched_entity guilty flag
+ * drm_sched_increase_karma - Update sched_entity guilty flag
*
* @bad: The job guilty of time out
- * @type: type for increase/reset karma
*
+ * Increment on every hang caused by the 'bad' job. If this exceeds the hang
+ * limit of the scheduler then the respective sched entity is marked guilty and
+ * jobs from it will not be scheduled further
*/
-void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
+void drm_sched_increase_karma(struct drm_sched_job *bad)
{
int i;
struct drm_sched_entity *tmp;
@@ -1183,10 +1146,7 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
 * corrupt but keep in mind that kernel jobs are always considered good.
*/
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
- if (type == 0)
- atomic_set(&bad->karma, 0);
- else if (type == 1)
- atomic_inc(&bad->karma);
+ atomic_inc(&bad->karma);
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
i++) {
@@ -1197,7 +1157,7 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
if (bad->s_fence->scheduled.context ==
entity->fence_context) {
if (entity->guilty)
- atomic_set(entity->guilty, type);
+ atomic_set(entity->guilty, 1);
break;
}
}
@@ -1207,4 +1167,4 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
}
}
}
-EXPORT_SYMBOL(drm_sched_increase_karma_ext);
+EXPORT_SYMBOL(drm_sched_increase_karma);
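
With the _ext variants gone, the karma logic is back to its simple form: every hang attributed to a job increments its karma, and once that exceeds the scheduler's hang limit the owning entity is flagged guilty and stops being scheduled. A toy sketch of the counting, with invented types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct job { atomic_int karma; };

static bool increase_karma(struct job *bad, int hang_limit)
{
	atomic_fetch_add(&bad->karma, 1);
	return atomic_load(&bad->karma) > hang_limit;	/* guilty? */
}

int main(void)
{
	struct job j = { 0 };

	for (int i = 1; i <= 4; i++)
		printf("hang %d -> guilty=%d\n", i, increase_karma(&j, 2));
	return 0;
}
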