Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/vector.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 266
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 1136
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 303
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c | 185
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 194
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 231
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 111
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c | 200
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 229
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c | 84
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 295
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 245
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 9
129 files changed, 3986 insertions(+), 1528 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 352e9afb85c6..e295a839ab47 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -24,7 +24,7 @@
*/
#include "dm_services.h"
-#include "conversion.h"
+#include "basics/conversion.h"
#define DIVIDER 10000
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 84aeccf36b4b..6d2924114a3e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -50,12 +50,11 @@ bool dal_vector_construct(
return true;
}
-static bool dal_vector_presized_costruct(
- struct vector *vector,
- struct dc_context *ctx,
- uint32_t count,
- void *initial_value,
- uint32_t struct_size)
+static bool dal_vector_presized_costruct(struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t count,
+ void *initial_value,
+ uint32_t struct_size)
{
uint32_t i;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 27af9d3c2b73..6b3190447581 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -96,7 +96,7 @@ struct dc_bios *bios_parser_create(
struct bp_init_data *init,
enum dce_version dce_version)
{
- struct bios_parser *bp = NULL;
+ struct bios_parser *bp;
bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
if (!bp)
@@ -2576,7 +2576,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
struct dc_bios *dcb)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- struct integrated_info *info = NULL;
+ struct integrated_info *info;
info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
@@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
-static enum bp_result update_slot_layout_info(
- struct dc_bios *dcb,
- unsigned int i,
- struct slot_layout_info *slot_layout_info,
- unsigned int record_offset)
+static enum bp_result update_slot_layout_info(struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info,
+ unsigned int record_offset)
{
unsigned int j;
struct bios_parser *bp;
@@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info(
}
-static enum bp_result get_bracket_layout_record(
- struct dc_bios *dcb,
- unsigned int bracket_layout_id,
- struct slot_layout_info *slot_layout_info)
+static enum bp_result get_bracket_layout_record(struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
{
unsigned int i;
unsigned int record_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index cce47d3f1a13..484d62bcf2c2 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object(
}
/* from graphics_object_id, find display path which includes the object_id */
-static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(
- struct bios_parser *bp,
- struct graphics_object_id id)
+static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp,
+ struct graphics_object_id id)
{
unsigned int i;
struct graphics_object_id obj_id = {0};
@@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info(
return BP_RESULT_OK;
}
-static struct atom_hpd_int_record *get_hpd_record_for_path_v3(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -774,20 +772,20 @@ static enum bp_result bios_parser_get_device_tag(
return BP_RESULT_BADINPUT;
switch (bp->object_info_tbl.revision.minor) {
- case 4:
- default:
+ case 4:
+ default:
/* getBiosObject will return MXM object */
- object = get_bios_object(bp, connector_object_id);
+ object = get_bios_object(bp, connector_object_id);
if (!object) {
BREAK_TO_DEBUGGER(); /* Invalid object id */
return BP_RESULT_BADINPUT;
}
- info->acpi_device = 0; /* BIOS no longer provides this */
- info->dev_id = device_type_from_device_id(object->device_tag);
- break;
- case 5:
+ info->acpi_device = 0; /* BIOS no longer provides this */
+ info->dev_id = device_type_from_device_id(object->device_tag);
+ break;
+ case 5:
object_path_v3 = get_bios_object_from_path_v3(bp, connector_object_id);
if (!object_path_v3) {
@@ -1582,13 +1580,13 @@ static bool bios_parser_is_device_id_supported(
uint32_t mask = get_support_mask_for_device_id(id);
switch (bp->object_info_tbl.revision.minor) {
- case 4:
- default:
- return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0;
- break;
- case 5:
- return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
- break;
+ case 4:
+ default:
+ return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0;
+ break;
+ case 5:
+ return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
+ break;
}
return false;
@@ -1757,7 +1755,7 @@ static enum bp_result bios_parser_get_firmware_info(
case 2:
case 3:
result = get_firmware_info_v3_2(bp, info);
- break;
+ break;
case 4:
result = get_firmware_info_v3_4(bp, info);
break;
@@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record(
return NULL;
}
-static struct atom_connector_caps_record *get_connector_caps_record(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -2228,7 +2225,7 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
return BP_RESULT_BADINPUT;
switch (bp->object_info_tbl.revision.minor) {
- case 4:
+ case 4:
default:
object = get_bios_object(bp, object_id);
@@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
return BP_RESULT_OK;
}
-static struct atom_connector_speed_record *get_connector_speed_cap_record(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -3090,7 +3086,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
struct dc_bios *dcb)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- struct integrated_info *info = NULL;
+ struct integrated_info *info;
info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
@@ -3679,7 +3675,7 @@ struct dc_bios *firmware_parser_create(
struct bp_init_data *init,
enum dce_version dce_version)
{
- struct bios_parser *bp = NULL;
+ struct bios_parser *bp;
bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
if (!bp)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 6127d6045336..dcedf9645161 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -117,6 +117,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
+ dc->link_srv->edp_set_replay_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -137,6 +138,8 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
continue;
dc->link_srv->edp_set_psr_allow_active(edp_link,
&clk_mgr->psr_allow_active_cache, false, false, NULL);
+ dc->link_srv->edp_set_replay_allow_active(edp_link,
+ &clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 7ccd96959256..3db4ef564b99 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
+
+ /* Checking stream / link detection ensuring that PHY is active*/
+ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+ display_count++;
+
}
for (i = 0; i < dc->link_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 2f7c8996b19d..7326b7565846 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -783,7 +783,6 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
/*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
- //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);
clk_mgr->base.base.bw_params = &dcn314_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 925d6e13620e..3e0da873cf4c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -33,28 +33,26 @@
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index d7de756301cf..09151cc56ce4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -45,24 +45,14 @@
#define MAX_INSTANCE 7
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
-static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } },
- { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } },
- { { 0x00017000, 0x02402000, 0, 0, 0, 0 } },
- { { 0x00017200, 0x02402400, 0, 0, 0, 0 } },
- { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } },
- { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } },
- { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } };
-
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -73,9 +63,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
-#define REG(reg_name) \
- (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-
#define TO_CLK_MGR_DCN316(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn316, base)
@@ -577,36 +564,6 @@ static struct clk_mgr_funcs dcn316_funcs = {
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
-static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
-{
- /* get FbMult value */
- struct fixed31_32 pll_req;
- unsigned int fbmult_frac_val = 0;
- unsigned int fbmult_int_val = 0;
-
- /*
- * Register value of fbmult is in 8.16 format, we are converting to 31.32
- * to leverage the fix point operations available in driver
- */
-
- REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
- REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
-
- pll_req = dc_fixpt_from_int(fbmult_int_val);
-
- /*
- * since fractional part is only 16 bit in register definition but is 32 bit
- * in our fix point definiton, need to shift left by 16 to obtain correct value
- */
- pll_req.value |= fbmult_frac_val << 16;
-
- /* multiply by REFCLK period */
- pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
-
- /* integer part is now VCO frequency in kHz */
- return dc_fixpt_floor(pll_req);
-}
-
void dcn316_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn316 *clk_mgr,
@@ -660,7 +617,8 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.smu_present = true;
// Skip this for now as it did not work on DCN315, renable during bring up
- clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ clk_mgr->base.base.dentist_vco_freq_khz = 2500000;
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
index 457a9254ae1c..3ed19197a755 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
@@ -34,23 +34,21 @@
#define MAX_INSTANCE 7
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index cb992aca760d..984b52923534 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -297,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz, prev_dppclk_khz;
+ int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
@@ -555,6 +555,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, true);
+ else
+ dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, false);
+
/* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
update_fclk = true;
@@ -802,7 +807,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
+ clk_mgr_base->bw_params->max_memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index fb524fe4ab26..700ce42036d7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -139,3 +139,10 @@ unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, ui
return response;
}
+
+void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ smu_print("PMFW to wait for DMCUB ack for MCLK : %d\n", enable);
+
+ dcn32_smu_send_msg_with_param(clk_mgr, 0x14, enable ? 1 : 0, NULL);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
index a68038a41972..a34c258c19dc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
@@ -43,5 +43,6 @@ void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
+void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable);
#endif /* __DCN32_CLK_MGR_SMU_MSG_H_ */
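A side note on the dcn32_update_clocks() hunk above: since the new dcn32_smu_wait_for_dmub_ack_mclk() helper already takes a bool, the added if/else is equivalent to a single call. A sketch of that equivalent fragment, reusing the variables from that hunk (a style observation, not part of the patch):

	/* equivalent to the if/else added in dcn32_update_clocks() */
	dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr,
			context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching);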
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d133e4186a52..566d7045b2de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -586,18 +586,15 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
struct crc_params *crc_window, bool enable, bool continuous)
{
- int i;
struct pipe_ctx *pipe;
struct crc_params param;
struct timing_generator *tg;
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
- break;
- }
+ pipe = resource_get_otg_master_for_stream(
+ &dc->current_state->res_ctx, stream);
+
/* Stream not found */
- if (i == MAX_PIPES)
+ if (pipe == NULL)
return false;
/* By default, capture the full frame */
@@ -1047,8 +1044,10 @@ static void disable_all_writeback_pipes_for_stream(
stream->writeback_info[i].wb_enabled = false;
}
-static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
- struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ bool lock)
{
int i;
@@ -1062,7 +1061,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
// Copied conditions that were previously in dce110_apply_ctx_for_surface
if (stream == pipe_ctx->stream) {
- if (!pipe_ctx->top_pipe &&
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
}
@@ -1923,6 +1922,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_trigger_sync(dc, context);
+ /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
+ for (i = 0; i < context->stream_count; i++) {
+ uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
+
+ context->streams[i]->update_flags.raw = 0xFFFFFFFF;
+ context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
+ }
+
/* Program all planes within new context*/
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
@@ -2001,6 +2008,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
+ /* Clear update flags that were set earlier to avoid redundant programming */
+ for (i = 0; i < context->stream_count; i++) {
+ context->streams[i]->update_flags.raw = 0x0;
+ }
+
old_state = dc->current_state;
dc->current_state = context;
@@ -2480,9 +2492,7 @@ static enum surface_update_type get_scaling_info_update_type(
if (!u->scaling_info)
return UPDATE_TYPE_FAST;
- if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
- || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
- || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
|| u->scaling_info->scaling_quality.integer_scaling !=
u->surface->scaling_quality.integer_scaling
@@ -2686,96 +2696,6 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
-static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
-{
- int view_height, view_width, clip_x, clip_y, clip_width, clip_height;
-
- view_height = src.height;
- view_width = src.width;
-
- clip_x = clip_rect.x;
- clip_y = clip_rect.y;
-
- clip_width = clip_rect.width;
- clip_height = clip_rect.height;
-
- /* check for centered video accounting for off by 1 scaling truncation */
- if ((view_height - clip_y - clip_height <= clip_y + 1) &&
- (view_width - clip_x - clip_width <= clip_x + 1) &&
- (view_height - clip_y - clip_height >= clip_y - 1) &&
- (view_width - clip_x - clip_width >= clip_x - 1)) {
-
- /* when OS scales up/down to letter box, it may end up
- * with few blank pixels on the border due to truncating.
- * Add offset margin to account for this
- */
- if (clip_x <= 4 || clip_y <= 4)
- return true;
- }
-
- return false;
-}
-
-static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- enum surface_update_type update_type)
-{
- enum surface_update_type new_update_type = update_type;
- int i, j;
- struct pipe_ctx *pipe = NULL;
- struct dc_stream_state *stream;
-
- /* Check that we are in windowed MPO with ODM
- * - look for MPO pipe by scanning pipes for first pipe matching
- * surface that has moved ( position change )
- * - MPO pipe will have top pipe
- * - check that top pipe has ODM pointer
- */
- if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
- for (i = 0; i < surface_count; i++) {
- if (srf_updates[i].surface && srf_updates[i].scaling_info
- && srf_updates[i].surface->update_flags.bits.position_change) {
-
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[j];
- stream = pipe->stream;
- break;
- }
- }
-
- if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
- && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
- struct rect old_clip_rect, new_clip_rect;
- bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
- bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;
-
- old_clip_rect = srf_updates[i].surface->clip_rect;
- new_clip_rect = srf_updates[i].scaling_info->clip_rect;
-
- old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
- old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
- old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;
-
- new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
- new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
- new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;
-
- if (old_clip_rect_left && new_clip_rect_middle)
- new_update_type = UPDATE_TYPE_FULL;
- else if (old_clip_rect_middle && new_clip_rect_right)
- new_update_type = UPDATE_TYPE_FULL;
- else if (old_clip_rect_right && new_clip_rect_middle)
- new_update_type = UPDATE_TYPE_FULL;
- else if (old_clip_rect_middle && new_clip_rect_left)
- new_update_type = UPDATE_TYPE_FULL;
- }
- }
- }
- }
- return new_update_type;
-}
-
/*
* dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
*
@@ -2807,10 +2727,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
- if (type == UPDATE_TYPE_MED)
- type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
- updates, surface_count, type);
-
if (type == UPDATE_TYPE_FAST) {
// If there's an available clock comparator, we use that.
if (dc->clk_mgr->funcs->are_clock_states_equal) {
@@ -3245,7 +3161,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
@@ -3370,6 +3286,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
&& stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
+ if (stream->link->replay_settings.config.replay_supported)
+ return true;
+
return false;
}
@@ -3524,16 +3443,9 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
dc_z10_restore(dc);
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- if (!pipe_ctx->top_pipe &&
- !pipe_ctx->prev_odm_pipe &&
- pipe_ctx->stream &&
- pipe_ctx->stream == stream) {
- top_pipe_to_program = pipe_ctx;
- }
- }
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
if (dc->debug.visual_confirm) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -3582,9 +3494,9 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->block_sequence_steps);
/* Clear update flags so next flip doesn't have redundant programming
* (if there's no stream update, the update flags are not cleared).
+ * Surface updates are cleared unconditionally at the beginning of each flip,
+ * so no need to clear here.
*/
- if (top_pipe_to_program->plane_state)
- top_pipe_to_program->plane_state->update_flags.raw = 0;
if (top_pipe_to_program->stream)
top_pipe_to_program->stream->update_flags.raw = 0;
}
@@ -3638,16 +3550,9 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- if (!pipe_ctx->top_pipe &&
- !pipe_ctx->prev_odm_pipe &&
- pipe_ctx->stream &&
- pipe_ctx->stream == stream) {
- top_pipe_to_program = pipe_ctx;
- }
- }
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4088,9 +3993,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_mpc_policy;
- bool temp_dynamic_odm_policy;
- bool temp_subvp_policy;
+ enum pipe_split_policy tmp_mpc_policy = 0;
+ bool temp_dynamic_odm_policy = 0;
+ bool temp_subvp_policy = 0;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
@@ -4284,7 +4189,8 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c
return false;
}
-static bool full_update_required(struct dc_surface_update *srf_updates,
+static bool full_update_required(struct dc *dc,
+ struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update,
struct dc_stream_state *stream)
@@ -4292,6 +4198,7 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
int i;
struct dc_stream_status *stream_status;
+ const struct dc_state *context = dc->current_state;
for (i = 0; i < surface_count; i++) {
if (srf_updates &&
@@ -4302,7 +4209,11 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
srf_updates[i].in_transfer_func ||
srf_updates[i].func_shaper ||
srf_updates[i].lut3d_func ||
- srf_updates[i].blend_tf))
+ srf_updates[i].blend_tf ||
+ srf_updates[i].surface->force_full_update ||
+ (srf_updates[i].flip_addr &&
+ srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+ !is_surface_in_context(context, srf_updates[i].surface)))
return true;
}
@@ -4340,18 +4251,21 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
if (stream_status == NULL || stream_status->plane_count != surface_count)
return true;
}
+ if (dc->idle_optimizations_allowed)
+ return true;
return false;
}
-static bool fast_update_only(struct dc_fast_update *fast_update,
+static bool fast_update_only(struct dc *dc,
+ struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update,
struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
- && !full_update_required(srf_updates, surface_count, stream_update, stream);
+ && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
bool dc_update_planes_and_stream(struct dc *dc,
@@ -4369,8 +4283,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
- bool force_minimal_pipe_splitting;
- bool is_plane_addition;
+ bool force_minimal_pipe_splitting = 0;
+ bool is_plane_addition = 0;
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
@@ -4423,7 +4337,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
}
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
!dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
@@ -4569,7 +4483,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
!dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
@@ -5245,6 +5159,9 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (link->psr_settings.psr_feature_enabled)
return;
+ if (link->replay_settings.replay_feature_enabled)
+ return;
+
/*find primary pipe associated with stream*/
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -5273,3 +5190,70 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
+
+/*****************************************************************************
+ * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
+ * ABM
+ * @dc: dc structure
+ * @stream: stream where vsync int state changed
+ * @pData: abm hw states
+ *
+ ****************************************************************************/
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+
+ /*find primary pipe associated with stream*/
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return false;
+ }
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++)
+ if (edp_links[i] == link)
+ break;
+
+ if (i == edp_num)
+ return false;
+
+ if (pipe->stream_res.abm &&
+ pipe->stream_res.abm->funcs->save_restore)
+ return pipe->stream_res.abm->funcs->save_restore(
+ pipe->stream_res.abm,
+ i,
+ pData);
+ return false;
+}
+
+void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
+{
+ unsigned int i;
+ bool subvp_in_use = false;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) {
+ subvp_in_use = true;
+ break;
+ }
+ }
+ properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
+}
+
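Several hunks in this file replace open-coded pipe searches with the new resource_get_otg_master_for_stream() helper. Judging from the loops removed above, the lookup amounts to finding the pipe that carries the stream and is neither an MPC slave nor an ODM slave; a minimal sketch of that idea (illustrative only, the real helper lives in dc_resource.c and may differ in detail):

	static struct pipe_ctx *find_otg_master_sketch(struct resource_context *res_ctx,
			struct dc_stream_state *stream)
	{
		int i;

		for (i = 0; i < MAX_PIPES; i++) {
			struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

			/* OTG master: owns the stream, has no top_pipe (so it is not
			 * an MPC slave) and no prev_odm_pipe (not an ODM slave)
			 */
			if (pipe_ctx->stream == stream &&
					!pipe_ctx->top_pipe &&
					!pipe_ctx->prev_odm_pipe)
				return pipe_ctx;
		}
		return NULL;
	}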
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index cb2bf9a466f5..f99ec1b0efaf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -187,6 +187,7 @@ static bool is_ycbcr709_limited_type(
ret = true;
return ret;
}
+
static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
{
enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 18e098568cb4..ed94187c2afa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link)
return link->dc->link_srv->dp_get_verified_link_cap(link);
}
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link)
+{
+ if (dc_is_dp_signal(link->connector_signal)) {
+ if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE &&
+ link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE)
+ return DC_LINK_ENCODING_HDMI_TMDS;
+ else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+ DP_8b_10b_ENCODING)
+ return DC_LINK_ENCODING_DP_8b_10b;
+ else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+ DP_128b_132b_ENCODING)
+ return DC_LINK_ENCODING_DP_128b_132b;
+ } else if (dc_is_hdmi_signal(link->connector_signal)) {
+ }
+
+ return DC_LINK_ENCODING_UNSPECIFIED;
+}
+
bool dc_link_is_dp_sink_present(struct dc_link *link)
{
return link->dc->link_srv->dp_is_sink_present(link);
@@ -449,6 +467,11 @@ bool dc_link_setup_psr(struct dc_link *link,
return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
}
+bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state)
+{
+ return link->dc->link_srv->edp_get_replay_state(link, state);
+}
+
bool dc_link_wait_for_t12(struct dc_link *link)
{
return link->dc->link_srv->edp_wait_for_t12(link);
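The new dc_link_get_highest_encoding_format() export above maps a link's verified capabilities onto one of the DC_LINK_ENCODING_* values. A hypothetical caller, only to illustrate how the export might be consumed (not taken from the patch):

	/* hypothetical helper built on the new export; returns true when the
	 * highest encoding the link supports is DP 128b/132b (i.e. UHBR rates)
	 */
	static bool link_is_dp_128b_132b_sketch(const struct dc_link *link)
	{
		return dc_link_get_highest_encoding_format(link) ==
				DC_LINK_ENCODING_DP_128b_132b;
	}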
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 2f3d9a698486..f7b51aca6020 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -45,6 +45,8 @@
#include "link/hwss/link_hwss_dio.h"
#include "link/hwss/link_hwss_dpia.h"
#include "link/hwss/link_hwss_hpo_dp.h"
+#include "link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h"
+#include "link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/dce60_resource.h"
@@ -69,9 +71,20 @@
#include "../dcn32/dcn32_resource.h"
#include "../dcn321/dcn321_resource.h"
+#define VISUAL_CONFIRM_BASE_DEFAULT 3
+#define VISUAL_CONFIRM_BASE_MIN 1
+#define VISUAL_CONFIRM_BASE_MAX 10
+/* we choose 240 because it is a common denominator of common v addressable
+ * such as 2160, 1440, 1200, 960. So we take 1/240 portion of v addressable as
+ * the visual confirm dpp offset height. So visual confirm height can stay
+ * relatively the same independent from timing used.
+ */
+#define VISUAL_CONFIRM_DPP_OFFSET_DENO 240
#define DC_LOGGER_INIT(logger)
+#define UNABLE_TO_SPLIT -1
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -719,10 +732,10 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
-int get_num_mpc_splits(struct pipe_ctx *pipe)
+int resource_get_num_mpc_splits(const struct pipe_ctx *pipe)
{
int mpc_split_count = 0;
- struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+ const struct pipe_ctx *other_pipe = pipe->bottom_pipe;
while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
mpc_split_count++;
@@ -737,48 +750,46 @@ int get_num_mpc_splits(struct pipe_ctx *pipe)
return mpc_split_count;
}
-int get_num_odm_splits(struct pipe_ctx *pipe)
+int resource_get_num_odm_splits(const struct pipe_ctx *pipe)
{
int odm_split_count = 0;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
- while (next_pipe) {
- odm_split_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
+
+ pipe = resource_get_otg_master(pipe);
+
+ while (pipe->next_odm_pipe) {
odm_split_count++;
- pipe = pipe->prev_odm_pipe;
+ pipe = pipe->next_odm_pipe;
}
return odm_split_count;
}
-static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+static int get_odm_split_index(struct pipe_ctx *pipe_ctx)
{
- *split_count = get_num_odm_splits(pipe_ctx);
- *split_idx = 0;
- if (*split_count == 0) {
- /*Check for mpc split*/
- struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
-
- *split_count = get_num_mpc_splits(pipe_ctx);
- while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
- (*split_idx)++;
- split_pipe = split_pipe->top_pipe;
- }
+ int index = 0;
- /* MPO window on right side of ODM split */
- if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe)
- (*split_idx)++;
- } else {
- /*Get odm split index*/
- struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+ pipe_ctx = resource_get_opp_head(pipe_ctx);
+ if (!pipe_ctx)
+ return 0;
- while (split_pipe) {
- (*split_idx)++;
- split_pipe = split_pipe->prev_odm_pipe;
- }
+ while (pipe_ctx->prev_odm_pipe) {
+ index++;
+ pipe_ctx = pipe_ctx->prev_odm_pipe;
+ }
+
+ return index;
+}
+
+static int get_mpc_split_index(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+ int index = 0;
+
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ index++;
+ split_pipe = split_pipe->top_pipe;
}
+
+ return index;
}
/*
@@ -800,82 +811,366 @@ static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
}
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx)
+static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
{
- const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- const struct dc_stream_state *stream = pipe_ctx->stream;
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect surf_clip = plane_state->clip_rect;
- bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- int split_count, split_idx;
+ struct rect rec;
+ int r0_x_end = r0->x + r0->width;
+ int r1_x_end = r1->x + r1->width;
+ int r0_y_end = r0->y + r0->height;
+ int r1_y_end = r1->y + r1->height;
+
+ rec.x = r0->x > r1->x ? r0->x : r1->x;
+ rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x;
+ rec.y = r0->y > r1->y ? r0->y : r1->y;
+ rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y;
+
+ /* in case that there is no intersection */
+ if (rec.width < 0 || rec.height < 0)
+ memset(&rec, 0, sizeof(rec));
+
+ return rec;
+}
- calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
- split_idx = 0;
+static struct rect shift_rec(const struct rect *rec_in, int x, int y)
+{
+ struct rect rec_out = *rec_in;
+ rec_out.x += x;
+ rec_out.y += y;
+
+ return rec_out;
+}
+
+static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ int odm_slice_count = resource_get_num_odm_splits(pipe_ctx) + 1;
+ int odm_slice_idx = get_odm_split_index(pipe_ctx);
+ bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count;
+ int h_active = stream->timing.h_addressable +
+ stream->timing.h_border_left +
+ stream->timing.h_border_right;
+ int odm_slice_width = h_active / odm_slice_count;
+ struct rect odm_rec;
+
+ odm_rec.x = odm_slice_width * odm_slice_idx;
+ odm_rec.width = is_last_odm_slice ?
+ /* last slice width is the remainder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
+ odm_rec.y = 0;
+ odm_rec.height = stream->timing.v_addressable +
+ stream->timing.v_border_bottom +
+ stream->timing.v_border_top;
+
+ return odm_rec;
+}
+
+static struct rect calculate_plane_rec_in_timing_active(
+ struct pipe_ctx *pipe_ctx,
+ const struct rect *rec_in)
+{
/*
- * Only the leftmost ODM pipe should be offset by a nonzero distance
+ * The following diagram shows an example where we map a 1920x1200
+ * desktop to a 2560x1440 timing with a plane rect in the middle
+ * of the screen. To map a plane rect from Stream Source to Timing
+ * Active space, we first multiply stream scaling ratios (i.e 2304/1920
+ * horizontal and 1440/1200 vertical) to the plane's x and y, then
+ * we add stream destination offsets (i.e 128 horizontal, 0 vertical).
+ * This will give us a plane rect's position in Timing Active. However
+ * we have to remove the fractional. The rule is that we find left/right
+ * and top/bottom positions and round the value to the adjacent integer.
+ *
+ * Stream Source Space
+ * ------------
+ * __________________________________________________
+ * |Stream Source (1920 x 1200) ^ |
+ * | y |
+ * | <------- w --------|> |
+ * | __________________V |
+ * |<-- x -->|Plane//////////////| ^ |
+ * | |(pre scale)////////| | |
+ * | |///////////////////| | |
+ * | |///////////////////| h |
+ * | |///////////////////| | |
+ * | |///////////////////| | |
+ * | |///////////////////| V |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ *
+ * Timing Active Space
+ * ---------------------------------
+ *
+ * Timing Active (2560 x 1440)
+ * __________________________________________________
+ * |*****| Stteam Destination (2304 x 1440) |*****|
+ * |*****| |*****|
+ * |<128>| |*****|
+ * |*****| __________________ |*****|
+ * |*****| |Plane/////////////| |*****|
+ * |*****| |(post scale)//////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |*****|
+ * |*****| |*****|
+ * |*****| |*****|
+ * |*****|______________________________________|*****|
+ *
+ * So the resulting formulas are shown below:
+ *
+ * recout_x = 128 + round(plane_x * 2304 / 1920)
+ * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
+ * recout_y = 0 + round(plane_y * 1440 / 1200)
+ * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
+ *
+ * NOTE: fixed point division is not error free. To reduce errors
+ * introduced by fixed point division, we divide only after
+ * multiplication is complete.
*/
- if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
- /* MPO window on right side of ODM split */
- data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) *
- stream->dst.width / stream->src.width;
- } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
- data->recout.x = stream->dst.x;
- if (stream->src.x < surf_clip.x)
- data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
- / stream->src.width;
- } else
- data->recout.x = 0;
-
- if (stream->src.x > surf_clip.x)
- surf_clip.width -= stream->src.x - surf_clip.x;
- data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
- if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
- data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
-
- data->recout.y = stream->dst.y;
- if (stream->src.y < surf_clip.y)
- data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
- / stream->src.height;
- else if (stream->src.y > surf_clip.y)
- surf_clip.height -= stream->src.y - surf_clip.y;
-
- data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
- if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
- data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
-
- /* Handle h & v split */
- if (split_tb) {
- ASSERT(data->recout.height % 2 == 0);
- data->recout.height /= 2;
- } else if (split_count) {
- if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
- /* extra pixels in the division remainder need to go to pipes after
- * the extra pixel index minus one(epimo) defined here as:
- */
- int epimo = split_count - data->recout.width % (split_count + 1);
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect rec_out = {0};
+ struct fixed31_32 temp;
- data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
- if (split_idx > epimo)
- data->recout.x += split_idx - epimo - 1;
- ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
- data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
- } else {
- /* odm */
- if (split_idx == split_count) {
- /* rightmost pipe is the remainder recout */
- data->recout.width -= data->h_active * split_count - data->recout.x;
-
- /* ODM combine cases with MPO we can get negative widths */
- if (data->recout.width < 0)
- data->recout.width = 0;
-
- data->recout.x = 0;
- } else
- data->recout.width = data->h_active - data->recout.x;
- }
+ temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width,
+ stream->src.width);
+ rec_out.x = stream->dst.x + dc_fixpt_round(temp);
+
+ temp = dc_fixpt_from_fraction(
+ (rec_in->x + rec_in->width) * stream->dst.width,
+ stream->src.width);
+ rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x;
+
+ temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height,
+ stream->src.height);
+ rec_out.y = stream->dst.y + dc_fixpt_round(temp);
+
+ temp = dc_fixpt_from_fraction(
+ (rec_in->y + rec_in->height) * stream->dst.height,
+ stream->src.height);
+ rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y;
+
+ return rec_out;
+}
+
+static struct rect calculate_mpc_slice_in_timing_active(
+ struct pipe_ctx *pipe_ctx,
+ struct rect *plane_clip_rec)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ int mpc_slice_count = resource_get_num_mpc_splits(pipe_ctx) + 1;
+ int mpc_slice_idx = get_mpc_split_index(pipe_ctx);
+ int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
+ struct rect mpc_rec;
+
+ mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ ASSERT(mpc_slice_count == 1 ||
+ stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ mpc_rec.width % 2 == 0);
+
+ /* extra pixels in the division remainder need to go to the pipes
+ * after the extra pixel index minus one (epimo), computed above.
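+ *
+ * As a hypothetical worked example (values not from this change): a
+ * 7 pixel wide plane clip split across 3 MPC slices gives a base
+ * width of 7 / 3 = 2 and epimo = 3 - 7 % 3 - 1 = 1, so slices 0 and 1
+ * keep width 2 at x offsets 0 and 2, while slice 2 (index > epimo)
+ * takes the remainder pixel: x offset 4, width 3.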
+ */
+ if (mpc_slice_idx > epimo) {
+ mpc_rec.x += mpc_slice_idx - epimo - 1;
+ mpc_rec.width += 1;
+ }
+
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ ASSERT(mpc_rec.height % 2 == 0);
+ mpc_rec.height /= 2;
+ }
+ return mpc_rec;
+}
+
+static void adjust_recout_for_visual_confirm(struct rect *recout,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ int dpp_offset, base_offset;
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
+ return;
+
+ dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
+ dpp_offset *= pipe_ctx->plane_res.dpp->inst;
+
+ if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) &&
+ dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX)
+ base_offset = dc->debug.visual_confirm_rect_height;
+ else
+ base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
+
+ recout->height -= base_offset;
+ recout->height -= dpp_offset;
+}
+
+/*
+ * The function maps a plane clip from Stream Source Space to ODM Slice Space
+ * and calculates the rec of the overlapping area between the MPC slice of the
+ * plane clip, the ODM slice associated with the pipe context and the stream
+ * destination rec.
+ */
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
+{
+ /*
+ * A plane clip represents the desired plane size and position in Stream
+ * Source Space. Stream Source is the destination where all planes are
+ * blended (i.e. positioned, scaled and overlaid). It is a canvas where
+ * all planes associated with the current stream are drawn together.
+ * After Stream Source is completed, we will further scale and
+ * reposition the entire canvas of the stream source to Stream
+ * Destination in Timing Active Space. This could be due to display
+ * overscan adjustment where we will need to rescale and reposition all
+ * the planes so they can fit into a TV with overscan, or to
+ * downscale/upscale features such as GPU scaling or VSR.
+ *
+ * This two-step blending is a virtual procedure in software. In
+ * hardware there is no such thing as Stream Source. All planes are
+ * blended once in Timing Active Space. Software virtualizes a Stream
+ * Source space to decouple the math complexity so scaling parameter
+ * calculation focuses on one step at a time.
+ *
+ * In the following two diagrams, the user has applied a 10% overscan
+ * adjustment, so the Stream Source needs to be scaled down a little
+ * before mapping to Timing Active Space. As a result the Plane Clip is
+ * scaled down by the same ratio, and the Plane Clip position (i.e. x
+ * and y) with respect to Stream Source is scaled down as well. To map
+ * it into Timing Active Space, additional x and y offsets from Stream
+ * Destination are added to the Plane Clip.
+ *
+ * Stream Source Space
+ * ------------
+ * __________________________________________________
+ * |Stream Source (3840 x 2160) ^ |
+ * | y |
+ * | | |
+ * | __________________V |
+ * |<-- x -->|Plane Clip/////////| |
+ * | |(pre scale)////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ *
+ * Timing Active Space (3840 x 2160)
+ * ---------------------------------
+ *
+ * Timing Active
+ * __________________________________________________
+ * | y_____________________________________________ |
+ * |x |Stream Destination (3456 x 1944) | |
+ * | | | |
+ * | | __________________ | |
+ * | | |Plane Clip////////| | |
+ * | | |(post scale)//////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | | |
+ * | | | |
+ * | |____________________________________________| |
+ * |__________________________________________________|
+ *
+ *
+ * In Timing Active Space a plane clip can be further sliced into
+ * pieces called MPC slices. Each Pipe Context is responsible for
+ * processing only one MPC slice so the plane processing workload can be
+ * distributed to multiple DPP pipes. MPC slices are blended together
+ * into a single ODM slice. Each ODM slice is responsible for processing
+ * a portion of Timing Active divided horizontally so the output pixel
+ * processing workload can be distributed to multiple OPP pipes. All ODM
+ * slices are mapped together in the ODM block so MPC slices belonging
+ * to different ODM slices can be pieced together to form a single image
+ * in Timing Active. An MPC slice must belong to a single ODM slice. If
+ * an MPC slice goes across an ODM slice boundary, it needs to be
+ * divided into two MPC slices, one for each ODM slice.
+ *
+ * In the following diagram the output pixel processing workload is
+ * divided horizontally into two ODM slices one for each OPP blend tree.
+ * OPP0 blend tree is responsible for processing left half of Timing
+ * Active, while OPP2 blend tree is responsible for processing right
+ * half.
+ *
+ * The plane has two MPC slices. However, since the right MPC slice goes
+ * across the ODM boundary, two DPP pipes are needed, one for each OPP
+ * blend tree (i.e. DPP1 for the OPP0 blend tree and DPP2 for the OPP2
+ * blend tree).
+ *
+ * Assume we have a Pipe Context associated with OPP0 and DPP1 working
+ * on processing the plane in the diagram. We want to know the width and
+ * height of the shaded rectangle and its position relative to ODM
+ * slice0. This is called the recout of the pipe context.
+ *
+ * Planes can have arbitrary sizes and positions, and there can be an
+ * arbitrary number of MPC and ODM slices. The algorithm needs to take
+ * all scenarios into account.
+ *
+ * Timing Active Space (3840 x 2160)
+ * ---------------------------------
+ *
+ * Timing Active
+ * __________________________________________________
+ * |OPP0(ODM slice0)^ |OPP2(ODM slice1) |
+ * | y | |
+ * | | <- w -> |
+ * | _____V________|____ |
+ * | |DPP0 ^ |DPP1 |DPP2| |
+ * |<------ x |-----|->|/////| | |
+ * | | | |/////| | |
+ * | | h |/////| | |
+ * | | | |/////| | |
+ * | |_____V__|/////|____| |
+ * | | |
+ * | | |
+ * | | |
+ * |_________________________|________________________|
+ *
+ *
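+ * As a hypothetical numeric sketch (values not taken from this change):
+ * with ODM slice0 covering x = 0..1919 and slice1 covering
+ * x = 1920..3839, a plane clip at x = 1500 with width 800 split into two
+ * MPC slices gives slices [1500, 1900) and [1900, 2300). The pipe on
+ * OPP0/DPP1 owns the part of the right MPC slice inside ODM slice0, so
+ * its recout is x = 1900, width = 20, while the OPP2/DPP2 pipe gets the
+ * remainder shifted into ODM slice1 coordinates: x = 0, width = 380.
+ *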
+ */
+ struct rect plane_clip;
+ struct rect mpc_slice_of_plane_clip;
+ struct rect odm_slice;
+ struct rect overlapping_area;
+
+ plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx,
+ &pipe_ctx->plane_state->clip_rect);
+ /* guard plane clip from drawing beyond stream dst here */
+ plane_clip = intersect_rec(&plane_clip,
+ &pipe_ctx->stream->dst);
+ mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active(
+ pipe_ctx, &plane_clip);
+ odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
+ overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice);
+ if (overlapping_area.height > 0 &&
+ overlapping_area.width > 0) {
+ /* shift the overlapping area so it is relative to the current
+ * ODM slice's position
+ */
+ pipe_ctx->plane_res.scl_data.recout = shift_rec(
+ &overlapping_area,
+ -odm_slice.x, -odm_slice.y);
+ adjust_recout_for_visual_confirm(
+ &pipe_ctx->plane_res.scl_data.recout,
+ pipe_ctx);
+ } else {
+ /* if there is no overlap, zero recout */
+ memset(&pipe_ctx->plane_res.scl_data.recout, 0,
+ sizeof(struct rect));
}
+
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -997,33 +1292,30 @@ static void calculate_init_and_vp(
static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = plane_state->src_rect;
+ struct rect recout_dst_in_active_timing;
+ struct rect recout_clip_in_active_timing;
+ struct rect recout_clip_in_recout_dst;
+ struct rect overlap_in_active_timing;
+ struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- /*
- * recout full is what the recout would have been if we didnt clip
- * the source plane at all. We only care about left(ro_lb) and top(ro_tb)
- * offsets of recout within recout full because those are the directions
- * we scan from and therefore the only ones that affect inits.
- */
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
- * stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
- * stream->dst.height / stream->src.height;
- if (pipe_ctx->prev_odm_pipe && split_idx)
- ro_lb = data->h_active * split_idx - recout_full_x;
- else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe)
- ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x;
+ recout_clip_in_active_timing = shift_rec(
+ &data->recout, odm_slice.x, odm_slice.y);
+ recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
+ pipe_ctx, &plane_state->dst_rect);
+ overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing,
+ &recout_dst_in_active_timing);
+ if (overlap_in_active_timing.width > 0 &&
+ overlap_in_active_timing.height > 0)
+ recout_clip_in_recout_dst = shift_rec(&overlap_in_active_timing,
+ -recout_dst_in_active_timing.x,
+ -recout_dst_in_active_timing.y);
else
- ro_lb = data->recout.x - recout_full_x;
- ro_tb = data->recout.y - recout_full_y;
- ASSERT(ro_lb >= 0 && ro_tb >= 0);
+ memset(&recout_clip_in_recout_dst, 0, sizeof(struct rect));
/*
* Work in recout rotation since that requires less transformations
@@ -1042,7 +1334,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
calculate_init_and_vp(
flip_horz_scan_dir,
- ro_lb,
+ recout_clip_in_recout_dst.x,
data->recout.width,
src.width,
data->taps.h_taps,
@@ -1052,7 +1344,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport.width);
calculate_init_and_vp(
flip_horz_scan_dir,
- ro_lb,
+ recout_clip_in_recout_dst.x,
data->recout.width,
src.width / vpc_div,
data->taps.h_taps_c,
@@ -1062,7 +1354,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport_c.width);
calculate_init_and_vp(
flip_vert_scan_dir,
- ro_tb,
+ recout_clip_in_recout_dst.y,
data->recout.height,
src.height,
data->taps.v_taps,
@@ -1072,7 +1364,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport.height);
calculate_init_and_vp(
flip_vert_scan_dir,
- ro_tb,
+ recout_clip_in_recout_dst.y,
data->recout.height,
src.height / vpc_div,
data->taps.v_taps_c,
@@ -1097,6 +1389,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx);
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -1121,30 +1414,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dst.y += timing->v_border_top;
/* Calculate H and V active size */
- pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
- timing->h_border_left + timing->h_border_right;
- pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
- timing->v_border_top + timing->v_border_bottom;
- if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) {
- pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
-
- DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n",
- __func__,
- pipe_ctx->pipe_idx,
- pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1,
- pipe_ctx->prev_odm_pipe ? pipe_ctx->prev_odm_pipe->pipe_idx : -1);
- } /* ODM + windows MPO, where window is on either right or left ODM half */
- else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) {
-
- pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1;
-
- DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n",
- __func__,
- pipe_ctx->pipe_idx,
- pipe_ctx->top_pipe->pipe_idx,
- pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1,
- pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1);
- }
+ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
+ pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+
/* depends on h_active */
calculate_recout(pipe_ctx);
/* depends on pixel format */
@@ -1226,17 +1498,12 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
}
- if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) {
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
- pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
- res = false;
- } else {
- /* Clamp minimum viewport size */
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
- pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
- if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
- pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
- }
+ /* Clamp minimum viewport size */
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
+ if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
+
DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
"src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -1288,7 +1555,7 @@ enum dc_status resource_build_scaling_params_for_context(
return DC_OK;
}
-struct pipe_ctx *find_idle_secondary_pipe(
+struct pipe_ctx *resource_find_free_secondary_pipe_legacy(
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
@@ -1348,73 +1615,182 @@ struct pipe_ctx *find_idle_secondary_pipe(
return secondary_pipe;
}
-struct pipe_ctx *resource_get_head_pipe_for_stream(
- struct resource_context *res_ctx,
- struct dc_stream_state *stream)
+int resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct pipe_ctx *cur_opp_head)
{
+ const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe;
+ struct pipe_ctx *new_pipe;
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+
+ while (cur_sec_dpp) {
+ /* find a free pipe used in the current opp blend tree; this is to
+ * avoid an MPO pipe switching to a different opp blending tree
+ */
+ new_pipe = &new_res_ctx->pipe_ctx[cur_sec_dpp->pipe_idx];
+ if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = cur_sec_dpp->pipe_idx;
+ break;
+ }
+ cur_sec_dpp = cur_sec_dpp->bottom_pipe;
+ }
+
+ return free_pipe_idx;
+}
+
+int recource_find_free_pipe_not_used_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
+{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
int i;
- for (i = 0; i < MAX_PIPES; i++) {
- if (res_ctx->pipe_ctx[i].stream == stream
- && !res_ctx->pipe_ctx[i].top_pipe
- && !res_ctx->pipe_ctx[i].prev_odm_pipe)
- return &res_ctx->pipe_ctx[i];
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, FREE_PIPE) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
}
- return NULL;
+
+ return free_pipe_idx;
}
-static struct pipe_ctx *resource_get_tail_pipe(
- struct resource_context *res_ctx,
- struct pipe_ctx *head_pipe)
+int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
{
- struct pipe_ctx *tail_pipe;
-
- tail_pipe = head_pipe->bottom_pipe;
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
+ int i;
- while (tail_pipe) {
- head_pipe = tail_pipe;
- tail_pipe = tail_pipe->bottom_pipe;
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
+ !resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
+ resource_is_for_mpcc_combine(cur_pipe) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
}
- return head_pipe;
+ return free_pipe_idx;
}
-/*
- * A free_pipe for a stream is defined here as a pipe
- * that has no surface attached yet
- */
-static struct pipe_ctx *acquire_free_pipe_for_head(
- struct dc_state *context,
- const struct resource_pool *pool,
- struct pipe_ctx *head_pipe)
+int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe;
int i;
- struct resource_context *res_ctx = &context->res_ctx;
- if (!head_pipe->plane_state)
- return head_pipe;
+ for (i = 0; i < pool->pipe_count; i++) {
+ new_pipe = &new_res_ctx->pipe_ctx[i];
- /* Re-use pipe already acquired for this stream if available*/
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if (res_ctx->pipe_ctx[i].stream == head_pipe->stream &&
- !res_ctx->pipe_ctx[i].plane_state) {
- return &res_ctx->pipe_ctx[i];
+ if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
}
}
- /*
- * At this point we have no re-useable pipe for this stream and we need
- * to acquire an idle one to satisfy the request
+ return free_pipe_idx;
+}
+
+bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
+{
+#ifdef DBG
+ if (pipe_ctx->stream == NULL) {
+ /* a free pipe with dangling states */
+ ASSERT(!pipe_ctx->plane_state);
+ ASSERT(!pipe_ctx->prev_odm_pipe);
+ ASSERT(!pipe_ctx->next_odm_pipe);
+ ASSERT(!pipe_ctx->top_pipe);
+ ASSERT(!pipe_ctx->bottom_pipe);
+ } else if (pipe_ctx->top_pipe) {
+ /* a secondary DPP pipe must be assigned to a plane */
+ ASSERT(pipe_ctx->plane_state);
+ }
+ /* Add more checks here to prevent corrupted pipe ctx. It is very hard
+ * to debug this issue afterwards because we can't pinpoint the code
+ * location causing inconsistent pipe context states.
*/
+#endif
+ switch (type) {
+ case OTG_MASTER:
+ return !pipe_ctx->prev_odm_pipe &&
+ !pipe_ctx->top_pipe &&
+ pipe_ctx->stream;
+ case OPP_HEAD:
+ return !pipe_ctx->top_pipe && pipe_ctx->stream;
+ case DPP_PIPE:
+ return pipe_ctx->plane_state && pipe_ctx->stream;
+ case FREE_PIPE:
+ return !pipe_ctx->plane_state && !pipe_ctx->stream;
+ default:
+ return false;
+ }
+}
- if (!pool->funcs->acquire_idle_pipe_for_layer) {
- if (!pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer)
- return NULL;
- else
- return pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer(context, pool, head_pipe->stream, head_pipe);
+bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx)
+{
+ return resource_get_num_mpc_splits(pipe_ctx) > 0;
+}
+
+struct pipe_ctx *resource_get_otg_master_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ resource_is_pipe_type(&res_ctx->pipe_ctx[i], OTG_MASTER))
+ return &res_ctx->pipe_ctx[i];
+ }
+ return NULL;
+}
+
+struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *otg_master = resource_get_opp_head(pipe_ctx);
+
+ while (otg_master->prev_odm_pipe)
+ otg_master = otg_master->prev_odm_pipe;
+ return otg_master;
+}
+
+struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *opp_head = (struct pipe_ctx *) pipe_ctx;
+
+ ASSERT(!resource_is_pipe_type(opp_head, FREE_PIPE));
+ while (opp_head->top_pipe)
+ opp_head = opp_head->top_pipe;
+ return opp_head;
+}
+
+static struct pipe_ctx *get_tail_pipe(
+ struct pipe_ctx *head_pipe)
+{
+ struct pipe_ctx *tail_pipe = head_pipe->bottom_pipe;
+
+ while (tail_pipe) {
+ head_pipe = tail_pipe;
+ tail_pipe = tail_pipe->bottom_pipe;
}
- return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
+ return head_pipe;
}
static int acquire_first_split_pipe(
@@ -1447,275 +1823,126 @@ static int acquire_first_split_pipe(
split_pipe->stream = stream;
return i;
- } else if (split_pipe->prev_odm_pipe &&
- split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
- split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
- if (split_pipe->next_odm_pipe)
- split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
-
- if (split_pipe->prev_odm_pipe->plane_state)
- resource_build_scaling_params(split_pipe->prev_odm_pipe);
-
- memset(split_pipe, 0, sizeof(*split_pipe));
- split_pipe->stream_res.tg = pool->timing_generators[i];
- split_pipe->plane_res.hubp = pool->hubps[i];
- split_pipe->plane_res.ipp = pool->ipps[i];
- split_pipe->plane_res.dpp = pool->dpps[i];
- split_pipe->stream_res.opp = pool->opps[i];
- split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
- split_pipe->pipe_idx = i;
-
- split_pipe->stream = stream;
- return i;
}
}
- return -1;
+ return UNABLE_TO_SPLIT;
}
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
+static bool add_plane_to_opp_head_pipes(struct pipe_ctx *otg_master_pipe,
struct dc_plane_state *plane_state,
struct dc_state *context)
{
- int i;
- struct resource_pool *pool = dc->res_pool;
- struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe;
- struct dc_stream_status *stream_status = NULL;
- struct pipe_ctx *prev_right_head = NULL;
- struct pipe_ctx *free_right_pipe = NULL;
- struct pipe_ctx *prev_left_head = NULL;
+ struct pipe_ctx *opp_head_pipe = otg_master_pipe;
- DC_LOGGER_INIT(stream->ctx->logger);
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
+ while (opp_head_pipe) {
+ if (opp_head_pipe->plane_state) {
+ ASSERT(0);
+ return false;
}
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to attach surface!\n");
- return false;
+ opp_head_pipe->plane_state = plane_state;
+ opp_head_pipe = opp_head_pipe->next_odm_pipe;
}
+ return true;
+}
- if (stream_status->plane_count == MAX_SURFACE_NUM) {
- dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
- return false;
+static void insert_secondary_dpp_pipe_with_plane(struct pipe_ctx *opp_head_pipe,
+ struct pipe_ctx *sec_pipe, struct dc_plane_state *plane_state)
+{
+ struct pipe_ctx *tail_pipe = get_tail_pipe(opp_head_pipe);
+
+ tail_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = tail_pipe;
+ if (tail_pipe->prev_odm_pipe) {
+ ASSERT(tail_pipe->prev_odm_pipe->bottom_pipe);
+ sec_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
+ tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = sec_pipe;
}
+ sec_pipe->plane_state = plane_state;
+}
- head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+/* for each opp head pipe of an otg master pipe, acquire a secondary dpp pipe
+ * and add the plane. So the plane is added to all MPC blend trees associated
+ * with the otg master pipe.
+ */
+static bool acquire_secondary_dpp_pipes_and_add_plane(
+ struct pipe_ctx *otg_master_pipe,
+ struct dc_plane_state *plane_state,
+ struct dc_state *new_ctx,
+ struct dc_state *cur_ctx,
+ struct resource_pool *pool)
+{
+ struct pipe_ctx *opp_head_pipe, *sec_pipe;
- if (!head_pipe) {
- dm_error("Head pipe not found for stream_state %p !\n", stream);
+ if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe)
return false;
- }
- /* retain new surface, but only once per stream */
- dc_plane_state_retain(plane_state);
-
- while (head_pipe) {
- free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
+ opp_head_pipe = otg_master_pipe;
+ while (opp_head_pipe) {
+ sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
+ cur_ctx,
+ new_ctx,
+ pool,
+ opp_head_pipe);
+ if (!sec_pipe) {
+ /* try tearing down MPCC combine */
+ int pipe_idx = acquire_first_split_pipe(
+ &new_ctx->res_ctx, pool,
+ otg_master_pipe->stream);
- if (!free_pipe) {
- int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
- free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+ sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
}
- if (!free_pipe) {
- dc_plane_state_release(plane_state);
+ if (!sec_pipe)
return false;
- }
-
- free_pipe->plane_state = plane_state;
-
- if (head_pipe != free_pipe) {
- tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
- ASSERT(tail_pipe);
-
- /* ODM + window MPO, where MPO window is on right half only */
- if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) &&
- tail_pipe->next_odm_pipe) {
-
- /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on
- * the right side, then we will invalidate a 2nd one on the right side
- */
- if (head_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
- dc_plane_state_release(plane_state);
- return false;
- }
-
- DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d tail_pipe->next_odm_pipe:%d\n",
- __func__,
- free_pipe->pipe_idx,
- tail_pipe->next_odm_pipe ? tail_pipe->next_odm_pipe->pipe_idx : -1);
-
- /*
- * We want to avoid the case where the right side already has a pipe assigned to
- * it and is different from free_pipe ( which would cause trigger a pipe
- * reallocation ).
- * Check the old context to see if the right side already has a pipe allocated
- * - If not, continue to use free_pipe
- * - If the right side already has a pipe, use that pipe instead if its available
- */
-
- /*
- * We also want to avoid the case where with three plane ( 2 MPO videos ), we have
- * both videos on the left side so one of the videos is invalidated. Then we
- * move the invalidated video back to the right side. If the order of the plane
- * states is such that the right MPO plane is processed first, the free pipe
- * selected by the head will be the left MPO pipe. But since there was no right
- * MPO pipe, it will assign the free pipe to the right MPO pipe instead and
- * a pipe reallocation will occur.
- * Check the old context to see if the left side already has a pipe allocated
- * - If not, continue to use free_pipe
- * - If the left side is already using this pipe, then pick another pipe for right
- */
-
- prev_right_head = &dc->current_state->res_ctx.pipe_ctx[tail_pipe->next_odm_pipe->pipe_idx];
- if ((prev_right_head->bottom_pipe) &&
- (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
- free_right_pipe = acquire_free_pipe_for_head(context, pool, tail_pipe->next_odm_pipe);
- } else {
- prev_left_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->pipe_idx];
- if ((prev_left_head->bottom_pipe) &&
- (free_pipe->pipe_idx == prev_left_head->bottom_pipe->pipe_idx)) {
- free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
- }
- }
-
- if (free_right_pipe) {
- free_pipe->stream = NULL;
- memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
- memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
- free_pipe->plane_state = NULL;
- free_pipe->pipe_idx = 0;
- free_right_pipe->plane_state = plane_state;
- free_pipe = free_right_pipe;
- }
-
- free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg;
- free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm;
- free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp;
- free_pipe->stream_res.stream_enc = tail_pipe->next_odm_pipe->stream_res.stream_enc;
- free_pipe->stream_res.audio = tail_pipe->next_odm_pipe->stream_res.audio;
- free_pipe->clock_source = tail_pipe->next_odm_pipe->clock_source;
-
- free_pipe->top_pipe = tail_pipe->next_odm_pipe;
- tail_pipe->next_odm_pipe->bottom_pipe = free_pipe;
- } else if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)
- && head_pipe->next_odm_pipe) {
-
- /* For ODM + window MPO, support 3 plane ( 2 MPO ) case.
- * Here we have a desktop ODM + left window MPO and a new MPO window appears
- * on the right side only. It fails the first case, because tail_pipe is the
- * left window MPO, so it has no next_odm_pipe. So in this scenario, we check
- * for head_pipe->next_odm_pipe instead
- */
- DC_LOG_SCALER("%s - ODM + win MPO (left) + win MPO (right). free_pipe:%d head_pipe->next_odm:%d\n",
- __func__,
- free_pipe->pipe_idx,
- head_pipe->next_odm_pipe ? head_pipe->next_odm_pipe->pipe_idx : -1);
-
- /*
- * We want to avoid the case where the right side already has a pipe assigned to
- * it and is different from free_pipe ( which would cause trigger a pipe
- * reallocation ).
- * Check the old context to see if the right side already has a pipe allocated
- * - If not, continue to use free_pipe
- * - If the right side already has a pipe, use that pipe instead if its available
- */
- prev_right_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->next_odm_pipe->pipe_idx];
- if ((prev_right_head->bottom_pipe) &&
- (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
- free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe->next_odm_pipe);
- if (free_right_pipe) {
- free_pipe->stream = NULL;
- memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
- memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
- free_pipe->plane_state = NULL;
- free_pipe->pipe_idx = 0;
- free_right_pipe->plane_state = plane_state;
- free_pipe = free_right_pipe;
- }
- }
- free_pipe->stream_res.tg = head_pipe->next_odm_pipe->stream_res.tg;
- free_pipe->stream_res.abm = head_pipe->next_odm_pipe->stream_res.abm;
- free_pipe->stream_res.opp = head_pipe->next_odm_pipe->stream_res.opp;
- free_pipe->stream_res.stream_enc = head_pipe->next_odm_pipe->stream_res.stream_enc;
- free_pipe->stream_res.audio = head_pipe->next_odm_pipe->stream_res.audio;
- free_pipe->clock_source = head_pipe->next_odm_pipe->clock_source;
-
- free_pipe->top_pipe = head_pipe->next_odm_pipe;
- head_pipe->next_odm_pipe->bottom_pipe = free_pipe;
- } else {
-
- /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on
- * the left side, then we will invalidate a 2nd one on the left side
- */
- if (head_pipe->next_odm_pipe && tail_pipe->top_pipe) {
- dc_plane_state_release(plane_state);
- return false;
- }
-
- free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
- free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
- free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
- free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
- free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
- free_pipe->clock_source = tail_pipe->clock_source;
-
- free_pipe->top_pipe = tail_pipe;
- tail_pipe->bottom_pipe = free_pipe;
-
- /* Connect MPO pipes together if MPO window is in the centre */
- if (!(free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
- free_pipe->stream->src.x + free_pipe->stream->src.width/2))) {
- if (!free_pipe->next_odm_pipe &&
- tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
- free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
- tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
- }
- if (!free_pipe->prev_odm_pipe &&
- tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
- free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
- tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
- }
- }
- }
- }
+ insert_secondary_dpp_pipe_with_plane(opp_head_pipe, sec_pipe,
+ plane_state);
+ opp_head_pipe = opp_head_pipe->next_odm_pipe;
+ }
+ return true;
+}
- /* ODM + window MPO, where MPO window is on left half only */
- if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
- free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
- DC_LOG_SCALER("%s - ODM + window MPO(left). free_pipe:%d\n",
- __func__,
- free_pipe->pipe_idx);
- break;
- }
- /* ODM + window MPO, where MPO window is on right half only */
- if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
- DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d\n",
- __func__,
- free_pipe->pipe_idx);
- break;
- }
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *otg_master_pipe;
+ struct dc_stream_status *stream_status = NULL;
+ bool added = false;
- head_pipe = head_pipe->next_odm_pipe;
+ stream_status = dc_stream_get_status_from_state(context, stream);
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ goto out;
+ } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ goto out;
}
- /* assign new surfaces*/
- stream_status->plane_states[stream_status->plane_count] = plane_state;
- stream_status->plane_count++;
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &context->res_ctx, stream);
+ if (otg_master_pipe->plane_state == NULL)
+ added = add_plane_to_opp_head_pipes(otg_master_pipe,
+ plane_state, context);
+ else
+ added = acquire_secondary_dpp_pipes_and_add_plane(
+ otg_master_pipe, plane_state, context,
+ dc->current_state, pool);
+ if (added) {
+ stream_status->plane_states[stream_status->plane_count] =
+ plane_state;
+ stream_status->plane_count++;
+ dc_plane_state_retain(plane_state);
+ }
- return true;
+out:
+ return added;
}
bool dc_remove_plane_from_context(
@@ -2219,7 +2446,7 @@ enum dc_status dc_remove_stream_from_ctx(
{
int i;
struct dc_context *dc_ctx = dc->ctx;
- struct pipe_ctx *del_pipe = resource_get_head_pipe_for_stream(&new_ctx->res_ctx, stream);
+ struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(&new_ctx->res_ctx, stream);
struct pipe_ctx *odm_pipe;
if (!del_pipe) {
@@ -3473,7 +3700,7 @@ enum dc_status resource_map_clock_resources(
{
/* acquire new resources */
const struct resource_pool *pool = dc->res_pool;
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
if (!pipe_ctx)
@@ -3863,10 +4090,7 @@ void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i];
pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx_old->stream)
- continue;
-
- if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe_ctx_old, OTG_MASTER))
continue;
if (!pipe_ctx->stream ||
@@ -3990,11 +4214,13 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
* with an hpo encoder. Or we can return a very dummy one that doesn't
* do work for all functions
*/
- return get_hpo_dp_link_hwss();
+ return (requires_fixed_vs_pe_retimer_hpo_link_hwss(link) ?
+ get_hpo_fixed_vs_pe_retimer_dp_link_hwss() : get_hpo_dp_link_hwss());
else if (can_use_dpia_link_hwss(link, link_res))
return get_dpia_link_hwss();
else if (can_use_dio_link_hwss(link, link_res))
- return get_dio_link_hwss();
+ return (requires_fixed_vs_pe_retimer_dio_link_hwss(link)) ?
+ get_dio_fixed_vs_pe_retimer_link_hwss() : get_dio_link_hwss();
else
return get_virtual_link_hwss();
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 6e11d2b701f8..01fe2d2fd241 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -71,8 +71,7 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
/* Copy audio modes */
/* TODO - Remove this translation */
- for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
- {
+ for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) {
stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
@@ -306,6 +305,32 @@ bool dc_optimize_timing_for_fsft(
}
#endif
+static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
+{
+ uint32_t refresh_rate;
+ struct dc *dc = stream->ctx->dc;
+
+ refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
+ stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
+
+ /* If there's any stream that fits the SubVP high refresh criteria,
+ * we must return true. This is because cursor updates are asynchronous
+ * with full updates, so we could transition into a SubVP config and
+ * remain in HW cursor mode if there's no cursor update, which would
+ * then cause corruption.
+ */
+ if ((refresh_rate >= 120 && refresh_rate <= 175 &&
+ stream->timing.v_addressable >= 1440 &&
+ stream->timing.v_addressable <= 2160) &&
+ (dc->current_state->stream_count > 1 ||
+ (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
+ return true;
+
+ return false;
+}
+
/*
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
@@ -334,12 +359,13 @@ bool dc_stream_set_cursor_attributes(
/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
* Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz
- *
- * [< 120hz is a requirement for SubVP configs]
+ * 1. If the config is a candidate for SubVP high refresh (both single and dual display configs)
+ * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
+ * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
*/
if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) {
+ if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
+ return false;
if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 63948170fd6d..0d0bef8eb331 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -40,12 +40,14 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
+struct abm_save_restore;
+
/* forward declaration */
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.241"
+#define DC_VER "3.2.247"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -428,6 +430,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
};
@@ -506,7 +509,7 @@ enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_DISALLOW,
};
-/**
+/*
* struct dc_clocks - DC pipe clocks
*
* For any clocks that may differ per pipe only the max is stored in this
@@ -728,7 +731,7 @@ struct resource_pool;
struct dce_hwseq;
struct link_service;
-/**
+/*
* struct dc_debug_options - DC debug struct
*
* This struct provides a simple mechanism for developers to change some
@@ -756,7 +759,7 @@ struct dc_debug_options {
bool use_max_lb;
enum dcc_option disable_dcc;
- /**
+ /*
* @pipe_split_policy: Define which pipe split policy is used by the
* display core.
*/
@@ -861,6 +864,7 @@ struct dc_debug_options {
bool psr_skip_crtc_disable;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
+ uint32_t fixed_vs_aux_delay_config_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
bool allow_sw_cursor_fallback;
@@ -902,9 +906,18 @@ struct dc_debug_options {
uint32_t fpo_vactive_max_blank_us;
bool enable_legacy_fast_update;
bool disable_dc_mode_overwrite;
+ bool replay_skip_crtc_disabled;
};
struct gpu_info_soc_bounding_box_v1_0;
+
+/* Generic structure that can be used to query properties of DC. More fields
+ * can be added as required.
+ */
+struct dc_current_properties {
+ unsigned int cursor_size_limit;
+};
+
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
@@ -1334,7 +1347,7 @@ struct dc_validation_set {
struct dc_stream_state *stream;
/**
- * @plane_state: Surface state
+ * @plane_states: Surface state
*/
struct dc_plane_state *plane_states[MAX_SURFACES];
@@ -1409,10 +1422,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+void dc_set_disable_128b_132b_stream_overhead(bool disable);
+
/* The function returns minimum bandwidth required to drive a given timing
* return - minimum required timing bandwidth in kbps.
*/
-uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
/*
@@ -1481,6 +1498,7 @@ struct dc_link {
enum engine_id eng_id;
bool test_pattern_enabled;
+ enum dp_test_pattern current_test_pattern;
union compliance_test_state compliance_test_state;
void *priv;
@@ -1514,8 +1532,11 @@ struct dc_link {
enum edp_revision edp_revision;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+ struct backlight_settings backlight_settings;
struct psr_settings psr_settings;
+ struct replay_settings replay_settings;
+
/* Drive settings read from integrated info table */
struct dc_lane_settings bios_forced_drive_settings;
@@ -1849,6 +1870,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
*/
const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
+/* Get the highest encoding format that the link supports; highest meaning the
+ * encoding format which supports the maximum bandwidth.
+ *
+ * @link - a link with DP RX connection
+ * return - highest encoding format link supports.
+ */
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link);
+
/* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected
* to a link with dp connector signal type.
* @link - a link with dp connector signal type
@@ -1983,6 +2012,8 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context);
+bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
+
/* On eDP links this function call will stall until T12 has elapsed.
* If the panel is not in power off state, this function will return
* immediately.
@@ -2230,6 +2261,11 @@ void dc_z10_save_init(struct dc *dc);
bool dc_is_dmub_outbox_supported(struct dc *dc);
bool dc_enable_dmub_notifications(struct dc *dc);
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData);
+
void dc_enable_dmub_outbox(struct dc *dc);
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
@@ -2255,6 +2291,8 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
void dc_print_dmub_diagnostic_data(const struct dc *dc);
+void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties);
+
/* DSC Interfaces */
#include "dc_dsc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index c753c6f30dd7..4c5ef3ef8dbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -31,6 +31,7 @@
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
+#include "resource.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -356,7 +357,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->stream && pipe->stream->fpo_in_use) {
+ if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;
@@ -381,6 +382,9 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
union dmub_rb_cmd cmd = { 0 };
+ if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
+ return;
+
memset(&cmd, 0, sizeof(cmd));
/* Prepare fw command */
@@ -528,7 +532,8 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
- if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(pipe, DPP_PIPE))
continue;
// Find the SubVP pipe
@@ -725,12 +730,10 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->stream)
- continue;
-
/* For SubVP pipe count, only count the top most (ODM / MPC) pipe
*/
- if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -747,12 +750,14 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
* Any ODM or MPC splits being used in SubVP will be handled internally in
* populate_subvp_cmd_pipe_info
*/
- if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream &&
- !pipe->top_pipe && !pipe->prev_odm_pipe &&
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->stream->mall_stream_config.paired_stream &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
- } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE &&
- !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ } else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -894,6 +899,9 @@ static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
+ if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
+ return true;
+
return false;
}
@@ -1018,3 +1026,32 @@ bool dc_dmub_check_min_version(struct dmub_srv *srv)
return true;
return srv->hw_funcs.is_psrsu_supported(srv);
}
+
+void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ struct dmub_srv *dmub;
+ enum dmub_status status;
+ static const uint32_t timeout_us = 30;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
+ DC_LOG_ERROR("%s: invalid parameters.", __func__);
+ return;
+ }
+
+ dmub = dc_dmub_srv->dmub;
+
+ status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us);
+ if (status != DMUB_STATUS_OK) {
+ DC_LOG_ERROR("timeout updating trace buffer mask word\n");
+ return;
+ }
+
+ status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us);
+ if (status != DMUB_STATUS_OK) {
+ DC_LOG_ERROR("timeout updating trace buffer mask word\n");
+ return;
+ }
+
+ DC_LOG_DEBUG("Enabled DPIA trace\n");
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 099f94b6107c..bb3fe162dd93 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -87,4 +87,7 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
bool dc_dmub_check_min_version(struct dmub_srv *srv);
+
+void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
+
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 55139d7bf422..cfaa39c5dd16 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1117,6 +1117,11 @@ struct edp_psr_info {
uint8_t force_psrsu_cap;
};
+struct replay_info {
+ uint8_t pixel_deviation_per_line;
+ uint8_t max_deviation_line;
+};
+
struct dprx_states {
bool cable_id_written;
};
@@ -1236,6 +1241,8 @@ struct dpcd_caps {
uint8_t edp_rev;
union edp_alpm_caps alpm_caps;
struct edp_psr_info psr_info;
+
+ struct replay_info pr_info;
};
union dpcd_sink_ext_caps {
@@ -1276,6 +1283,28 @@ union dpcd_psr_configuration {
unsigned char raw;
};
+union replay_enable_and_configuration {
+ struct {
+ unsigned char FREESYNC_PANEL_REPLAY_MODE :1;
+ unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1;
+ unsigned char STATE_TRANSITION_ERROR_DETECTION :1;
+ unsigned char RESERVED0 :1;
+ unsigned char RESERVED1 :4;
+ } bits;
+ unsigned char raw;
+};
+
+union dpcd_replay_configuration {
+ struct {
+ unsigned char STATE_TRANSITION_ERROR_STATUS : 1;
+ unsigned char DESYNC_ERROR_STATUS : 1;
+ unsigned char SINK_DEVICE_REPLAY_STATUS : 3;
+ unsigned char SINK_FRAME_LOCKED : 2;
+ unsigned char RESERVED : 1;
+ } bits;
+ unsigned char raw;
+};
+
union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 9491b76d61f5..fe3078b8789e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -73,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range(
uint32_t max_bpp_x16,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
@@ -81,6 +82,7 @@ bool dc_dsc_compute_config(
const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg);
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0ce7728a5a4b..445ad79001ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -189,7 +189,6 @@ struct dc_panel_patch {
unsigned int disable_fams;
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
- unsigned int delay_disable_aux_intercept_ms;
};
struct dc_edid_caps {
@@ -879,7 +878,7 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_overall_throughput_0_mps; /* In MPs */
uint32_t branch_overall_throughput_1_mps; /* In MPs */
uint32_t branch_max_line_width;
- bool is_dp;
+ bool is_dp; /* Decoded format */
};
struct dc_golden_table {
@@ -902,6 +901,14 @@ enum dc_gpu_mem_alloc_type {
DC_MEM_ALLOC_TYPE_AGP
};
+enum dc_link_encoding_format {
+ DC_LINK_ENCODING_UNSPECIFIED = 0,
+ DC_LINK_ENCODING_DP_8b_10b,
+ DC_LINK_ENCODING_DP_128b_132b,
+ DC_LINK_ENCODING_HDMI_TMDS,
+ DC_LINK_ENCODING_HDMI_FRL
+};
+
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
DC_PSR_VERSION_SU_1 = 1,
@@ -995,6 +1002,10 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
+struct backlight_settings {
+ uint32_t backlight_millinits;
+};
+
/* PSR feature flags */
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink
@@ -1014,6 +1025,45 @@ struct psr_settings {
unsigned int psr_power_opt;
};
+enum replay_coasting_vtotal_type {
+ PR_COASTING_TYPE_NOM = 0,
+ PR_COASTING_TYPE_STATIC,
+ PR_COASTING_TYPE_FULL_SCREEN_VIDEO,
+ PR_COASTING_TYPE_TEST_HARNESS,
+ PR_COASTING_TYPE_NUM,
+};
+
+union replay_error_status {
+ struct {
+ unsigned char STATE_TRANSITION_ERROR :1;
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char DESYNC_ERROR :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct replay_config {
+ bool replay_supported; // Replay feature is supported
+ unsigned int replay_power_opt_supported; // Power opt flags that are supported
+ bool replay_smu_opt_supported; // SMU optimization is supported
+ unsigned int replay_enable_option; // Replay enablement option
+ uint32_t debug_flags; // Replay debug flags
+ bool replay_timing_sync_supported; // Replay desync is supported
+ union replay_error_status replay_error_status; // Replay error status
+};
+
+/* Replay feature flags */
+struct replay_settings {
+ struct replay_config config; // Replay configuration
+ bool replay_feature_enabled; // Replay feature is ready for activating
+ bool replay_allow_active; // Replay is currently active
+ unsigned int replay_power_opt_active; // Power opt flags that are activated currently
+ bool replay_smu_opt_enable; // SMU optimization is enabled
+ uint16_t coasting_vtotal; // Current Coasting vtotal
+ uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table
+};
+
/* To split out "global" and "per-panel" config settings.
* Add a struct dc_panel_config under dc_link
*/
@@ -1040,9 +1090,11 @@ struct dc_panel_config {
struct psr {
bool disable_psr;
bool disallow_psrsu;
+ bool disallow_replay;
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
+ unsigned int replay_enable_option;
} psr;
/* ABM */
struct varib {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 01490c9ba958..15b64c26d5a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -30,7 +30,7 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dmub_abm_lcd.o dce_panel_cntl.o \
-dmub_hw_lock_mgr.o dmub_outbox.o
+dmub_hw_lock_mgr.o dmub_outbox.o dmub_replay.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 07359eb89efc..e7acd6eec1fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -640,7 +640,7 @@ static void dce11_pplib_apply_display_requirements(
* on power saving.
*
*/
- pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
+ pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 63009db8b5a7..b87bfecb7755 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu)
}
static bool dce_dmcu_load_iram(struct dmcu *dmcu,
- unsigned int start_offset,
- const char *src,
- unsigned int bytes)
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
unsigned int count = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index a3fee929cd12..86233f94db4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -98,6 +98,29 @@
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PIXEL_RATE_CNTL, blk, 4)
+
+#define HWSEQ_PHYPLL_REG_LIST_302(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4)
+
+#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1)
+
+#define HWSEQ_PHYPLL_REG_LIST_303(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
+
+
#define HWSEQ_PHYPLL_REG_LIST_201(blk) \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
@@ -387,7 +410,11 @@
SR(MPC_CRC_RESULT_C), \
SR(MPC_CRC_RESULT_AR), \
SR(AZALIA_AUDIO_DTO), \
- SR(AZALIA_CONTROLLER_CLOCK_GATING)
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_CLOCK_CONTROL), \
+ SR(ODM_MEM_PWR_CTRL3), \
+ SR(DMU_MEM_PWR_CNTL), \
+ SR(MMHUBBUB_MEM_PWR_CNTL)
#define HWSEQ_DCN301_REG_LIST()\
SR(REFCLK_CNTL), \
@@ -508,8 +535,11 @@
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
+ HWSEQ_PIXEL_RATE_REG_LIST_302(OTG), \
+ HWSEQ_PHYPLL_REG_LIST_302(OTG), \
SR(AZALIA_AUDIO_DTO), \
- SR(AZALIA_CONTROLLER_CLOCK_GATING)
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_CLOCK_CONTROL)
#define HWSEQ_DCN303_REG_LIST() \
HWSEQ_DCN_REG_LIST(), \
@@ -540,28 +570,6 @@
SR(AZALIA_CONTROLLER_CLOCK_GATING), \
SR(HPO_TOP_CLOCK_CONTROL)
-#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \
- SRII(PIXEL_RATE_CNTL, blk, 0), \
- SRII(PIXEL_RATE_CNTL, blk, 1),\
- SRII(PIXEL_RATE_CNTL, blk, 2),\
- SRII(PIXEL_RATE_CNTL, blk, 3), \
- SRII(PIXEL_RATE_CNTL, blk, 4)
-
-#define HWSEQ_PHYPLL_REG_LIST_302(blk) \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4)
-
-#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \
- SRII(PIXEL_RATE_CNTL, blk, 0), \
- SRII(PIXEL_RATE_CNTL, blk, 1)
-
-#define HWSEQ_PHYPLL_REG_LIST_303(blk) \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
-
struct dce_hwseq_registers {
uint32_t DCFE_CLOCK_CONTROL[6];
uint32_t DCFEV_CLOCK_CONTROL;
@@ -663,14 +671,15 @@ struct dce_hwseq_registers {
uint32_t MC_VM_XGMI_LFB_CNTL;
uint32_t AZALIA_AUDIO_DTO;
uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
+ /* MMHUB VM */
+ uint32_t MC_VM_FB_LOCATION_BASE;
+ uint32_t MC_VM_FB_LOCATION_TOP;
+ uint32_t MC_VM_FB_OFFSET;
+ uint32_t MMHUBBUB_MEM_PWR_CNTL;
uint32_t HPO_TOP_CLOCK_CONTROL;
uint32_t ODM_MEM_PWR_CTRL3;
uint32_t DMU_MEM_PWR_CNTL;
- uint32_t MMHUBBUB_MEM_PWR_CNTL;
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
- uint32_t MC_VM_FB_LOCATION_BASE;
- uint32_t MC_VM_FB_LOCATION_TOP;
- uint32_t MC_VM_FB_OFFSET;
uint32_t HPO_TOP_HW_CONTROL;
};
/* set field name */
@@ -915,6 +924,7 @@ struct dce_hwseq_registers {
#define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
@@ -1012,7 +1022,8 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh)
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh)
#define HWSEQ_DCN303_MASK_SH_LIST(mask_sh) \
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 6d1b01c267b7..4f552c3e7663 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
return dce_i2c_hw;
}
-static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
- struct dce_i2c_hw *dce_i2c_hw,
- uint32_t timeout,
- enum i2c_channel_operation_result expected_result)
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result)
{
enum i2c_channel_operation_result result;
uint32_t i = 0;
@@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw(
return period_timeout * num_of_clock_stretches;
}
-static bool dce_i2c_hw_engine_submit_payload(
- struct dce_i2c_hw *dce_i2c_hw,
- struct i2c_payload *payload,
- bool middle_of_transaction,
- uint32_t speed)
+static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
+ struct i2c_payload *payload,
+ bool middle_of_transaction,
+ uint32_t speed)
{
struct i2c_request_transaction_data request;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index f1aeb6d1967c..e188447c8156 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine(
return true;
}
+
bool dce_i2c_engine_acquire_sw(
struct dce_i2c_sw *dce_i2c_sw,
struct ddc *ddc_handle)
@@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw(
return result;
}
-
-
-
-static void dce_i2c_sw_engine_submit_channel_request(
- struct dce_i2c_sw *engine,
- struct i2c_request_transaction_data *req)
+static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine,
+ struct i2c_request_transaction_data *req)
{
struct ddc *ddc = engine->ddc;
uint16_t clock_delay_div_4 = engine->clock_delay >> 2;
@@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request(
I2C_CHANNEL_OPERATION_FAILED;
}
-static bool dce_i2c_sw_engine_submit_payload(
- struct dce_i2c_sw *engine,
- struct i2c_payload *payload,
- bool middle_of_transaction)
+static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
+ struct i2c_payload *payload,
+ bool middle_of_transaction)
{
struct i2c_request_transaction_data request;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index fa314493ffc5..136bd93c3b65 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -941,9 +941,7 @@ bool dce110_link_encoder_validate_output_with_stream(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_LVDS:
- is_valid =
- (stream->timing.
- pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
@@ -1645,7 +1643,7 @@ void dce110_link_encoder_enable_hpd(struct link_encoder *enc)
uint32_t hpd_enable = 0;
uint32_t value = dm_read_reg(ctx, addr);
- get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN);
+ hpd_enable = get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN);
if (hpd_enable == 0)
set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 2fb9572ce25d..d3e6544022b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -27,6 +27,7 @@
#include "dmub_abm_lcd.h"
#include "dc.h"
#include "core_types.h"
+#include "dmub_cmd.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
@@ -118,6 +119,32 @@ static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int pane
return ret;
}
+/*****************************************************************************
+ * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore for preserving DMUB's
+ * Varibright states for LCD only. OLED is TBD
+ * @abm: used to check get dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and abm parameters
+ *
+ *
+ ***************************************************************************/
+static bool dmub_abm_save_restore_ex(
+ struct abm *abm,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData)
+{
+ bool ret = false;
+ unsigned int feature_support;
+ struct dc_context *dc = abm->ctx;
+
+ feature_support = abm_feature_support(abm, panel_inst);
+
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_save_restore(dc, panel_inst, pData);
+
+ return ret;
+}
+
static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
{
bool ret = false;
@@ -155,6 +182,7 @@ static const struct abm_funcs abm_funcs = {
.get_target_backlight = dmub_abm_get_target_backlight_ex,
.init_abm_config = dmub_abm_init_config_ex,
.set_abm_pause = dmub_abm_set_pause_ex,
+ .save_restore = dmub_abm_save_restore_ex,
.set_pipe_ex = dmub_abm_set_pipe_ex,
.set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 39da73eba86e..592a8f7a1c6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -208,6 +208,52 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
return true;
}
+
+/*****************************************************************************
+ * dmub_abm_save_restore() - dmub interface for abm save+pause and
+ * restore+un-pause
+ * @dc: dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and exchange abm parameters
+ *
+ * On pause, the current abm data is read back and stored in pData; on
+ * un-pause, the abm data stored in pData is set/applied.
+ *
+ *****************************************************************************/
+bool dmub_abm_save_restore(
+ struct dc_context *dc,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData)
+{
+ union dmub_rb_cmd cmd;
+ uint8_t panel_mask = 0x01 << panel_inst;
+ unsigned int bytes = sizeof(struct abm_save_restore);
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+ // Copy save/restore payload into the cw7 scratch buffer
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_save_restore.header.type = DMUB_CMD__ABM;
+ cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE;
+
+ cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_save_restore.abm_init_config_data.bytes = bytes;
+ cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
+
+ cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ // Copy the (possibly updated) save/restore payload back into the caller's structure
+ memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
+
bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
{
union dmub_rb_cmd cmd;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
index 00b4e268768e..853564d7f471 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
@@ -28,6 +28,8 @@
#include "abm.h"
+struct abm_save_restore;
+
void dmub_abm_init(struct abm *abm, uint32_t backlight);
bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
unsigned int dmub_abm_get_current_backlight(struct abm *abm);
@@ -38,6 +40,10 @@ void dmub_abm_init_config(struct abm *abm,
unsigned int inst);
bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst);
+bool dmub_abm_save_restore(
+ struct dc_context *dc,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData);
bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
bool dmub_abm_set_backlight_level(struct abm *abm,
unsigned int backlight_pwm_u16_16,
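
dmub_abm_save_restore() in dmub_abm_lcd.c above is a symmetric round trip through the DMUB scratch frame buffer: flush the buffer, copy the caller's abm_save_restore blob in, execute the DMUB command, then copy the (possibly firmware-updated) blob back out. A standalone sketch of that flow, with the firmware step mocked so it builds outside the kernel and with a made-up two-field payload:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the real struct abm_save_restore payload. */
struct abm_save_restore {
	unsigned int pause;
	unsigned int backlight;
};

static struct abm_save_restore scratch_mem_fb; /* stands in for the DMUB cw7 scratch buffer */

static void fake_dmub_execute(void)
{
	/* Firmware would read the blob, act on it and rewrite it in place;
	 * bump a field here just to show data flowing back to the caller. */
	scratch_mem_fb.backlight += 1;
}

static void save_restore_round_trip(struct abm_save_restore *pData)
{
	unsigned int bytes = sizeof(*pData);

	memset(&scratch_mem_fb, 0, sizeof(scratch_mem_fb)); /* flush scratch */
	memcpy(&scratch_mem_fb, pData, bytes);              /* copy payload in */
	fake_dmub_execute();                                /* send DMUB command and wait */
	memcpy(pData, &scratch_mem_fb, bytes);              /* copy result back out */
}

int main(void)
{
	struct abm_save_restore data = { .pause = 1, .backlight = 100 };

	save_restore_round_trip(&data);
	printf("backlight after round trip: %u\n", data.backlight);
	return 0;
}
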
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
new file mode 100644
index 000000000000..28149e53c2a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dmub_replay.h"
+
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
+#define MAX_PIPES 6
+
+/*
+ * Get Replay state from firmware.
+ */
+static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst)
+{
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ /* uint32_t raw_state = 0; */
+ uint32_t retry_count = 0;
+ enum dmub_status status;
+
+ do {
+ // Send gpint command and wait for ack
+ status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_REPLAY_STATE, panel_inst, 30);
+
+ if (status == DMUB_STATUS_OK) {
+ // GPINT was executed, get response
+ dmub_srv_get_gpint_response(srv, (uint32_t *)state);
+ } else
+ // Return invalid state when GPINT times out
+ *state = REPLAY_STATE_INVALID;
+ } while (++retry_count <= 1000 && *state == REPLAY_STATE_INVALID);
+
+ // Assert if max retry hit
+ if (retry_count >= 1000 && *state == REPLAY_STATE_INVALID) {
+ ASSERT(0);
+ /* To-do: Add retry fail log */
+ }
+}
+
+/*
+ * Enable/Disable Replay.
+ */
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+ uint32_t retry_count;
+ enum replay_state state = REPLAY_STATE_0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_enable.data.panel_inst = panel_inst;
+
+ cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
+ if (enable)
+ cmd.replay_enable.data.enable = REPLAY_ENABLE;
+ else
+ cmd.replay_enable.data.enable = REPLAY_DISABLE;
+
+ cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Below loops 1000 x 500us = 500 ms.
+ * Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at
+ * least a few frames. Should never hit the max retry assert below.
+ */
+ if (wait) {
+ for (retry_count = 0; retry_count <= 1000; retry_count++) {
+ dmub_replay_get_state(dmub, &state, panel_inst);
+
+ if (enable) {
+ if (state != REPLAY_STATE_0)
+ break;
+ } else {
+ if (state == REPLAY_STATE_0)
+ break;
+ }
+
+ fsleep(500);
+ }
+
+ /* assert if max retry hit */
+ if (retry_count >= 1000)
+ ASSERT(0);
+ }
+
+}
+
+/*
+ * Set REPLAY power optimization flags.
+ */
+static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_set_power_opt.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_set_power_opt.header.sub_type = DMUB_CMD__SET_REPLAY_POWER_OPT;
+ cmd.replay_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_power_opt_data);
+ cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt;
+ cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst;
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/*
+ * Setup Replay by programming phy registers and sending replay hw context values to firmware.
+ */
+static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
+ struct dc_link *link,
+ struct replay_context *replay_context,
+ uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+ struct dmub_cmd_replay_copy_settings_data *copy_settings_data
+ = &cmd.replay_copy_settings.replay_copy_settings_data;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ //TODO: refactor for multi edp support
+ break;
+ }
+ }
+
+ if (!pipe_ctx)
+ return false;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_copy_settings.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_copy_settings.header.sub_type = DMUB_CMD__REPLAY_COPY_SETTINGS;
+ cmd.replay_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_replay_copy_settings_data);
+
+ // HW insts
+ copy_settings_data->aux_inst = replay_context->aux_inst;
+ copy_settings_data->digbe_inst = replay_context->digbe_inst;
+ copy_settings_data->digfe_inst = replay_context->digfe_inst;
+
+ if (pipe_ctx->plane_res.dpp)
+ copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
+ else
+ copy_settings_data->dpp_inst = 0;
+ if (pipe_ctx->stream_res.tg)
+ copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst;
+ else
+ copy_settings_data->otg_inst = 0;
+
+ copy_settings_data->dpphy_inst = link->link_enc->transmitter;
+
+ // Misc
+ copy_settings_data->line_time_in_ns = replay_context->line_time_in_ns;
+ copy_settings_data->panel_inst = panel_inst;
+ copy_settings_data->debug.u32All = link->replay_settings.config.debug_flags;
+ copy_settings_data->pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
+ copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
+ copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
+ copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+
+ copy_settings_data->flags.u32All = 0;
+ copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == dc_link_fec_enabled);
+ copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1);
+ // WA for PSRSU + DSC on specific TCON: if DSC is enabled, force PSRSU into FFU mode (full frame update)
+ if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !link->dc->debug.disable_fec) &&
+ (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->panel_config.dsc.disable_dsc_edp &&
+ link->dc->caps.edp_dsc_support)) &&
+ link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 /*&&
+ (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
+ sizeof(DP_SINK_DEVICE_STR_ID_1)) ||
+ !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
+ sizeof(DP_SINK_DEVICE_STR_ID_2)))*/)
+ copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 1;
+ else
+ copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+/*
+ * Set coasting vtotal.
+ */
+static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
+ uint16_t coasting_vtotal,
+ uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_set_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_set_coasting_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
+ cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
+ cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/*
+ * Get Replay residency from firmware.
+ */
+static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
+ uint32_t *residency, const bool is_start, const bool is_alpm)
+{
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ uint16_t param = (uint16_t)(panel_inst << 8);
+
+ if (is_alpm)
+ param |= REPLAY_RESIDENCY_MODE_ALPM;
+
+ if (is_start)
+ param |= REPLAY_RESIDENCY_ENABLE;
+
+ // Send gpint command and wait for ack
+ dmub_srv_send_gpint_command(srv, DMUB_GPINT__REPLAY_RESIDENCY, param, 30);
+
+ if (!is_start)
+ dmub_srv_get_gpint_response(srv, residency);
+ else
+ *residency = 0;
+}
+
+static const struct dmub_replay_funcs replay_funcs = {
+ .replay_copy_settings = dmub_replay_copy_settings,
+ .replay_enable = dmub_replay_enable,
+ .replay_get_state = dmub_replay_get_state,
+ .replay_set_power_opt = dmub_replay_set_power_opt,
+ .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
+ .replay_residency = dmub_replay_residency,
+};
+
+/*
+ * Construct Replay object.
+ */
+static void dmub_replay_construct(struct dmub_replay *replay, struct dc_context *ctx)
+{
+ replay->ctx = ctx;
+ replay->funcs = &replay_funcs;
+}
+
+/*
+ * Allocate and initialize Replay object.
+ */
+struct dmub_replay *dmub_replay_create(struct dc_context *ctx)
+{
+ struct dmub_replay *replay = kzalloc(sizeof(struct dmub_replay), GFP_KERNEL);
+
+ if (replay == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_replay_construct(replay, ctx);
+
+ return replay;
+}
+
+/*
+ * Deallocate Replay object.
+ */
+void dmub_replay_destroy(struct dmub_replay **dmub)
+{
+ kfree(*dmub);
+ *dmub = NULL;
+}
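
dmub_replay_residency() above packs the panel instance into the upper byte of the 16-bit GPINT parameter and ORs the mode/enable flags into the low bits before sending DMUB_GPINT__REPLAY_RESIDENCY. A standalone sketch of just that packing (the flag values here are placeholders, not the real DMUB definitions):

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions -- the real values come from the DMUB command headers. */
#define REPLAY_RESIDENCY_MODE_ALPM	(1 << 0)
#define REPLAY_RESIDENCY_ENABLE		(1 << 1)

static uint16_t pack_residency_param(uint8_t panel_inst, int is_start, int is_alpm)
{
	uint16_t param = (uint16_t)(panel_inst << 8); /* panel instance in bits 15:8 */

	if (is_alpm)
		param |= REPLAY_RESIDENCY_MODE_ALPM;  /* select ALPM residency counter */
	if (is_start)
		param |= REPLAY_RESIDENCY_ENABLE;     /* start counting instead of reading back */

	return param;
}

int main(void)
{
	/* e.g. start ALPM residency measurement on panel 1 */
	printf("param = 0x%04x\n", pack_residency_param(1, 1, 1));
	return 0;
}

When is_start is false the driver instead reads the accumulated residency back through dmub_srv_get_gpint_response(), matching the tail of the function above.
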
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
new file mode 100644
index 000000000000..e8385bbf51fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_REPLAY_H_
+#define _DMUB_REPLAY_H_
+
+#include "dc_types.h"
+#include "dmub_cmd.h"
+struct dc_link;
+struct dmub_replay_funcs;
+
+struct dmub_replay {
+ struct dc_context *ctx;
+ const struct dmub_replay_funcs *funcs;
+};
+
+struct dmub_replay_funcs {
+ void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
+ uint8_t panel_inst);
+ void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
+ uint8_t panel_inst);
+ bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
+ struct replay_context *replay_context, uint8_t panel_inst);
+ void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
+ uint8_t panel_inst);
+ void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal,
+ uint8_t panel_inst);
+ void (*replay_residency)(struct dmub_replay *dmub,
+ uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+};
+
+struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
+void dmub_replay_destroy(struct dmub_replay **dmub);
+
+
+#endif /* _DMUB_REPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 42e9b6a529f6..899b25b0bad8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -824,7 +824,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 6c9ca43d1040..ad967b58d7be 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -219,7 +219,7 @@ static bool dce110_enable_display_power_gating(
if (controller_id == underlay_idx)
controller_id = CONTROLLER_ID_UNDERLAY0 - 1;
- if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
bp_result = dcb->funcs->enable_disp_power_gating(
dcb, controller_id + 1, cntl);
@@ -777,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
/* ensure that the panel is detected */
- ASSERT(edp_hpd_high);
+ if (!edp_hpd_high)
+ DC_LOG_DC("%s: wait timed out!\n", __func__);
}
void dce110_edp_power_control(
@@ -1150,6 +1151,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct dtbclk_dto_params dto_params = {0};
int dp_hpo_inst;
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1176,7 +1179,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- }
+ } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->disable_symclk_se)
+ dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
+ link_enc->transmitter - TRANSMITTER_UNIPHY_A);
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
@@ -1585,6 +1590,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
*/
if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
+ pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false;
}
return DC_OK;
}
@@ -1792,10 +1798,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
power_down_all_hw_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false);
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
}
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
}
@@ -2013,6 +2022,10 @@ static bool should_enable_fbc(struct dc *dc,
if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
return false;
+ /* Replay should not be enabled */
+ if (pipe_ctx->stream->link->replay_settings.replay_feature_enabled)
+ return false;
+
/* Nothing to compress */
if (!pipe_ctx->plane_state)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 46eca5a21e1c..1289b9418877 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -942,7 +942,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1119,13 +1119,15 @@ static enum dc_status dce110_add_stream_to_ctx(
}
static struct pipe_ctx *dce110_acquire_underlay(
- struct dc_state *context,
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head_pipe)
{
+ struct dc_stream_state *stream = opp_head_pipe->stream;
struct dc *dc = stream->ctx->dc;
struct dce_hwseq *hws = dc->hwseq;
- struct resource_context *res_ctx = &context->res_ctx;
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
unsigned int underlay_idx = pool->underlay_pipe_index;
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
@@ -1173,7 +1175,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
stream->timing.h_total,
stream->timing.v_total,
stream->timing.pix_clk_100hz / 10,
- context->stream_count);
+ new_ctx->stream_count);
color_space_to_black_color(dc,
COLOR_SPACE_YCBCR601, &black_color);
@@ -1233,7 +1235,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
.panel_cntl_create = dce110_panel_cntl_create,
.validate_bandwidth = dce110_validate_bandwidth,
.validate_plane = dce110_validate_plane,
- .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dce110_acquire_underlay,
.add_stream_to_ctx = dce110_add_stream_to_ctx,
.validate_global = dce110_validate_global,
.find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 27cbb5b42c7e..6424e7f279dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -288,7 +288,7 @@ bool dce110_timing_generator_program_timing_generator(
uint32_t vsync_offset = dc_crtc_timing->v_border_bottom +
dc_crtc_timing->v_front_porch;
- uint32_t v_sync_start =dc_crtc_timing->v_addressable + vsync_offset;
+ uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset;
uint32_t hsync_offset = dc_crtc_timing->h_border_right +
dc_crtc_timing->h_front_porch;
@@ -603,7 +603,7 @@ void dce110_timing_generator_program_blanking(
{
uint32_t vsync_offset = timing->v_border_bottom +
timing->v_front_porch;
- uint32_t v_sync_start =timing->v_addressable + vsync_offset;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
uint32_t hsync_offset = timing->h_border_right +
timing->h_front_porch;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
index 690caaaff019..0ef9ebb3c1e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -127,7 +127,7 @@ static bool dce112_enable_display_power_gating(
else
cntl = ASIC_PIPE_DISABLE;
- if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
bp_result = dcb->funcs->enable_disp_power_gating(
dcb, controller_id + 1, cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index e115ff91aaaa..2b20180f1a32 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -873,7 +873,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -964,7 +964,7 @@ enum dc_status resource_map_phy_clock_resources(
{
/* acquire new resources */
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
if (!pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 3935fd455f0f..061221394ce0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -58,13 +58,13 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
-#include "dce80_resource.h"
-
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#endif
+#include "dce80/dce80_resource.h"
+
#ifndef mmDP_DPHY_INTERNAL_CTRL
#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index b33955928bd0..5ca9ab8a76e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -39,9 +39,6 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
-#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3
-#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1
-#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10
#define REG(reg)\
dpp->tf_regs->reg
@@ -200,8 +197,7 @@ static void dpp1_dscl_set_lb(
DITHER_EN, 0, /* Dithering enable: Disabled */
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
- }
- else {
+ } else {
/* DSCL caps: pixel data processed in float format */
REG_SET_2(LB_DATA_FORMAT, 0,
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
@@ -591,18 +587,6 @@ static void dpp1_dscl_set_manual_ratio_init(
static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
- int visual_confirm_on = 0;
- unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
-
- if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
- visual_confirm_on = 1;
-
- /* Check bounds to ensure the VC bar height was set to a sane value */
- if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) &&
- (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) {
- visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height;
- }
-
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
@@ -613,8 +597,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
/* Number of RECOUT horizontal pixels */
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
- RECOUT_HEIGHT, recout->height
- - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));
+ RECOUT_HEIGHT, recout->height);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a50309039d08..9834b75f1837 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
- if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index ee08b545aaea..377f1ba1a81b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
- if (!dcn10_is_dig_enabled(enc)) {
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
/*in DP_Alt_No_Connect case, we turn off the dig already,
after excuation the PHY w/a sequence, not allow touch PHY any more*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 8e9384094f6d..f2f55565e98a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane(
/* check insert_above_mpcc exist in tree->opp_list */
struct mpcc *temp_mpcc = tree->opp_list;
- while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
- temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc != insert_above_mpcc)
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc == NULL)
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 4b02f8443534..9f9145742f14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1055,7 +1055,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1083,14 +1083,15 @@ static enum dc_status dcn10_add_stream_to_ctx(
return result;
}
-static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
- struct dc_state *context,
+static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head_pipe)
{
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
+ struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
if (!head_pipe) {
ASSERT(0);
@@ -1271,7 +1272,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.link_enc_create = dcn10_link_encoder_create,
.panel_cntl_create = dcn10_panel_cntl_create,
.validate_bandwidth = dcn10_validate_bandwidth,
- .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn10_acquire_free_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4492bc2392b6..65fa9e21ad9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1054,9 +1054,9 @@ void dcn20_blank_pixel_data(
enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
struct pipe_ctx *odm_pipe;
int odm_cnt = 1;
-
- int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
- int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ int odm_slice_width, last_odm_slice_width, offset = 0;
if (stream->link->test_pattern_enabled)
return;
@@ -1066,8 +1066,8 @@ void dcn20_blank_pixel_data(
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_cnt++;
-
- width = width / odm_cnt;
+ odm_slice_width = h_active / odm_cnt;
+ last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
if (blank) {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
@@ -1080,29 +1080,32 @@ void dcn20_blank_pixel_data(
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
- dc->hwss.set_disp_pattern_generator(dc,
- pipe_ctx,
- test_pattern,
- test_pattern_color_space,
- stream->timing.display_color_depth,
- &black_color,
- width,
- height,
- 0);
+ odm_pipe = pipe_ctx;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ while (odm_pipe->next_odm_pipe) {
dc->hwss.set_disp_pattern_generator(dc,
odm_pipe,
- dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+ test_pattern,
test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
- width,
- height,
- 0);
+ odm_slice_width,
+ v_active,
+ offset);
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ dc->hwss.set_disp_pattern_generator(dc,
+ odm_pipe,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ &black_color,
+ last_odm_slice_width,
+ v_active,
+ offset);
+
if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
/* when exiting dynamic ODM need to reinit DPG state for unused pipes */
struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
@@ -1266,20 +1269,21 @@ void dcn20_pipe_control_lock(
}
if (flip_immediate && lock) {
- const int TIMEOUT_FOR_FLIP_PENDING = 100000;
+ const int TIMEOUT_FOR_FLIP_PENDING_US = 100000;
+ unsigned int polling_interval_us = 1;
int i;
temp_pipe = pipe;
while (temp_pipe) {
if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) {
- for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING_US / polling_interval_us; ++i) {
if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp))
break;
- udelay(1);
+ udelay(polling_interval_us);
}
/* no reason it should take this long for immediate flips */
- ASSERT(i != TIMEOUT_FOR_FLIP_PENDING);
+ ASSERT(i != TIMEOUT_FOR_FLIP_PENDING_US);
}
temp_pipe = temp_pipe->bottom_pipe;
}
@@ -1634,6 +1638,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|| pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
+ || plane_state->update_flags.bits.gamut_remap_change
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
dc->hwss.program_gamut_remap(pipe_ctx);
@@ -1949,7 +1954,8 @@ void dcn20_post_unlock_program_front_end(
struct dc_state *context)
{
int i;
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
+ unsigned int polling_interval_us = 1;
struct dce_hwseq *hwseq = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1971,10 +1977,9 @@ void dcn20_post_unlock_program_front_end(
pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
-
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
- udelay(1);
+ udelay(polling_interval_us);
}
}
@@ -2123,6 +2128,15 @@ void dcn20_optimize_bandwidth(
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ dc_dmub_srv_p_state_delegate(dc,
+ true, context);
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->clks.fw_based_mclk_switching = true;
+ } else {
+ dc->clk_mgr->clks.fw_based_mclk_switching = false;
+ }
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2707,6 +2721,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct dce_hwseq *hws = dc->hwseq;
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
@@ -2726,7 +2742,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- }
+ } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->enable_symclk_se)
+ dccg->funcs->enable_symclk_se(dccg,
+ stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
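
The reworked dcn20_blank_pixel_data() above programs one display pattern generator per ODM slice: every slice except the last gets h_active / odm_cnt pixels, the last slice absorbs the remainder, and each slice starts at the accumulated offset. A tiny standalone sketch of that arithmetic with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical timing: 4096 active pixels split across 3 ODM slices. */
	int h_active = 4096;
	int odm_cnt = 3;
	int odm_slice_width = h_active / odm_cnt;                              /* 1365 */
	int last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); /* 1366 */
	int offset = 0;
	int i;

	for (i = 0; i < odm_cnt - 1; i++) {
		printf("slice %d: width %d at offset %d\n", i, odm_slice_width, offset);
		offset += odm_slice_width;
	}
	printf("slice %d: width %d at offset %d\n", i, last_odm_slice_width, offset);
	return 0;
}
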
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 4cc8de2627ce..d587f807dfd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1294,7 +1294,7 @@ static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
{
enum dc_status status = DC_OK;
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1948,7 +1948,7 @@ int dcn20_validate_apply_pipe_split_flags(
v->ODMCombineEnablePerState[vlevel][pipe_plane];
if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- if (get_num_mpc_splits(pipe) == 1) {
+ if (resource_get_num_mpc_splits(pipe) == 1) {
/*If need split for mpc but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 MPC */
@@ -1956,7 +1956,7 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = 0; /* 2 -> 2 MPC */
else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 2 -> 1 MPC */
- } else if (get_num_mpc_splits(pipe) == 3) {
+ } else if (resource_get_num_mpc_splits(pipe) == 3) {
/*If need split for mpc but 4 way split already*/
if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
|| !pipe->bottom_pipe)) {
@@ -1965,7 +1965,7 @@ int dcn20_validate_apply_pipe_split_flags(
pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 4 -> 1 MPC */
split[i] = 0;
- } else if (get_num_odm_splits(pipe)) {
+ } else if (resource_get_num_odm_splits(pipe)) {
/* ODM -> MPC transition */
if (pipe->prev_odm_pipe) {
split[i] = 0;
@@ -1973,7 +1973,7 @@ int dcn20_validate_apply_pipe_split_flags(
}
}
} else {
- if (get_num_odm_splits(pipe) == 1) {
+ if (resource_get_num_odm_splits(pipe) == 1) {
/*If need split for odm but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 ODM */
@@ -1983,7 +1983,7 @@ int dcn20_validate_apply_pipe_split_flags(
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* exit ODM */
}
- } else if (get_num_odm_splits(pipe) == 3) {
+ } else if (resource_get_num_odm_splits(pipe) == 3) {
/*If need split for odm but 4 way split already*/
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
@@ -1993,7 +1993,7 @@ int dcn20_validate_apply_pipe_split_flags(
merge[i] = true; /* exit ODM */
}
split[i] = 0;
- } else if (get_num_mpc_splits(pipe)) {
+ } else if (resource_get_num_mpc_splits(pipe)) {
/* MPC -> ODM transition */
ASSERT(0); /* NOT expected yet */
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
@@ -2147,31 +2147,31 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
return voltage_supported;
}
-struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head)
{
- struct resource_context *res_ctx = &state->res_ctx;
- struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream);
+ struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master);
- if (!head_pipe)
- ASSERT(0);
+ ASSERT(otg_master);
- if (!idle_pipe)
+ if (!sec_dpp_pipe)
return NULL;
- idle_pipe->stream = head_pipe->stream;
- idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
- idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+ sec_dpp_pipe->stream = opp_head->stream;
+ sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg;
+ sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp;
- idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+ sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst;
- return idle_pipe;
+ return sec_dpp_pipe;
}
bool dcn20_get_dcc_compression_cap(const struct dc *dc,
@@ -2216,7 +2216,7 @@ static const struct resource_funcs dcn20_res_pool_funcs = {
.link_enc_create = dcn20_link_encoder_create,
.panel_cntl_create = dcn20_panel_cntl_create,
.validate_bandwidth = dcn20_validate_bandwidth,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index da0241e8c255..6d1a8924e57b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -58,10 +58,11 @@ unsigned int dcn20_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream);
+ const struct pipe_ctx *opp_head_pipe);
struct stream_encoder *dcn20_stream_encoder_create(
enum engine_id eng_id,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index fdba8a9f5c30..2dc4d2c1410b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -992,14 +992,15 @@ static struct hubp *dcn201_hubp_create(
return NULL;
}
-static struct pipe_ctx *dcn201_acquire_idle_pipe_for_layer(
- struct dc_state *context,
+static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head_pipe)
{
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
+ struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
if (!head_pipe)
ASSERT(0);
@@ -1067,7 +1068,7 @@ static struct resource_funcs dcn201_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = NULL,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
- .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn201_acquire_free_pipe_for_layer,
.populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
index 33fc9aa8621b..d07c04458d31 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
@@ -43,7 +43,7 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
index e44a37491c1e..b7efa777ec73 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
@@ -32,6 +32,5 @@ struct dccg *dccg21_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
#endif /* __DCN21_DCCG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index d693ea42d033..d1a25fe6c44f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -660,6 +660,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -854,8 +855,8 @@ bool dcn21_fast_validate_bw(struct dc *dc,
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
@@ -1387,7 +1388,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index e5b7ef7422b8..50dc83404644 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
- cur_rom_en = 1;
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
+ }
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index bf8864bc8a99..6cef62d7a2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -461,6 +461,11 @@ void dcn30_init_hw(struct dc *dc)
REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
}
+ if (dc->debug.enable_mem_low_power.bits.vga) {
+ // Power down VGA memory
+ REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
+ }
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
@@ -949,13 +954,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
}
void dcn30_prepare_bandwidth(struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context)
{
+ bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ /* Any transition into an FPO config should disable MCLK switching first to avoid
+ * driver and FW P-State synchronization issues.
+ */
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ dc->optimized_required = true;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ }
+
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
+ /*
+ * enabled -> enabled: do not disable
+ * enabled -> disabled: disable
+ * disabled -> enabled: don't care
+ * disabled -> disabled: don't care
+ */
+ if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ dc_dmub_srv_p_state_delegate(dc, false, context);
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ /* After disabling P-State, restore the original value to ensure we get the correct P-State
+ * on the next optimize. */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
+ }
}
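
The comments added above describe a save/force-off/restore pattern around FPO transitions. A minimal standalone sketch of the same idiom, in plain C with illustrative names only (not DC code), is:

	#include <stdbool.h>

	struct clk_state {
		bool p_state_change_support;
		bool fw_based_mclk_switching;
	};

	/* Force P-State switching off for the duration of a transition into an
	 * FW-assisted (FPO) MCLK-switching config, then restore the caller's
	 * original value so the next optimize pass still sees the real capability.
	 */
	static void prepare_with_pstate_off(struct clk_state *clks, bool entering_fpo,
					    void (*apply)(const struct clk_state *clks))
	{
		bool saved = clks->p_state_change_support;

		if (entering_fpo || clks->fw_based_mclk_switching)
			clks->p_state_change_support = false;

		apply(clks);	/* program clocks while P-State switching is disabled */

		clks->p_state_change_support = saved;	/* restore for the next optimize */
	}
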
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index dfb8f62765f2..5bf4d0aa6230 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
optc1->opp_count = 1;
}
-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
}
-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index fb06dc9a4893..d3a056c12b0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing);
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
void optc3_tg_init(struct timing_generator *optc);
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index abe4c12a10b5..88c0b24a3249 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -733,6 +733,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
};
@@ -1705,8 +1706,8 @@ noinline bool dcn30_internal_validate_bw(
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
@@ -2062,7 +2063,8 @@ bool dcn30_validate_bandwidth(struct dc *dc,
}
DC_FP_START();
- dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ if (dc->res_pool->funcs->calculate_wm_and_dlg)
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
BW_VAL_TRACE_END_WATERMARKS();
@@ -2214,7 +2216,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 7aa628c21973..9002cb10a6ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -11,7 +11,8 @@
# Makefile for dcn30.
DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
- dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+ dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+ dcn301_optc.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
new file mode 100644
index 000000000000..b3cfcb887905
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn301_optc.h"
+#include "dc.h"
+#include "dcn_calc_math.h"
+#include "dc_dmub_srv.h"
+
+#include "dml/dcn30/dcn30_fpu.h"
+#include "dc_trace.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+/**
+ * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
+ *
+ * @optc: timing_generator instance.
+ * @params: parameters used for Dynamic Refresh Rate.
+ */
+void optc301_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+ REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
+}
+
+
+void optc301_setup_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_8(OTG_TRIGA_CNTL, 0,
+ OTG_TRIGA_SOURCE_SELECT, 21,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+ OTG_TRIGA_POLARITY_SELECT, 0,
+ OTG_TRIGA_FREQUENCY_SELECT, 0,
+ OTG_TRIGA_DELAY, 0,
+ OTG_TRIGA_CLEAR, 1);
+}
+
+static struct timing_generator_funcs dcn30_tg_funcs = {
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc2_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank_color = optc3_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .triplebuffer_lock = optc3_triplebuffer_lock,
+ .triplebuffer_unlock = optc2_triplebuffer_unlock,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc3_lock,
+ .unlock = optc1_unlock,
+ .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+ .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc301_set_drr,
+ .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .tg_init = optc3_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .setup_global_swap_lock = NULL,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
+ .set_dwb_source = NULL,
+ .set_odm_bypass = optc3_set_odm_bypass,
+ .set_odm_combine = optc3_set_odm_combine,
+ .get_optc_source = optc2_get_optc_source,
+ .set_out_mux = optc3_set_out_mux,
+ .set_drr_trigger_window = optc3_set_drr_trigger_window,
+ .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+ .set_gsl = optc2_set_gsl,
+ .set_gsl_source_select = optc2_set_gsl_source_select,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc301_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+ .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+};
+
+void dcn301_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn30_tg_funcs;
+
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 4;
+ optc1->min_v_sync_width = 1;
+}
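
The optc301_set_drr() helper added above consumes a struct drr_params descriptor through the timing generator function table. A hedged usage sketch follows; the vertical-total values are illustrative and not taken from this patch:

	/* Illustrative only: request a ~48-60 Hz DRR window on a 1080p timing.
	 * A real caller derives these values from the stream timing.
	 */
	static void example_program_drr_window(struct timing_generator *tg)
	{
		struct drr_params params = {
			.vertical_total_min = 1125,	/* 60 Hz floor of the timing */
			.vertical_total_max = 1406,	/* ~48 Hz ceiling */
			.vertical_total_mid = 0,	/* mid-point replacement unused */
			.vertical_total_mid_frame_num = 0,
		};

		tg->funcs->set_drr(tg, &params);	/* dispatches to optc301_set_drr */
	}
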
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
new file mode 100644
index 000000000000..b49585682a15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN301_H__
+#define __DC_OPTC_DCN301_H__
+
+#include "dcn20/dcn20_optc.h"
+#include "dcn30/dcn30_optc.h"
+
+void dcn301_timing_generator_init(struct optc *optc1);
+void optc301_setup_manual_trigger(struct timing_generator *optc);
+void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
+#endif /* __DC_OPTC_DCN301_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 3485fbb1093e..79d6697d13b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -42,7 +42,7 @@
#include "dcn30/dcn30_hubp.h"
#include "irq/dcn30/irq_service_dcn30.h"
#include "dcn30/dcn30_dpp.h"
-#include "dcn30/dcn30_optc.h"
+#include "dcn301/dcn301_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
@@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(
tgn10->tg_shift = &optc_shift;
tgn10->tg_mask = &optc_mask;
- dcn30_timing_generator_init(tgn10);
+ dcn301_timing_generator_init(tgn10);
return &tgn10->base;
}
@@ -1379,7 +1379,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1425,9 +1425,9 @@ static bool dcn301_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 5ad6a22ee47d..447abcd593be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -103,6 +103,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
};
@@ -1135,7 +1136,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 45956ef6f3f9..adf4989177f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -85,6 +85,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
};
@@ -1061,7 +1062,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 65c1d754e2d6..8664f0c4c9b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
struct dcn_dccg *dccg_dcn,
enum phyd32clk_clock_source src)
{
- if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
if (src == PHYD32CLKC)
src = PHYD32CLKF;
if (src == PHYD32CLKD)
@@ -284,19 +285,11 @@ void dccg31_enable_symclk32_le(
/* select one of the PHYD32CLKs as the source for symclk32_le */
switch (hpo_le_inst) {
case 0:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE0_GATE_DISABLE, 1,
- SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, phyd32clk,
SYMCLK32_LE0_EN, 1);
break;
case 1:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE1_GATE_DISABLE, 1,
- SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, phyd32clk,
SYMCLK32_LE1_EN, 1);
@@ -319,19 +312,38 @@ void dccg31_disable_symclk32_le(
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, 0,
SYMCLK32_LE0_EN, 0);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE0_GATE_DISABLE, 0,
- SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, 0,
SYMCLK32_LE1_EN, 0);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE1_GATE_DISABLE, 0,
- SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ return;
+
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -660,10 +672,8 @@ void dccg31_init(struct dccg *dccg)
dccg31_disable_symclk32_se(dccg, 2);
dccg31_disable_symclk32_se(dccg, 3);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) {
- dccg31_disable_symclk32_le(dccg, 0);
- dccg31_disable_symclk32_le(dccg, 1);
- }
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
dccg31_disable_dpstreamclk(dccg, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index 0902ce5eb8a1..e3caaacf7493 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable);
+
void dccg31_set_physymclk(
struct dccg *dccg,
int phy_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index bd62502380d8..4596f3bac1b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -558,7 +558,7 @@ void dcn31_link_encoder_disable_output(
struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
struct dc_link *link;
- if (!dcn10_is_dig_enabled(enc))
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
return;
link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 0278bae50a9d..45143459eedd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
VID_STREAM_STATUS, 0,
10, 5000);
- /* Disable SDP tranmission */
+ /* Disable SDP transmission */
REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
SDP_STREAM_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 7445ed27852a..1f4e0b6261ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -1018,8 +1018,8 @@ void hubbub31_init(struct hubbub *hubbub)
/*done in hwseq*/
/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
- DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
- DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index fc33b5fcabe1..82de4fe2637f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -96,6 +96,7 @@
#include "dce/dmub_psr.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dce/dmub_replay.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
@@ -896,6 +897,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1479,6 +1481,9 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool)
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
@@ -1776,8 +1781,8 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
-
- dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ if (dc->res_pool->funcs->calculate_wm_and_dlg)
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1818,7 +1823,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn31_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -2085,6 +2090,14 @@ static bool dcn31_resource_construct(
goto create_fail;
}
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 0746ed31d1d1..ad3f019a784f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -362,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = {
.disable_symclk32_se = dccg31_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
+ .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
.set_physymclk = dccg31_set_physymclk,
.set_dtbclk_dto = dccg314_set_dtbclk_dto,
.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 6a9024aa3285..1c1fb2fa0822 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -93,6 +93,7 @@
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn314/display_mode_vba_314.h"
@@ -870,6 +871,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_z9_disable_interface = true,
.minimum_z8_residency_time = 2000,
.psr_skip_crtc_disable = true,
+ .replay_skip_crtc_disabled = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
@@ -908,15 +910,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.root_clock_optimization = {
.bits = {
.dpp = true,
- .dsc = false,
- .hdmistream = false,
- .hdmichar = false,
- .dpstream = false,
- .symclk32_se = false,
- .symclk32_le = false,
- .symclk_fe = false,
- .physymclk = false,
- .dpiasymclk = false,
+ .dsc = true,
+ .hdmistream = true,
+ .hdmichar = true,
+ .dpstream = true,
+ .symclk32_se = true,
+ .symclk32_le = true,
+ .symclk_fe = true,
+ .physymclk = true,
+ .dpiasymclk = true,
}
},
@@ -945,6 +947,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1528,6 +1531,9 @@ static void dcn314_resource_destruct(struct dcn314_resource_pool *pool)
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
@@ -1683,7 +1689,9 @@ static bool filter_modes_for_single_channel_workaround(struct dc *dc,
struct dc_state *context)
{
// Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR
- if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
+ if (dc->clk_mgr->bw_params->vram_type == 34 &&
+ dc->clk_mgr->bw_params->num_channels < 2 &&
+ context->stream_count > 1) {
int total_phy_pix_clk = 0;
for (int i = 0; i < context->stream_count; i++)
@@ -1732,8 +1740,8 @@ bool dcn314_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
-
- dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ if (dc->res_pool->funcs->calculate_wm_and_dlg)
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1765,7 +1773,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -2025,6 +2033,14 @@ static bool dcn314_resource_construct(
goto create_fail;
}
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index df3a438abda8..127487ea3d7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -895,6 +895,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1659,7 +1660,7 @@ static int dcn315_populate_dml_pipes_from_context(
{
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
@@ -1817,7 +1818,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index 707cf28bbceb..5fe2c61527df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -891,6 +891,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1704,7 +1705,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn316_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index 11e28e056cf7..921f58c0c729 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -49,7 +49,10 @@ static void dccg32_trigger_dio_fifo_resync(
uint32_t dispclk_rdivider_value = 0;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
- REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+
+	/* It is not valid to set the WDIVIDER to 0 */
+ if (dispclk_rdivider_value != 0)
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
static void dccg32_get_pixel_rate_div(
@@ -278,7 +281,8 @@ static void dccg32_set_dpstreamclk(
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
/* set the dtbclk_p source */
- dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
+ /* always program refclk as DTBCLK. No use-case expected to require DPREFCLK as refclk */
+ dccg32_set_dtbclk_p_src(dccg, DTBCLK0, otg_inst);
/* enabled to select one of the DTBCLKs for pipe */
switch (dp_hpo_inst) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index a18b9c0c5709..8bfef6d095b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -955,8 +955,8 @@ void hubbub32_init(struct hubbub *hubbub)
/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
- DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
- DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
}
/*
ignore the "df_pre_cstate_req" from the SDP port control.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index d52d5feeb311..680e7fa8d18a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -47,11 +47,9 @@
#include "clk_mgr.h"
#include "dsc.h"
#include "dcn20/dcn20_optc.h"
-#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32_resource.h"
#include "link.h"
-#include "dmub/inc/dmub_subvp_state.h"
#define DC_LOGGER_INIT(logger)
@@ -569,7 +567,7 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
- if (pipe_ctx->top_pipe == NULL) {
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
/*program shaper and 3dlut in MPC*/
ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
@@ -1204,10 +1202,10 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->top_pipe || pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe, OTG_MASTER))
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
+ if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
&& pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
@@ -1301,7 +1299,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
if (link->phy_state.symclk_ref_cnts.otg > 0) {
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && pipe_ctx->stream->link == link) {
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
@@ -1384,7 +1382,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
{
phantom_pipe->update_flags.raw = 0;
if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (phantom_pipe->stream && phantom_pipe->plane_state) {
+ if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
phantom_pipe->update_flags.bits.enable = 1;
phantom_pipe->update_flags.bits.mpcc = 1;
phantom_pipe->update_flags.bits.dppclk = 1;
@@ -1394,7 +1392,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
phantom_pipe->update_flags.bits.scaler = 1;
phantom_pipe->update_flags.bits.viewport = 1;
phantom_pipe->update_flags.bits.det_size = 1;
- if (!phantom_pipe->top_pipe && !phantom_pipe->prev_odm_pipe) {
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
phantom_pipe->update_flags.bits.odm = 1;
phantom_pipe->update_flags.bits.global_sync = 1;
}
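
The hwseq hunks above replace open-coded top_pipe/prev_odm_pipe/plane_state checks with resource_is_pipe_type(). A simplified standalone model of that classification is sketched below; it is illustrative only and not the DC implementation, which also handles ODM/MPC corner cases:

	#include <stdbool.h>

	struct example_pipe {
		void *stream, *plane_state, *top_pipe, *prev_odm_pipe;
	};

	enum example_pipe_type { EX_OTG_MASTER, EX_OPP_HEAD, EX_DPP_PIPE };

	/* Rough equivalents of the conditions being replaced in the hunks above */
	static bool example_is_pipe_type(const struct example_pipe *p,
					 enum example_pipe_type type)
	{
		switch (type) {
		case EX_OTG_MASTER:	/* owns the OTG: not an ODM or MPC secondary */
			return p->stream && !p->top_pipe && !p->prev_odm_pipe;
		case EX_OPP_HEAD:	/* top of an MPC blending tree */
			return p->stream && !p->top_pipe;
		case EX_DPP_PIPE:	/* has a plane to blend */
			return p->stream && p->plane_state;
		}
		return false;
	}
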
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index c8041cfd594d..3082da04a63d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -64,7 +64,7 @@ void mpc32_mpc_init(struct mpc *mpc)
}
}
-static void mpc32_power_on_blnd_lut(
+void mpc32_power_on_blnd_lut(
struct mpc *mpc,
uint32_t mpcc_id,
bool power_on)
@@ -120,7 +120,7 @@ static enum dc_lut_mode mpc32_get_post1dlut_current(struct mpc *mpc, uint32_t mp
return mode;
}
-static void mpc32_configure_post1dlut(
+void mpc32_configure_post1dlut(
struct mpc *mpc,
uint32_t mpcc_id,
bool is_ram_a)
@@ -163,7 +163,7 @@ static void mpc32_post1dlut_get_reg_field(
}
/*program blnd lut RAM A*/
-static void mpc32_program_post1dluta_settings(
+void mpc32_program_post1dluta_settings(
struct mpc *mpc,
uint32_t mpcc_id,
const struct pwl_params *params)
@@ -192,7 +192,7 @@ static void mpc32_program_post1dluta_settings(
}
/*program blnd lut RAM B*/
-static void mpc32_program_post1dlutb_settings(
+void mpc32_program_post1dlutb_settings(
struct mpc *mpc,
uint32_t mpcc_id,
const struct pwl_params *params)
@@ -220,7 +220,7 @@ static void mpc32_program_post1dlutb_settings(
cm_helper_program_gamcor_xfer_func(mpc->ctx, params, &gam_regs);
}
-static void mpc32_program_post1dlut_pwl(
+void mpc32_program_post1dlut_pwl(
struct mpc *mpc,
uint32_t mpcc_id,
const struct pwl_result_data *rgb,
@@ -321,7 +321,7 @@ static enum dc_lut_mode mpc32_get_shaper_current(struct mpc *mpc, uint32_t mpcc_
}
-static void mpc32_configure_shaper_lut(
+void mpc32_configure_shaper_lut(
struct mpc *mpc,
bool is_ram_a,
uint32_t mpcc_id)
@@ -336,7 +336,7 @@ static void mpc32_configure_shaper_lut(
}
-static void mpc32_program_shaper_luta_settings(
+void mpc32_program_shaper_luta_settings(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -486,7 +486,7 @@ static void mpc32_program_shaper_luta_settings(
}
-static void mpc32_program_shaper_lutb_settings(
+void mpc32_program_shaper_lutb_settings(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -637,7 +637,7 @@ static void mpc32_program_shaper_lutb_settings(
}
-static void mpc32_program_shaper_lut(
+void mpc32_program_shaper_lut(
struct mpc *mpc,
const struct pwl_result_data *rgb,
uint32_t num,
@@ -671,7 +671,7 @@ static void mpc32_program_shaper_lut(
}
-static void mpc32_power_on_shaper_3dlut(
+void mpc32_power_on_shaper_3dlut(
struct mpc *mpc,
uint32_t mpcc_id,
bool power_on)
@@ -789,7 +789,7 @@ static enum dc_lut_mode get3dlut_config(
}
-static void mpc32_select_3dlut_ram(
+void mpc32_select_3dlut_ram(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
@@ -803,7 +803,7 @@ static void mpc32_select_3dlut_ram(
}
-static void mpc32_select_3dlut_ram_mask(
+void mpc32_select_3dlut_ram_mask(
struct mpc *mpc,
uint32_t ram_selection_mask,
uint32_t mpcc_id)
@@ -816,7 +816,7 @@ static void mpc32_select_3dlut_ram_mask(
}
-static void mpc32_set3dlut_ram12(
+void mpc32_set3dlut_ram12(
struct mpc *mpc,
const struct dc_rgb *lut,
uint32_t entries,
@@ -848,7 +848,7 @@ static void mpc32_set3dlut_ram12(
}
-static void mpc32_set3dlut_ram10(
+void mpc32_set3dlut_ram10(
struct mpc *mpc,
const struct dc_rgb *lut,
uint32_t entries,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
index 2c2ecd053806..9ac584fa89ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
@@ -332,4 +332,65 @@ void dcn32_mpc_construct(struct dcn30_mpc *mpc30,
int num_mpcc,
int num_rmu);
+void mpc32_power_on_blnd_lut(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ bool power_on);
+void mpc32_program_post1dlut_pwl(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+void mpc32_program_post1dlutb_settings(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ const struct pwl_params *params);
+void mpc32_program_post1dluta_settings(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ const struct pwl_params *params);
+void mpc32_configure_post1dlut(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ bool is_ram_a);
+void mpc32_program_shaper_lut(
+ struct mpc *mpc,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ uint32_t mpcc_id);
+void mpc32_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
+void mpc32_program_shaper_luta_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
+void mpc32_configure_shaper_lut(
+ struct mpc *mpc,
+ bool is_ram_a,
+ uint32_t mpcc_id);
+void mpc32_power_on_shaper_3dlut(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ bool power_on);
+void mpc32_set3dlut_ram10(
+ struct mpc *mpc,
+ const struct dc_rgb *lut,
+ uint32_t entries,
+ uint32_t mpcc_id);
+void mpc32_set3dlut_ram12(
+ struct mpc *mpc,
+ const struct dc_rgb *lut,
+ uint32_t entries,
+ uint32_t mpcc_id);
+void mpc32_select_3dlut_ram_mask(
+ struct mpc *mpc,
+ uint32_t ram_selection_mask,
+ uint32_t mpcc_id);
+void mpc32_select_3dlut_ram(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ uint32_t mpcc_id);
#endif //__DC_MPCC_DCN32_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 1cc09799f92d..935cd23e6a01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1709,8 +1709,8 @@ void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe &&
- pipe->plane_state && pipe->stream &&
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
phantom_plane = pipe->plane_state;
phantom_stream = pipe->stream;
@@ -1892,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
bool subvp_in_use = false;
struct dc_crtc_timing *timing;
bool vsr_odm_support = false;
@@ -2038,7 +2038,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -2485,109 +2485,115 @@ struct resource_pool *dcn32_create_resource_pool(
return NULL;
}
-static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
- struct resource_context *res_ctx,
+/*
+ * Find the most suitable free pipe in res_ctx to use as a secondary DPP pipe
+ * for the input OPP head pipe.
+ *
+ * a free pipe - a pipe in the input res_ctx not yet used for any stream or
+ * plane.
+ * secondary dpp pipe - a pipe that gets inserted into an OPP head pipe's MPC
+ * blending tree. This is typically used for rendering MPO planes or
+ * additional offset areas in MPCC combine.
+ *
+ * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe
+ * -------------------------------------------------------------------------
+ *
+ * PROBLEM:
+ *
+ * 1. There is a hardware limitation that a secondary DPP pipe cannot be
+ * transferred from one MPC blending tree to another within a single frame;
+ * doing so could cause glitches on the screen.
+ *
+ * For instance, we cannot transition from state 1 to state 2 in one frame. This
+ * is because PIPE1 is transferred from PIPE0's MPC blending tree over to
+ * PIPE2's MPC blending tree, which is not supported by hardware.
+ * To support this transition we need to first remove PIPE1 from PIPE0's MPC
+ * blending tree in one frame and then insert PIPE1 into PIPE2's MPC blending
+ * tree in the next frame. This is not optimal as it delays the flip by two
+ * frames.
+ *
+ * State 1:
+ * PIPE0 -- secondary DPP pipe --> (PIPE1)
+ * PIPE2 -- secondary DPP pipe --> NONE
+ *
+ * State 2:
+ * PIPE0 -- secondary DPP pipe --> NONE
+ * PIPE2 -- secondary DPP pipe --> (PIPE1)
+ *
+ * 2. In general we want to minimize unnecessary changes in pipe topology.
+ * If a pipe is already part of the current blending tree and the plane
+ * topology is unchanged, we don't want to swap it with another free pipe
+ * on every update. Powering a pipe up or down requires a full update,
+ * which delays the flip by one frame. If we keep the original pipe we
+ * don't have to toggle its power, so we can flip faster.
+ */
+static int find_optimal_free_pipe_as_secondary_dpp_pipe(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
const struct resource_pool *pool,
- const struct pipe_ctx *primary_pipe)
+ const struct pipe_ctx *new_opp_head)
{
- int i;
- struct pipe_ctx *secondary_pipe = NULL;
- struct pipe_ctx *next_odm_mpo_pipe = NULL;
- int primary_index, preferred_pipe_idx;
- struct pipe_ctx *old_primary_pipe = NULL;
+ const struct pipe_ctx *cur_opp_head;
+ int free_pipe_idx;
- /*
- * Modified from find_idle_secondary_pipe
- * With windowed MPO and ODM, we want to avoid the case where we want a
- * free pipe for the left side but the free pipe is being used on the
- * right side.
- * Add check on current_state if the primary_pipe is the left side,
- * to check the right side ( primary_pipe->next_odm_pipe ) to see if
- * it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe )
- * - If so, then don't use this pipe
- * EXCEPTION - 3 plane ( 2 MPO plane ) case
- * - in this case, the primary pipe has already gotten a free pipe for the
- * MPO window in the left
- * - when it tries to get a free pipe for the MPO window on the right,
- * it will see that it is already assigned to the right side
- * ( primary_pipe->next_odm_pipe ). But in this case, we want this
- * free pipe, since it will be for the right side. So add an
- * additional condition, that skipping the free pipe on the right only
- * applies if the primary pipe has no bottom pipe currently assigned
- */
- if (primary_pipe) {
- primary_index = primary_pipe->pipe_idx;
- old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
- if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
- && (!primary_pipe->bottom_pipe))
- next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;
-
- preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
- if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
- !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
- secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
- secondary_pipe->pipe_idx = preferred_pipe_idx;
- }
- }
+ cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx];
+ free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+ cur_res_ctx, new_res_ctx, cur_opp_head);
- /*
- * search backwards for the second pipe to keep pipe
- * assignment more consistent
+	/* If we have not found a free secondary pipe by this point, we will
+	 * need to wait for at least one frame to complete the transition
+	 * sequence.
*/
- if (!secondary_pipe)
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if ((res_ctx->pipe_ctx[i].stream == NULL) &&
- !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
- secondary_pipe = &res_ctx->pipe_ctx[i];
- secondary_pipe->pipe_idx = i;
- break;
- }
- }
+ if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
+ cur_res_ctx, new_res_ctx, pool);
+
+	/* If we have not found a free secondary pipe by this point, we will
+	 * need to wait for at least two frames to complete the transition
+	 * sequence. It doesn't really matter which pipe we take from the
+	 * currently enabled pipes; swapping one pipe or several costs the
+	 * same frame time.
+	 */
+ if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+ cur_res_ctx, new_res_ctx, pool);
- return secondary_pipe;
+ if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool);
+
+ return free_pipe_idx;
}
-struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream,
- struct pipe_ctx *head_pipe)
+ const struct pipe_ctx *opp_head_pipe)
{
- struct resource_context *res_ctx = &state->res_ctx;
- struct pipe_ctx *idle_pipe, *pipe;
- struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
- int head_index;
-
- if (!head_pipe)
- ASSERT(0);
-
- /*
- * Modified from dcn20_acquire_idle_pipe_for_layer
- * Check if head_pipe in old_context already has bottom_pipe allocated.
- * - If so, check if that pipe is available in the current context.
- * -- If so, reuse pipe from old_context
- */
- head_index = head_pipe->pipe_idx;
- pipe = &old_ctx->pipe_ctx[head_index];
- if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
- idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
- idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
+ int free_pipe_idx =
+ find_optimal_free_pipe_as_secondary_dpp_pipe(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx,
+ pool, opp_head_pipe);
+ struct pipe_ctx *free_pipe;
+
+ if (free_pipe_idx >= 0) {
+ free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
+ free_pipe->pipe_idx = free_pipe_idx;
+ free_pipe->stream = opp_head_pipe->stream;
+ free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg;
+ free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp;
+
+ free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx];
+ free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx];
+ free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx];
+ free_pipe->plane_res.mpcc_inst =
+ pool->dpps[free_pipe->pipe_idx]->inst;
} else {
- idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
- if (!idle_pipe)
- return NULL;
+ ASSERT(opp_head_pipe);
+ free_pipe = NULL;
}
- idle_pipe->stream = head_pipe->stream;
- idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
- idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
-
- idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
-
- return idle_pipe;
+ return free_pipe;
}
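
A minimal caller sketch (not part of this patch) of the new hook through the
resource_funcs table; only the hook itself comes from the patch, the
surrounding variable names are placeholders:

	/* Acquire a secondary DPP pipe for MPC combine; when all four searches
	 * above fail the hook returns NULL and the caller keeps the plane on
	 * the OPP head pipe for this frame.
	 */
	struct pipe_ctx *sec_dpp_pipe =
		pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
			dc->current_state, new_ctx, pool, opp_head_pipe);

	if (!sec_dpp_pipe) {
		/* no free pipe found; skip MPC combine for this plane */
	}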
unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 81e443170829..103a2b54d025 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -136,11 +136,11 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
bool dcn32_is_center_timing(struct pipe_ctx *pipe);
bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
-struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream,
- struct pipe_ctx *head_pipe);
+ const struct pipe_ctx *opp_head_pipe);
void dcn32_determine_det_override(struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 5be242a1b82c..3ad2b48954e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -641,16 +641,21 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
uint8_t non_subvp_pipes = 0;
bool drr_pipe_found = false;
bool drr_psr_capable = false;
+ uint64_t refresh_rate = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->stream)
- continue;
-
- if (pipe->plane_state && !pipe->top_pipe) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
+ resource_is_pipe_type(pipe, DPP_PIPE)) {
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
subvp_count++;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+ }
if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
@@ -662,7 +667,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
}
}
- if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable)
+ if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
+ ((uint32_t)refresh_rate < 120))
result = true;
return result;
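
For reference, the refresh-rate computation added above is pure 64-bit integer
math, roughly ceil(pixel_clock / (h_total * v_total)). A worked example with an
assumed standard 1080p60 timing (pix_clk_100hz = 1485000, h_total = 2200,
v_total = 1125):

	refresh_rate = 1485000 * 100 + 2200 * 1125 - 1 = 150974999
	refresh_rate = 150974999 / 1125 = 134199
	refresh_rate = 134199 / 2200  = 60   /* < 120, so the DRR check can pass */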
@@ -693,16 +699,21 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
bool drr_pipe_found = false;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
bool vblank_psr_capable = false;
+ uint64_t refresh_rate = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->stream)
- continue;
-
- if (pipe->plane_state && !pipe->top_pipe) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
+ resource_is_pipe_type(pipe, DPP_PIPE)) {
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
subvp_count++;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+ }
if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
@@ -715,7 +726,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
}
if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
+ ((uint32_t)refresh_rate < 120) &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
result = true;
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index a53478e15ce3..8d73cceb485b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -1588,7 +1588,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c
index 0100a6053ab6..f2dfa96f9ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c
@@ -3015,7 +3015,7 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[],
int i, num_active_pipes = 0;
for (i = 0; i < pipe_count; i++) {
- if (!pipe[i].stream || pipe[i].top_pipe)
+ if (!resource_is_pipe_type(&pipe[i], OPP_HEAD))
continue;
active_pipes[num_active_pipes++] = &pipe[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index e73f089c84bb..50b0434354f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1258,7 +1258,7 @@ bool dcn_validate_bandwidth(
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
+ hsplit_pipe = resource_find_free_secondary_pipe_legacy(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 8ae5ddbd1b27..8afda5ecc0cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1305,7 +1305,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
pipes[pipe_cnt].dout.is_virtual = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
+ switch (resource_get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index f294f2f8c75b..57cf0358cc43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevels;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double HostVMInefficiencyFactor;
double VRatioClamped;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 9af1a43c042b..ad741a723c0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -784,8 +784,7 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o
Delay = Delay + 1;
// sft
Delay = Delay + 1;
- }
- else {
+ } else {
// sfr
Delay = Delay + 2;
// dsccif
@@ -3489,8 +3488,7 @@ static double TruncToValidBPP(
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
- }
- else {
+ } else {
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 43016c462251..adea459e7d36 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -3505,7 +3505,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
@@ -4135,7 +4135,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN31_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
- if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
+ if (v->Output[k] == dm_hdmi) {
+ FMTBufferExceeded = true;
+ } else if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index d9e049e7ff0a..07adb614366e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -31,6 +31,7 @@
#include "dml/dcn20/dcn20_fpu.h"
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/display_mode_vba.h"
+#include "dml/dml_inline_defs.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.VBlankNomDefaultUS = 668,
@@ -273,6 +274,25 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+/*
+ * micro_sec_to_vert_lines() - converts a time to a number of vertical lines for a given timing
+ *
+ * @param: num_us: number of microseconds
+ * @return: number of vertical lines. If the time does not map to an exact number of
+ * lines, it is rounded up to the next whole line to guarantee num_us
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+ unsigned int num_lines = 0;
+ unsigned int lines_time_in_ns = 1000.0 *
+ (((float)timing->h_total * 1000.0) /
+ ((float)timing->pix_clk_100hz / 10.0));
+
+ num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+ return num_lines;
+}
+
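
A worked example of the conversion above, using an assumed 1080p CEA timing
(h_total = 2200, pix_clk_100hz = 1485000, i.e. 148.5 MHz):

	lines_time_in_ns = 1000 * (2200 * 1000) / (1485000 / 10) ≈ 14815
	num_lines = ceil(1000 * 668 / 14815) = ceil(45.1) = 46

so the 668 us VBlankNomDefaultUS budget maps to 46 lines for this timing.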
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
@@ -289,15 +309,22 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
+ unsigned int num_lines = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+ num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing);
+
+ if (pipe->stream->adjust.v_total_min != 0)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+ else
+ pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
- pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
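
Taken together, the three min/max statements above pin vblank_nom (in lines) to

	vblank_nom = min(max(min(v_total - vactive, num_lines), v_sync_width),
			 max_allowed_vblank_nom)

where num_lines is VBlankNomDefaultUS converted by micro_sec_to_vert_lines();
the old code compared the line count against the raw microsecond value directly.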
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 9010c47476e9..a94aa0f21a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
@@ -4227,7 +4227,9 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN314_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
- if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
+ if (v->Output[k] == dm_hdmi) {
+ FMTBufferExceeded = true;
+ } else if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a95034801712..711d4085b33b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -37,7 +37,7 @@
static const struct subvp_high_refresh_list subvp_high_refresh_list = {
.min_refresh = 120,
- .max_refresh = 165,
+ .max_refresh = 175,
.res = {
{.width = 3840, .height = 2160, },
{.width = 3440, .height = 1440, },
@@ -756,7 +756,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Find the minimum pipe split count for non SubVP pipes
- if (pipe->stream && !pipe->top_pipe &&
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
@@ -886,7 +886,8 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
- if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(pipe, DPP_PIPE))
continue;
// Find the SubVP pipe
@@ -899,7 +900,8 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
drr_pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe only
- if (!drr_pipe->stream || !drr_pipe->plane_state || drr_pipe->top_pipe || drr_pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(drr_pipe, DPP_PIPE))
continue;
if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
@@ -980,7 +982,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
- if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(pipe, DPP_PIPE))
continue;
if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
@@ -1040,7 +1043,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
uint32_t i;
uint8_t subvp_count = 0;
uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0;
- uint32_t refresh_rate = 0;
+ uint64_t refresh_rate = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1050,19 +1053,22 @@ static bool subvp_subvp_admissable(struct dc *dc,
if (pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
- / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
- if (refresh_rate < min_refresh)
- min_refresh = refresh_rate;
- if (refresh_rate > max_refresh)
- max_refresh = refresh_rate;
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+
+ if ((uint32_t)refresh_rate < min_refresh)
+ min_refresh = (uint32_t)refresh_rate;
+ if ((uint32_t)refresh_rate > max_refresh)
+ max_refresh = (uint32_t)refresh_rate;
subvp_count++;
}
}
if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) ||
- (min_refresh >= 120 && max_refresh >= 120)))
+ (min_refresh >= subvp_high_refresh_list.min_refresh &&
+ max_refresh <= subvp_high_refresh_list.max_refresh)))
result = true;
return result;
@@ -1715,8 +1721,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& !dc->config.enable_windowed_mpo_odm
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index a50e7f4dce42..ecea008f19d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3459,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule(
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
+ double LinesForPrefetchBandwidth = 0;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
double Tvm_trips;
@@ -3888,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule(
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
+ /* Clamp to oto for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
+ /* Clamp to equ for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_equ;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
@@ -3900,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule(
*DestinationLinesToRequestRowInVBlank =
dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
@@ -4124,7 +4129,7 @@ void dml32_CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
if (GPUVMEnable == true && HostVMEnable == true)
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 58dd62cce4bb..3966845c7694 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed;
static bool dsc_policy_disable_dsc_stream_overhead;
+static bool disable_128b_132b_stream_overhead;
+
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
@@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead;
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+/* Need to account for padding due to pixel-to-symbol packing
+ * for uncompressed 128b/132b streams.
+ */
+static uint32_t apply_128b_132b_stream_overhead(
+ const struct dc_crtc_timing *timing, const uint32_t kbps)
+{
+ uint32_t total_kbps = kbps;
+
+ if (disable_128b_132b_stream_overhead)
+ return kbps;
+
+ if (!timing->flags.DSC) {
+ struct fixed31_32 bpp;
+ struct fixed31_32 overhead_factor;
+
+ bpp = dc_fixpt_from_int(kbps);
+ bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10);
+
+ /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size)
+ * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive
+ */
+ overhead_factor = dc_fixpt_from_int(timing->h_addressable);
+ overhead_factor = dc_fixpt_mul(overhead_factor, bpp);
+ overhead_factor = dc_fixpt_div_int(overhead_factor, 128);
+ overhead_factor = dc_fixpt_div(
+ dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)),
+ overhead_factor);
+
+ total_kbps = dc_fixpt_ceil(
+ dc_fixpt_mul_int(overhead_factor, total_kbps));
+ }
+
+ return total_kbps;
+}
+
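
A worked example of the overhead factor above for an uncompressed stream, using
an assumed 1366x768 timing at 8 bpc RGB (bpp = 24):

	Symbols_per_HActive = 1366 * 24 / 128 = 256.125
	Overhead_factor     = ceil(256.125) / 256.125 = 257 / 256.125 ≈ 1.0034

so the reported bandwidth grows by about 0.34%; timings whose HActive * bpp is
an exact multiple of 128 see no increase.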
uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
+ const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
@@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
kbps = kbps * 2 / 3;
}
+ if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+ kbps = apply_128b_132b_stream_overhead(timing, kbps);
+
return kbps;
}
@@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range(
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range);
static uint32_t compute_bpp_x16_from_target_bandwidth(
@@ -133,6 +175,7 @@ static bool setup_dsc_config(
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range(
uint32_t max_bpp_x16,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
@@ -417,11 +461,11 @@ bool dc_dsc_compute_bandwidth_range(
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, &config);
+ &options, link_encoding, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
- config.num_slices_h, &dsc_common_caps, timing, range);
+ config.num_slices_h, &dsc_common_caps, timing, link_encoding, range);
return is_dsc_possible;
}
@@ -557,6 +601,7 @@ static bool decide_dsc_bandwidth_range(
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
@@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range(
/* populate output structure */
if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding);
/* max dsc target bpp */
range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
@@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16(
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const int num_slices_h,
+ const enum dc_link_encoding_format link_encoding,
int *target_bpp_x16)
{
struct dc_dsc_bw_range range;
@@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16(
*target_bpp_x16 = 0;
if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
- num_slices_h, dsc_common_caps, timing, &range)) {
+ num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
if (target_bandwidth_kbps >= range.stream_kbps) {
if (policy->enable_dsc_when_not_needed)
/* enable max bpp even dsc is not needed */
@@ -796,6 +842,7 @@ static bool setup_dsc_config(
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -995,6 +1042,7 @@ static bool setup_dsc_config(
target_bandwidth_kbps,
timing,
num_slices_h,
+ link_encoding,
&target_bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
@@ -1023,6 +1071,7 @@ bool dc_dsc_compute_config(
const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
bool is_dsc_possible = false;
@@ -1032,7 +1081,7 @@ bool dc_dsc_compute_config(
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, dsc_cfg);
+ timing, options, link_encoding, dsc_cfg);
return is_dsc_possible;
}
@@ -1165,6 +1214,11 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
dsc_policy_disable_dsc_stream_overhead = disable;
}
+void dc_set_disable_128b_132b_stream_overhead(bool disable)
+{
+ disable_128b_132b_stream_overhead = disable;
+}
+
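
A hedged sketch of how the new knob could be wired up by a display manager;
the debug field name below is an assumption, only
dc_set_disable_128b_132b_stream_overhead() itself comes from this patch:

	/* e.g. in DM init, before any mode validation */
	if (dc->debug.disable_128b_132b_stream_overhead)
		dc_set_disable_128b_132b_stream_overhead(true);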
void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
{
options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 59884ef651b3..4a2bf81286d8 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -31,21 +31,21 @@
/****************************** new register headers */
/*** following in header */
-#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \
+#define DDC_GPIO_REG_LIST_ENTRY(type, cd, id) \
.type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\
.type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT
-#define DDC_GPIO_REG_LIST(cd,id) \
+#define DDC_GPIO_REG_LIST(cd, id) \
{\
- DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
- DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\
- DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\
- DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ DDC_GPIO_REG_LIST_ENTRY(MASK, cd, id),\
+ DDC_GPIO_REG_LIST_ENTRY(A, cd, id),\
+ DDC_GPIO_REG_LIST_ENTRY(EN, cd, id),\
+ DDC_GPIO_REG_LIST_ENTRY(Y, cd, id)\
}
-#define DDC_REG_LIST(cd,id) \
- DDC_GPIO_REG_LIST(cd,id),\
+#define DDC_REG_LIST(cd, id) \
+ DDC_GPIO_REG_LIST(cd, id),\
.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
#define DDC_REG_LIST_DCN2(cd, id) \
@@ -54,34 +54,34 @@
.phy_aux_cntl = REG(PHY_AUX_CNTL), \
.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
+#define DDC_GPIO_VGA_REG_LIST_ENTRY(type, cd)\
.type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
.type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT
#define DDC_GPIO_VGA_REG_LIST(cd) \
{\
- DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\
- DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\
- DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\
- DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(MASK, cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(A, cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(EN, cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(Y, cd)\
}
#define DDC_VGA_REG_LIST(cd) \
DDC_GPIO_VGA_REG_LIST(cd),\
.ddc_setup = mmDC_I2C_DDCVGA_SETUP
-#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \
+#define DDC_GPIO_I2C_REG_LIST_ENTRY(type, cd) \
.type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\
.type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT
#define DDC_GPIO_I2C_REG_LIST(cd) \
{\
- DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\
- DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\
- DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\
- DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(MASK, cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(A, cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(EN, cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(Y, cd)\
}
#define DDC_I2C_REG_LIST(cd) \
@@ -150,12 +150,12 @@ struct ddc_sh_mask {
#define ddc_data_regs(id) \
{\
- DDC_REG_LIST(DATA,id)\
+ DDC_REG_LIST(DATA, id)\
}
#define ddc_clk_regs(id) \
{\
- DDC_REG_LIST(CLK,id)\
+ DDC_REG_LIST(CLK, id)\
}
#define ddc_vga_data_regs \
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
index dcfdd71b2304..debb363cfcf4 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
@@ -36,17 +36,17 @@
#define ONE_MORE_5 6
-#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \
+#define HPD_GPIO_REG_LIST_ENTRY(type, cd, id) \
.type ## _reg = REG(DC_GPIO_HPD_## type),\
.type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT
#define HPD_GPIO_REG_LIST(id) \
{\
- HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
- HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\
- HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\
- HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ HPD_GPIO_REG_LIST_ENTRY(MASK, cd, id),\
+ HPD_GPIO_REG_LIST_ENTRY(A, cd, id),\
+ HPD_GPIO_REG_LIST_ENTRY(EN, cd, id),\
+ HPD_GPIO_REG_LIST_ENTRY(Y, cd, id)\
}
#define HPD_REG_LIST(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 034610b74a37..027aec70c070 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -125,39 +125,15 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context);
- /*
- * Acquires a free pipe for the head pipe.
- * The head pipe is first pipe in the current context that matches the stream
- * and does not have a top pipe or prev_odm_pipe.
- */
- struct pipe_ctx *(*acquire_idle_pipe_for_layer)(
- struct dc_state *context,
- const struct resource_pool *pool,
- struct dc_stream_state *stream);
-
- /*
- * Acquires a free pipe for the head pipe with some additional checks for odm.
- * The head pipe is passed in as an argument unlike acquire_idle_pipe_for_layer
- * where it is read from the context. So this allows us look for different
- * idle_pipe if the head_pipes are different ( ex. in odm 2:1 when we have
- * a left and right pipe ).
- *
- * It also checks the old context to see if:
- *
- * 1. a pipe has already been allocated for the head pipe. If so, it will
- * try to select that pipe as the idle pipe if it is available in the current
- * context.
- * 2. if the head_pipe is on the left, it will check if the right pipe has
- * a pipe already allocated. If so, it will not use that pipe if it is
- * selected as the idle pipe.
- */
- struct pipe_ctx *(*acquire_idle_pipe_for_head_pipe_in_layer)(
- struct dc_state *context,
+ struct pipe_ctx *(*acquire_free_pipe_as_secondary_dpp_pipe)(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream,
- struct pipe_ctx *head_pipe);
+ const struct pipe_ctx *opp_head_pipe);
- enum dc_status (*validate_plane)(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+ enum dc_status (*validate_plane)(
+ const struct dc_plane_state *plane_state,
+ struct dc_caps *caps);
enum dc_status (*add_stream_to_ctx)(
struct dc *dc,
@@ -304,6 +280,8 @@ struct resource_pool {
struct dmcu *dmcu;
struct dmub_psr *psr;
+ struct dmub_replay *replay;
+
struct abm *multiple_abms[MAX_PIPES];
const struct resource_funcs *funcs;
@@ -572,6 +550,23 @@ struct dc_state {
} perf_params;
};
+struct replay_context {
+ /* ddc line */
+ enum channel_id aux_inst;
+ /* Transmitter id */
+ enum transmitter digbe_inst;
+ /* Engine Id is used for Dig Be source select */
+ enum engine_id digfe_inst;
+ /* Controller Id used for Dig Fe source select */
+ enum controller_id controllerId;
+ unsigned int line_time_in_ns;
+};
+
+enum dc_replay_enable {
+ DC_REPLAY_DISABLE = 0,
+ DC_REPLAY_ENABLE = 1,
+};
+
struct dc_bounding_box_max_clk {
int max_dcfclk_mhz;
int max_dispclk_mhz;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index d2190a3320f6..33db15d69f23 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -27,6 +27,8 @@
#include "dm_services_types.h"
+struct abm_save_restore;
+
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
@@ -55,6 +57,10 @@ struct abm_funcs {
unsigned int bytes,
unsigned int inst);
bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
+ bool (*save_restore)(
+ struct abm *abm,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData);
bool (*set_pipe_ex)(struct abm *abm,
unsigned int otg_inst,
unsigned int option,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index 7254182b7c72..af6b9509d09d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -172,8 +172,6 @@ struct aux_engine_funcs {
struct aux_engine *engine,
uint8_t *returned_bytes);
bool (*is_engine_available)(struct aux_engine *engine);
- enum i2caux_engine_type (*get_engine_type)(
- const struct aux_engine *engine);
bool (*acquire)(
struct aux_engine *engine,
struct ddc *ddc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 8dc804bbe98b..3e2f0f64c98c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -123,6 +123,11 @@ struct dccg_funcs {
struct dccg *dccg,
int hpo_le_inst);
+ void (*set_symclk32_le_root_clock_gating)(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable);
+
void (*set_physymclk)(
struct dccg *dccg,
int phy_inst,
@@ -167,6 +172,16 @@ struct dccg_funcs {
struct dccg *dccg,
unsigned int dpp_inst,
bool clock_on);
+
+ void (*enable_symclk_se)(
+ struct dccg *dccg,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
+
+ void (*disable_symclk_se)(
+ struct dccg *dccg,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index aaa293613846..f5677dbb4e7d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -49,6 +49,8 @@ struct dcn_hubbub_wm_set {
uint32_t dram_clk_change;
uint32_t usr_retrain;
uint32_t fclk_pstate_change;
+ uint32_t sr_enter_exit_Z8;
+ uint32_t sr_enter_Z8;
};
struct dcn_hubbub_wm {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index c4fbbf08ef86..a6dedf3c7d74 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -269,6 +269,7 @@ struct stream_encoder_funcs {
struct stream_encoder *enc, unsigned int pix_per_container);
void (*enable_fifo)(struct stream_encoder *enc);
void (*disable_fifo)(struct stream_encoder *enc);
+ void (*map_stream_to_link)(struct stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst);
};
struct hpo_dp_stream_encoder_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index f839494d59d8..e3e8c76c17cf 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -179,6 +179,10 @@ struct link_service {
int (*aux_transfer_raw)(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
+ bool (*configure_fixed_vs_pe_retimer)(
+ struct ddc_service *ddc,
+ const uint8_t *data,
+ uint32_t len);
bool (*aux_transfer_with_retries_no_mutex)(struct ddc_service *ddc,
struct aux_payload *payload);
bool (*is_in_aux_transaction_mode)(struct ddc_service *ddc);
@@ -269,6 +273,20 @@ struct link_service {
uint16_t psr_vtotal_su);
void (*edp_get_psr_residency)(
const struct dc_link *link, uint32_t *residency);
+
+ bool (*edp_get_replay_state)(
+ const struct dc_link *link, uint64_t *state);
+ bool (*edp_set_replay_allow_active)(struct dc_link *dc_link,
+ const bool *enable, bool wait, bool force_static,
+ const unsigned int *power_opts);
+ bool (*edp_setup_replay)(struct dc_link *link,
+ const struct dc_stream_state *stream);
+ bool (*edp_set_coasting_vtotal)(
+ struct dc_link *link, uint16_t coasting_vtotal);
+ bool (*edp_replay_residency)(const struct dc_link *link,
+ unsigned int *residency, const bool is_start,
+ const bool is_alpm);
+
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index eaeb684c8a48..e546b9c506c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -142,10 +142,6 @@ struct clock_source *dc_resource_find_first_free_pll(
struct resource_context *res_ctx,
const struct resource_pool *pool);
-struct pipe_ctx *resource_get_head_pipe_for_stream(
- struct resource_context *res_ctx,
- struct dc_stream_state *stream);
-
bool resource_attach_surfaces_to_context(
struct dc_plane_state *const *plane_state,
int surface_count,
@@ -153,11 +149,232 @@ bool resource_attach_surfaces_to_context(
struct dc_state *context,
const struct resource_pool *pool);
-struct pipe_ctx *find_idle_secondary_pipe(
+#define FREE_PIPE_INDEX_NOT_FOUND -1
+
+/*
+ * Pipe types are identified based on MUXes in the DCN front end that are
+ * capable of taking input from one DCN pipeline to another. The name takes
+ * the form XXXX_YYYY, where XXXX is the DCN front end hardware block the
+ * pipeline ends with and YYYY is the rendering role that the pipe is in.
+ *
+ * For instance OTG_MASTER is a pipe ending with OTG hardware block in its
+ * pipeline and it is in a role of a master pipe for timing generation.
+ *
+ * For quick reference a diagram of each pipe type's areas of responsibility
+ * for outputting timings on the screen is shown below:
+ *
+ * Timing Active for Stream 0
+ * __________________________________________________
+ * |OTG master 0 (OPP head 0)|OPP head 2 (DPP pipe 2) |
+ * | (DPP pipe 0)| |
+ * | Top Plane 0 | |
+ * | ______________|____ |
+ * | |DPP pipe 1 |DPP | |
+ * | | |pipe| |
+ * | | Bottom |3 | |
+ * | | Plane 1 | | |
+ * | | | | |
+ * | |______________|____| |
+ * | | |
+ * | | |
+ * | ODM slice 0 | ODM slice 1 |
+ * |_________________________|________________________|
+ *
+ * Timing Active for Stream 1
+ * __________________________________________________
+ * |OTG master 4 (OPP head 4) |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | Blank Pixel Data |
+ * | (generated by DPG4) |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ * Inter-pipe Relation
+ * __________________________________________________
+ * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
+ * | | plane 0 | slice 0 | |
+ * | 0 | -------------MPC---------ODM----------- |
+ * | | plane 1 | | | | |
+ * | 1 | ------------- | | | |
+ * | | plane 0 | slice 1 | | |
+ * | 2 | -------------MPC--------- | |
+ * | | plane 1 | | | |
+ * | 3 | ------------- | | |
+ * | | | blank | |
+ * | 4 | | ----------------------- |
+ * | | | | |
+ * | 5 | (FREE) | | |
+ * |________|_______________|___________|_____________|
+ */
+enum pipe_type {
+ /* free pipe - free pipe is an uninitialized pipe without a stream
+ * associated with it. It is a free DCN pipe resource. It can be
+ * acquired as any type of pipe.
+ */
+ FREE_PIPE,
+
+ /* OTG master pipe - the master pipe of its OPP head pipes with a
+ * functional OTG. It merges all its OPP head pipes pixel data in ODM
+ * block and output to backend DIG. OTG master pipe is responsible for
+ * generating entire crtc timing to backend DIG. An OTG master pipe may
+ * or may not have a plane. If it has a plane it blends it as the left
+ * most MPC slice of the top most layer. If it doesn't have a plane it
+ * can output pixel data from its OPP head pipes' test pattern
+ * generators (DPG) such as solid black pixel data to blank the screen.
+ */
+ OTG_MASTER,
+
+ /* OPP head pipe - the head pipe of an MPC blending tree with a
+ * functional OPP outputting to an OTG. OPP head pipe is responsible for
+ * processing output pixels in its own ODM slice. It may or may not have
+ * a plane. If it has a plane it blends it as the top most layer within
+ * its own ODM slice. If it doesn't have a plane it can output pixel
+ * data from its DPG such as solid black pixel data to blank the pixel
+ * data in its own ODM slice. OTG master pipe is also an OPP head pipe
+ * but with more responsibility.
+ */
+ OPP_HEAD,
+
+ /* DPP pipe - the pipe with a functional DPP outputting to an OPP head
+ * pipe's MPC. DPP pipe is responsible for processing pixel data from
+ * its own MPC slice of a plane. It must be connected to an OPP head
+ * pipe and it must have a plane associated with it.
+ */
+ DPP_PIPE,
+};
+
+/*
+ * Determine if the input pipe ctx is of a pipe type.
+ * return - true if pipe ctx is of the input type.
+ */
+bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type);
+
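
A short usage sketch mirroring the call sites converted elsewhere in this
patch; in those sites the type check subsumes the old NULL-stream / top_pipe /
prev_odm_pipe tests:

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* an OTG master that also renders a plane
		 * (OTG_MASTER implies OPP_HEAD)
		 */
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!resource_is_pipe_type(pipe, DPP_PIPE))
			continue;
	}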
+/*
+ * Determine if the input pipe ctx is used for rendering a plane with MPCC
+ * combine. MPCC combine is a hardware feature to combine multiple DPP pipes
+ * into a single plane. It is typically used for bypassing pipe bandwidth
+ * limitation for rendering a very large plane or saving power by reducing UCLK
+ * and DPPCLK speeds.
+ *
+ * For instance in the Inter-pipe Relation diagram shown below, both PIPE 0 and
+ * 1 are for MPCC combine for plane 0
+ *
+ * Inter-pipe Relation
+ * __________________________________________________
+ * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
+ * | | plane 0 | | |
+ * | 0 | -------------MPC----------------------- |
+ * | | plane 0 | | | |
+ * | 1 | ------------- | | |
+ * |________|_______________|___________|_____________|
+ *
+ * return - true if pipe ctx is used for mpcc combine.
+ */
+bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx);
+
+/*
+ * Look for a free pipe in the new resource context that is currently used as a
+ * secondary DPP pipe in the MPC blending tree associated with the input OPP
+ * head pipe.
+ *
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct pipe_ctx *cur_opp_head);
+
+/*
+ * Look for a free pipe in new resource context that is not used in current
+ * resource context.
+ *
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int recource_find_free_pipe_not_used_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Look for a free pipe in new resource context that is used as a secondary DPP
+ * pipe in any MPCC combine in current resource context.
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Look for any free pipe in new resource context.
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Legacy logic to find a free secondary pipe, deprecated for newer DCNs as it
+ * doesn't find the most optimal free pipe and therefore cannot prevent
+ * time-consuming hardware state transitions.
+ */
+struct pipe_ctx *resource_find_free_secondary_pipe_legacy(
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe);
+/*
+ * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
+ * count is equal to MPC splits + 1. For example, if a plane is cut 3 times, it
+ * will have 4 slices.
+ * return - 0 if pipe is not used for a plane with MPCC combine; otherwise
+ * the number of MPC "cuts" for the plane.
+ */
+int resource_get_num_mpc_splits(const struct pipe_ctx *pipe);
+
+/*
+ * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
+ * count is equal to ODM splits + 1. For example, if a timing is cut 3 times, it
+ * will have 4 slices.
+ * return - 0 if pipe is not used for ODM combine; otherwise
+ * the number of ODM "cuts" for the timing.
+ */
+int resource_get_num_odm_splits(const struct pipe_ctx *pipe);
+
+/*
+ * Get the OTG master pipe in resource context associated with the stream.
+ * return - NULL if not found. Otherwise the OTG master pipe associated with the
+ * stream.
+ */
+struct pipe_ctx *resource_get_otg_master_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream);
+
+/*
+ * Get the OTG master pipe for the input pipe context.
+ * return - the OTG master pipe for the input pipe
+ * context.
+ */
+struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx);
+
+/*
+ * Get the OPP head pipe for the input pipe context.
+ * return - the OPP head pipe for the input pipe
+ * context.
+ */
+struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx);
+
+
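
A minimal sketch tying the accessors above together; given any DPP pipe these
walk up to the pipes that own its ODM slice and its timing (variable names are
placeholders):

	struct pipe_ctx *opp_head = resource_get_opp_head(dpp_pipe);
	struct pipe_ctx *otg_master = resource_get_otg_master(dpp_pipe);
	int num_odm_slices = resource_get_num_odm_splits(otg_master) + 1;
	int num_mpc_slices = resource_get_num_mpc_splits(dpp_pipe) + 1;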
bool resource_validate_attach_surfaces(
const struct dc_validation_set set[],
int set_count,
@@ -193,10 +410,6 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
-int get_num_mpc_splits(struct pipe_ctx *pipe);
-
-int get_num_odm_splits(struct pipe_ctx *pipe);
-
bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index c923b2af8510..37bc98faa7a0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -38,10 +38,9 @@
#define DCN_BASE__INST0_SEG2 0x000034C0
-static enum dc_irq_source to_dal_irq_source_dcn314(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
index a52b56e2859e..6af8a97d4a77 100644
--- a/drivers/gpu/drm/amd/display/dc/link/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/link/Makefile
@@ -42,7 +42,8 @@ AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES)
###############################################################################
# hwss
###############################################################################
-LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o
+LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \
+link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o
AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \
$(LINK_HWSS))
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index db9f1baa27e5..fe4282771cd0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link,
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
- int width = pipe_ctx->stream->timing.h_addressable +
+ struct pipe_ctx *odm_pipe;
+ int odm_cnt = 1;
+ int h_active = pipe_ctx->stream->timing.h_addressable +
pipe_ctx->stream->timing.h_border_left +
pipe_ctx->stream->timing.h_border_right;
- int height = pipe_ctx->stream->timing.v_addressable +
+ int v_active = pipe_ctx->stream->timing.v_addressable +
pipe_ctx->stream->timing.v_border_bottom +
pipe_ctx->stream->timing.v_border_top;
+ int odm_slice_width, last_odm_slice_width, offset = 0;
memset(&params, 0, sizeof(params));
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_cnt++;
+
+ odm_slice_width = h_active / odm_cnt;
+ last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
+
switch (test_pattern) {
case DP_TEST_PATTERN_COLOR_SQUARES:
controller_test_pattern =
@@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
+ } else if (link->dc->hwss.set_disp_pattern_generator) {
enum controller_dp_color_space controller_color_space;
- int opp_cnt = 1;
- int offset = 0;
- int dpg_width = width;
+ struct output_pixel_processor *odm_opp;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -502,24 +508,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
break;
}
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
- dpg_width = width / opp_cnt;
- offset = dpg_width;
-
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
+ odm_pipe = pipe_ctx;
+ while (odm_pipe->next_odm_pipe) {
+ odm_opp = odm_pipe->stream_res.opp;
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
odm_pipe,
@@ -527,11 +518,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
controller_color_space,
color_depth,
NULL,
- dpg_width,
- height,
+ odm_slice_width,
+ v_active,
offset);
- offset += offset;
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ odm_opp = odm_pipe->stream_res.opp;
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ odm_pipe,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ last_odm_slice_width,
+ v_active,
+ offset);
}
}
break;
@@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link,
/* restore bitdepth reduction */
resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
- int dpg_width;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- dpg_width = width / opp_cnt;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ } else if (link->dc->hwss.set_disp_pattern_generator) {
+ struct output_pixel_processor *odm_opp;
+ odm_pipe = pipe_ctx;
+ while (odm_pipe->next_odm_pipe) {
+ odm_opp = odm_pipe->stream_res.opp;
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
odm_pipe,
@@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- dpg_width,
- height,
- 0);
+ odm_slice_width,
+ v_active,
+ offset);
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ odm_opp = odm_pipe->stream_res.opp;
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
+ odm_pipe,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- dpg_width,
- height,
- 0);
+ last_odm_slice_width,
+ v_active,
+ offset);
}
}
break;
@@ -674,7 +675,8 @@ bool dp_set_test_pattern(
if (pipes[i].stream == NULL)
continue;
- if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+ if (resource_is_pipe_type(&pipes[i], OTG_MASTER) &&
+ pipes[i].stream->link == link) {
pipe_ctx = &pipes[i];
break;
}
@@ -702,6 +704,7 @@ bool dp_set_test_pattern(
/* Reset Test Pattern state */
link->test_pattern_enabled = false;
+ link->current_test_pattern = test_pattern;
return true;
}
@@ -739,6 +742,7 @@ bool dp_set_test_pattern(
if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
/* Set Test Pattern state */
link->test_pattern_enabled = true;
+ link->current_test_pattern = test_pattern;
if (p_link_settings != NULL)
dpcd_set_link_settings(link,
p_link_settings);
@@ -937,6 +941,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
+ link->current_test_pattern = test_pattern;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index bebf9c4c8702..1328a0ade342 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -46,6 +46,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+ if (stream_enc->funcs->map_stream_to_link)
+ stream_enc->funcs->map_stream_to_link(stream_enc,
+ stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
if (stream_enc->funcs->enable_fifo)
stream_enc->funcs->enable_fifo(stream_enc);
}
@@ -163,7 +166,7 @@ void set_dio_dp_lane_settings(struct dc_link *link,
link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}
-static void update_dio_stream_allocation_table(struct dc_link *link,
+void update_dio_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 8b8a099feeb0..f4633d3cf9b9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -55,5 +55,8 @@ void setup_dio_audio_output(struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output, uint32_t audio_inst);
void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx);
void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx);
+void update_dio_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table);
#endif /* __LINK_HWSS_DIO_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
new file mode 100644
index 000000000000..b659baa23147
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_dio.h"
+#include "link_hwss_dio_fixed_vs_pe_retimer.h"
+#include "link_enc_cfg.h"
+
+uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link)
+{
+ // TODO: Get USB-C cable orientation
+ if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
+ return 0xF2;
+ else
+ return 0x12;
+}
+
+void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link)
+{
+ const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link);
+ const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ const uint8_t vendor_lttpr_exit_manual_automation_1[4] = {0x1, 0x50, dp_type, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_2[4] = {0x1, 0x50, 0x50, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_3[4] = {0x1, 0x51, 0x50, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_4[4] = {0x1, 0x10, 0x58, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_5[4] = {0x1, 0x10, 0x59, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_6[4] = {0x1, 0x30, 0x51, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_7[4] = {0x1, 0x30, 0x52, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_8[4] = {0x1, 0x30, 0x54, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_9[4] = {0x1, 0x30, 0x55, 0x0};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_1[0], sizeof(vendor_lttpr_exit_manual_automation_1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_2[0], sizeof(vendor_lttpr_exit_manual_automation_2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_3[0], sizeof(vendor_lttpr_exit_manual_automation_3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_4[0], sizeof(vendor_lttpr_exit_manual_automation_4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_5[0], sizeof(vendor_lttpr_exit_manual_automation_5));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_6[0], sizeof(vendor_lttpr_exit_manual_automation_6));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_7[0], sizeof(vendor_lttpr_exit_manual_automation_7));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_8[0], sizeof(vendor_lttpr_exit_manual_automation_8));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_9[0], sizeof(vendor_lttpr_exit_manual_automation_9));
+}
+
+static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_link *link,
+ const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params,
+ const struct link_hwss *link_hwss)
+{
+ struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
+ const uint8_t pltpat_custom[10] = {0x1F, 0x7C, 0xF0, 0xC1, 0x07, 0x1F, 0x7C, 0xF0, 0xC1, 0x07};
+ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+
+
+ if (tp_params == NULL)
+ return false;
+
+ if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
+ link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) {
+ // Deprogram overrides from previous test pattern
+ dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
+ }
+
+ switch (tp_params->dp_phy_pattern) {
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,
+ pltpat_custom, tp_params->custom_pattern_size) != 0)
+ return false;
+ break;
+ case DP_TEST_PATTERN_D102:
+ break;
+ default:
+ if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
+ link->current_test_pattern == DP_TEST_PATTERN_D102)
+ // Deprogram overrides from previous test pattern
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_0[0],
+ sizeof(vendor_lttpr_exit_manual_automation_0));
+
+ return false;
+ }
+
+ hw_tp_params.dp_phy_pattern = tp_params->dp_phy_pattern;
+ hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode;
+
+ if (link_hwss->ext.set_dp_link_test_pattern)
+ link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params);
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0));
+
+ return true;
+}
+
+static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(
+ link, link_res, tp_params, get_dio_link_hwss())) {
+ link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
+ }
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link)
+{
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
+}
+
+static void enable_dio_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ if (link_settings->lane_count == LANE_COUNT_FOUR)
+ enable_dio_fixed_vs_pe_retimer_program_4lane_output(link);
+
+ enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings);
+}
+
+static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
+ .setup_stream_encoder = setup_dio_stream_encoder,
+ .reset_stream_encoder = reset_dio_stream_encoder,
+ .setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
+ .setup_audio_output = setup_dio_audio_output,
+ .enable_audio_packet = enable_dio_audio_packet,
+ .disable_audio_packet = disable_dio_audio_packet,
+ .ext = {
+ .set_throttled_vcp_size = set_dio_throttled_vcp_size,
+ .enable_dp_link_output = enable_dio_fixed_vs_pe_retimer_dp_link_output,
+ .set_dp_link_test_pattern = set_dio_fixed_vs_pe_retimer_dp_link_test_pattern,
+ .set_dp_lane_settings = set_dio_dp_lane_settings,
+ .update_stream_allocation_table = update_dio_stream_allocation_table,
+ },
+};
+
+bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
+{
+ if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
+ return false;
+
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
+ return true;
+}
+
+const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
+{
+ return &dio_fixed_vs_pe_retimer_link_hwss;
+}
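
A note on the vendor writes above: each configure_fixed_vs_pe_retimer() payload is a fixed 4-byte sequence, and dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg() supplies the lane-dependent byte (0xF2 for 4-lane, 0x12 otherwise). The back-to-back writes in dp_dio_fixed_vs_pe_retimer_exit_manual_automation() could equally be expressed table-driven; a sketch only (helper name and table layout are ours, and it assumes the kernel ARRAY_SIZE() macro), not the patch's code:

/* Sketch only: table-driven form of the exit-manual-automation writes. */
static void write_exit_sequence(struct dc_link *link, uint8_t dp_type)
{
	const uint8_t seq[][4] = {
		{0x1, 0x11, 0x00, 0x06}, {0x1, 0x50, dp_type, 0x0},
		{0x1, 0x50, 0x50, 0x0},  {0x1, 0x51, 0x50, 0x0},
		{0x1, 0x10, 0x58, 0x0},  {0x1, 0x10, 0x59, 0x0},
		{0x1, 0x30, 0x51, 0x0},  {0x1, 0x30, 0x52, 0x0},
		{0x1, 0x30, 0x54, 0x0},  {0x1, 0x30, 0x55, 0x0},
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(seq); i++)
		link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
				seq[i], sizeof(seq[i]));
}
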
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
new file mode 100644
index 000000000000..9ac08a332540
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
+#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
+
+#include "link.h"
+
+uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link);
+uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link);
+void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link);
+void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link);
+bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link);
+const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void);
+
+#endif /* __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index 586fe25c1702..e1257404357b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -28,7 +28,7 @@
#include "dccg.h"
#include "clk_mgr.h"
-static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size)
{
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder =
@@ -41,7 +41,7 @@ static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
throttled_vcp_size);
}
-static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
+void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
const struct dc_link_settings *link_settings,
struct fixed31_32 throttled_vcp_size)
{
@@ -69,7 +69,7 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
hblank_min_symbol_width);
}
-static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
+void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
@@ -78,14 +78,14 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
}
-static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
+void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
stream_enc->funcs->disable(stream_enc);
}
-static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
+void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
{
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -102,12 +102,17 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
-static void enable_hpo_dp_link_output(struct dc_link *link,
+void enable_hpo_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
+ if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+ link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+ link->dc->res_pool->dccg,
+ link_res->hpo_dp_link_enc->inst,
+ true);
link_res->hpo_dp_link_enc->funcs->enable_link_phy(
link_res->hpo_dp_link_enc,
link_settings,
@@ -115,13 +120,18 @@ static void enable_hpo_dp_link_output(struct dc_link *link,
link->link_enc->hpd_source);
}
-static void disable_hpo_dp_link_output(struct dc_link *link,
+void disable_hpo_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
link_res->hpo_dp_link_enc->funcs->disable_link_phy(
link_res->hpo_dp_link_enc, signal);
+ if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+ link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+ link->dc->res_pool->dccg,
+ link_res->hpo_dp_link_enc->inst,
+ false);
}
static void set_hpo_dp_link_test_pattern(struct dc_link *link,
@@ -144,7 +154,7 @@ static void set_hpo_dp_lane_settings(struct dc_link *link,
lane_settings[0].FFE_PRESET.raw);
}
-static void update_hpo_dp_stream_allocation_table(struct dc_link *link,
+void update_hpo_dp_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
@@ -153,7 +163,7 @@ static void update_hpo_dp_stream_allocation_table(struct dc_link *link,
table);
}
-static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
+void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output, uint32_t audio_inst)
{
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
@@ -162,13 +172,13 @@ static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
&pipe_ctx->stream->audio_info);
}
-static void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
+void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
{
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(
pipe_ctx->stream_res.hpo_dp_stream_enc);
}
-static void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
+void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->stream_res.audio)
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable(
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 3cbb94b41a23..1d3ed8ca83b5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -28,9 +28,35 @@
#include "link_hwss.h"
#include "link.h"
+void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+ struct fixed31_32 throttled_vcp_size);
+void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
+ const struct dc_link_settings *link_settings,
+ struct fixed31_32 throttled_vcp_size);
+void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
+ const struct dc_link_settings *link_settings,
+ struct fixed31_32 throttled_vcp_size);
+void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx);
+void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx);
+void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx);
+void enable_hpo_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+void disable_hpo_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+void update_hpo_dp_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table);
+void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output, uint32_t audio_inst);
+void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx);
+void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx);
+const struct link_hwss *get_hpo_dp_link_hwss(void);
bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
-const struct link_hwss *get_hpo_dp_link_hwss(void);
#endif /* __LINK_HWSS_HPO_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
new file mode 100644
index 000000000000..b621b97711b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_hpo_dp.h"
+#include "link_hwss_hpo_fixed_vs_pe_retimer_dp.h"
+#include "link_hwss_dio_fixed_vs_pe_retimer.h"
+
+static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
+ const struct dc_lane_settings *hw_lane_settings)
+{
+ const uint8_t vendor_ffe_preset_table[16] = {
+ 0x01, 0x41, 0x61, 0x81,
+ 0xB1, 0x05, 0x35, 0x65,
+ 0x85, 0xA5, 0x09, 0x39,
+ 0x59, 0x89, 0x0F, 0x24};
+
+ const uint8_t ffe_mask[4] = {
+ (hw_lane_settings[0].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[0].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
+ (hw_lane_settings[1].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[1].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
+ (hw_lane_settings[2].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[2].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
+ (hw_lane_settings[3].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[3].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF)};
+
+ const uint8_t ffe_cfg[4] = {
+ vendor_ffe_preset_table[hw_lane_settings[0].FFE_PRESET.settings.level] & ffe_mask[0],
+ vendor_ffe_preset_table[hw_lane_settings[1].FFE_PRESET.settings.level] & ffe_mask[1],
+ vendor_ffe_preset_table[hw_lane_settings[2].FFE_PRESET.settings.level] & ffe_mask[2],
+ vendor_ffe_preset_table[hw_lane_settings[3].FFE_PRESET.settings.level] & ffe_mask[3]};
+
+ const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link);
+
+ const uint8_t vendor_lttpr_write_data_ffe1[4] = {0x01, 0x50, dp_type, 0x0F};
+ const uint8_t vendor_lttpr_write_data_ffe2[4] = {0x01, 0x55, dp_type, ffe_cfg[0]};
+ const uint8_t vendor_lttpr_write_data_ffe3[4] = {0x01, 0x56, dp_type, ffe_cfg[1]};
+ const uint8_t vendor_lttpr_write_data_ffe4[4] = {0x01, 0x57, dp_type, ffe_cfg[2]};
+ const uint8_t vendor_lttpr_write_data_ffe5[4] = {0x01, 0x58, dp_type, ffe_cfg[3]};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe1[0], sizeof(vendor_lttpr_write_data_ffe1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe2[0], sizeof(vendor_lttpr_write_data_ffe2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe3[0], sizeof(vendor_lttpr_write_data_ffe3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe4[0], sizeof(vendor_lttpr_write_data_ffe4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe5[0], sizeof(vendor_lttpr_write_data_ffe5));
+}
+
+static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21};
+ const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21};
+ const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F};
+ const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F};
+ const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20};
+ const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20};
+ const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20};
+ const uint8_t vendor_lttpr_write_data_pg10[4] = {0x1, 0x30, 0x55, 0x20};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg1[0], sizeof(vendor_lttpr_write_data_pg1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg2[0], sizeof(vendor_lttpr_write_data_pg2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg3[0], sizeof(vendor_lttpr_write_data_pg3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg4[0], sizeof(vendor_lttpr_write_data_pg4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg5[0], sizeof(vendor_lttpr_write_data_pg5));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg6[0], sizeof(vendor_lttpr_write_data_pg6));
+
+ if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg7[0], sizeof(vendor_lttpr_write_data_pg7));
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg8[0], sizeof(vendor_lttpr_write_data_pg8));
+
+ if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg9[0], sizeof(vendor_lttpr_write_data_pg9));
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg10[0], sizeof(vendor_lttpr_write_data_pg10));
+}
+
+static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params,
+ const struct link_hwss *link_hwss)
+{
+ struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
+ const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+
+ if (tp_params == NULL)
+ return false;
+
+ if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN ||
+ tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) {
+ // Deprogram overrides from previously set square wave override
+ if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
+ link->current_test_pattern == DP_TEST_PATTERN_D102)
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_0[0],
+ sizeof(vendor_lttpr_exit_manual_automation_0));
+ else
+ dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
+
+ return false;
+ }
+
+ hw_tp_params.dp_phy_pattern = DP_TEST_PATTERN_PRBS31;
+ hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode;
+
+ if (link_hwss->ext.set_dp_link_test_pattern)
+ link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params);
+
+ dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params);
+
+ dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]);
+
+ return true;
+}
+
+static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ if (!dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(
+ link, link_res, tp_params, get_hpo_dp_link_hwss())) {
+ link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ link_res->hpo_dp_link_enc, tp_params);
+ }
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ link_res->hpo_dp_link_enc->funcs->set_ffe(
+ link_res->hpo_dp_link_enc,
+ link_settings,
+ lane_settings[0].FFE_PRESET.raw);
+
+ // FFE is programmed when the retimer is programmed for SQ128, but explicit
+ // programming is needed here as well in case an FFE-only update is requested
+ if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
+ link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END)
+ dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+}
+
+static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ if (link_settings->lane_count == LANE_COUNT_FOUR)
+ enable_dio_fixed_vs_pe_retimer_program_4lane_output(link);
+
+ enable_hpo_dp_link_output(link, link_res, signal, clock_source, link_settings);
+}
+
+static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {
+ .setup_stream_encoder = setup_hpo_dp_stream_encoder,
+ .reset_stream_encoder = reset_hpo_dp_stream_encoder,
+ .setup_stream_attribute = setup_hpo_dp_stream_attribute,
+ .disable_link_output = disable_hpo_dp_link_output,
+ .setup_audio_output = setup_hpo_dp_audio_output,
+ .enable_audio_packet = enable_hpo_dp_audio_packet,
+ .disable_audio_packet = disable_hpo_dp_audio_packet,
+ .ext = {
+ .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
+ .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
+ .enable_dp_link_output = enable_hpo_fixed_vs_pe_retimer_dp_link_output,
+ .set_dp_link_test_pattern = set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern,
+ .set_dp_lane_settings = set_hpo_fixed_vs_pe_retimer_dp_lane_settings,
+ .update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
+ },
+};
+
+bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)
+{
+ if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
+ return false;
+
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
+ return true;
+}
+
+const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void)
+{
+ return &hpo_fixed_vs_pe_retimer_dp_link_hwss;
+}
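
To make the per-lane masking in dp_hpo_fixed_vs_pe_retimer_set_tx_ffe() above concrete: the preset byte from vendor_ffe_preset_table is ANDed with 0x0F when the lane's no_deemphasis flag is set and with 0xF1 when no_preshoot is set. A sketch of one lane's configuration byte (helper name is ours, not the patch's):

/* Sketch only: per-lane FFE byte as composed in dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(). */
static uint8_t lane_ffe_byte(const uint8_t *preset_table, uint8_t level,
		bool no_deemphasis, bool no_preshoot)
{
	uint8_t mask = 0xFF;

	if (no_deemphasis)
		mask &= 0x0F;	/* drop the bits gated by no_deemphasis */
	if (no_preshoot)
		mask &= 0xF1;	/* drop the bits gated by no_preshoot   */

	return preset_table[level] & mask;
}
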
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
new file mode 100644
index 000000000000..82301187bc7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
+#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
+
+#include "link.h"
+
+bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link);
+const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void);
+
+#endif /* __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 8041b8369e45..c9b6676eaf53 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -876,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_sink_ext_caps.bits.oled == 1)) {
dpcd_set_source_specific_data(link);
msleep(post_oui_delay);
- set_default_brightness_aux(link);
- //TODO: use cached
+ set_cached_brightness_aux(link);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 1a7b93e41e35..79aef205598b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -182,11 +182,8 @@ void link_resume(struct dc_link *link)
static bool is_master_pipe_for_link(const struct dc_link *link,
const struct pipe_ctx *pipe)
{
- return (pipe->stream &&
- pipe->stream->link &&
- pipe->stream->link == link &&
- pipe->top_pipe == NULL &&
- pipe->prev_odm_pipe == NULL);
+ return resource_is_pipe_type(pipe, OTG_MASTER) &&
+ pipe->stream->link == link;
}
/*
@@ -1079,8 +1076,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
{
uint64_t kbps;
+ enum dc_link_encoding_format link_encoding;
- kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+ else
+ link_encoding = DC_LINK_ENCODING_DP_8b_10b;
+
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding);
return get_pbn_from_bw_in_kbps(kbps);
}
@@ -1538,7 +1541,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
struct fixed31_32 timing_bw =
dc_fixpt_from_int(
- dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(link)));
struct fixed31_32 avg_time_slots_per_mtp =
dc_fixpt_div(timing_bw, timeslot_bw_effective);
@@ -1971,6 +1975,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@@ -2010,6 +2015,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
display_color_depth = COLOR_DEPTH_888;
+ /* We need to enable the stream encoder for TMDS first so that the 1/4 TMDS
+ * character clock is applied when the clock exceeds 340 MHz.
+ */
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
dc->hwss.enable_tmds_link_output(
link,
&pipe_ctx->link_res,
@@ -2129,7 +2140,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
- set_default_brightness_aux(link); // TODO: use cached if known
+ set_cached_brightness_aux(link);
+
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
edp_backlight_enable_aux(link, true);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index ac1c3e2e7c1d..195ca9e52eda 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -132,6 +132,7 @@ static void construct_link_service_ddc(struct link_service *link_srv)
link_srv->destroy_ddc_service = link_destroy_ddc_service;
link_srv->query_ddc_data = link_query_ddc_data;
link_srv->aux_transfer_raw = link_aux_transfer_raw;
+ link_srv->configure_fixed_vs_pe_retimer = link_configure_fixed_vs_pe_retimer;
link_srv->aux_transfer_with_retries_no_mutex =
link_aux_transfer_with_retries_no_mutex;
link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode;
@@ -207,6 +208,13 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
link_srv->edp_set_sink_vtotal_in_psr_active =
edp_set_sink_vtotal_in_psr_active;
link_srv->edp_get_psr_residency = edp_get_psr_residency;
+
+ link_srv->edp_get_replay_state = edp_get_replay_state;
+ link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active;
+ link_srv->edp_setup_replay = edp_setup_replay;
+ link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal;
+ link_srv->edp_replay_residency = edp_replay_residency;
+
link_srv->edp_wait_for_t12 = edp_wait_for_t12;
link_srv->edp_is_ilr_optimization_required =
edp_is_ilr_optimization_required;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index e8b2fc4002a5..b45fda96eaf6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing(
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
#endif
- if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+ if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
+ dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
return false;
} else { // DP to HDMI TMDS converter
if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
@@ -285,7 +286,7 @@ static bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
- req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link));
max_bw = dp_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
@@ -357,7 +358,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
for (uint8_t i = 0; i < num_streams; ++i) {
link[i] = stream[i].link;
- bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
+ bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+ dc_link_get_highest_encoding_format(link[i]));
}
ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 0fa1228bc178..ecfd83299e75 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -412,6 +412,88 @@ int link_aux_transfer_raw(struct ddc_service *ddc,
}
}
+uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link)
+{
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint8_t offset;
+
+ switch (link->dpcd_caps.lttpr_caps.phy_repeater_cnt) {
+ case 0x80: // 1 lttpr repeater
+ offset = 1;
+ break;
+ case 0x40: // 2 lttpr repeaters
+ offset = 2;
+ break;
+ case 0x20: // 3 lttpr repeaters
+ offset = 3;
+ break;
+ case 0x10: // 4 lttpr repeaters
+ offset = 4;
+ break;
+ case 0x08: // 5 lttpr repeaters
+ offset = 5;
+ break;
+ case 0x04: // 6 lttpr repeaters
+ offset = 6;
+ break;
+ case 0x02: // 7 lttpr repeaters
+ offset = 7;
+ break;
+ case 0x01: // 8 lttpr repeaters
+ offset = 8;
+ break;
+ default:
+ offset = 0xFF;
+ }
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+ return vendor_lttpr_write_address;
+}
+
+uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link)
+{
+ return link_get_fixed_vs_pe_retimer_write_address(link) + 4;
+}
+
+bool link_configure_fixed_vs_pe_retimer(struct ddc_service *ddc, const uint8_t *data, uint32_t length)
+{
+ struct aux_payload write_payload = {
+ .i2c_over_aux = false,
+ .write = true,
+ .address = link_get_fixed_vs_pe_retimer_write_address(ddc->link),
+ .length = length,
+ .data = (uint8_t *) data,
+ .reply = NULL,
+ .mot = I2C_MOT_UNDEF,
+ .write_status_update = false,
+ .defer_delay = 0,
+ };
+
+ return link_aux_transfer_with_retries_no_mutex(ddc,
+ &write_payload);
+}
+
+bool link_query_fixed_vs_pe_retimer(struct ddc_service *ddc, uint8_t *data, uint32_t length)
+{
+ struct aux_payload read_payload = {
+ .i2c_over_aux = false,
+ .write = false,
+ .address = link_get_fixed_vs_pe_retimer_read_address(ddc->link),
+ .length = length,
+ .data = data,
+ .reply = NULL,
+ .mot = I2C_MOT_UNDEF,
+ .write_status_update = false,
+ .defer_delay = 0,
+ };
+
+ return link_aux_transfer_with_retries_no_mutex(ddc,
+ &read_payload);
+}
+
bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
struct aux_payload *payload)
{
@@ -427,7 +509,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
!ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
- ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+ ddc->ctx->dce_version == DCN_VERSION_3_1) {
/* Fixed VS workaround for AUX timeout */
const uint32_t fixed_vs_address = 0xF004F;
const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
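
A worked example of the address arithmetic in link_get_fixed_vs_pe_retimer_write_address() added above (a sketch; it assumes DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE is 0x50, its usual DPCD value):

/* Sketch only: phy_repeater_cnt 0x40 encodes two LTTPRs, so offset = 2 and
 *   write address = 0xF004F + 0x50 * (2 - 1) = 0xF009F
 *   read  address = write address + 4        = 0xF00A3
 */
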
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index 860ef15d7f1b..a3e25e55bed6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -72,6 +72,20 @@ bool link_query_ddc_data(
bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
struct aux_payload *payload);
+bool link_configure_fixed_vs_pe_retimer(
+ struct ddc_service *ddc,
+ const uint8_t *data,
+ uint32_t length);
+
+bool link_query_fixed_vs_pe_retimer(
+ struct ddc_service *ddc,
+ uint8_t *data,
+ uint32_t length);
+
+uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link);
+uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link);
+
+
void write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 3a5e80b57711..237e0ff955f3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -906,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
struct dc_link_settings *link_setting)
{
struct dc_link *link = stream->link;
- uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link));
memset(link_setting, 0, sizeof(*link_setting));
@@ -939,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
tmp_timing.flags.DSC = 0;
- orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing,
+ dc_link_get_highest_encoding_format(link));
edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
max_link_rate = tmp_link_setting.link_rate;
}
@@ -2008,6 +2009,16 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP,
&link->dpcd_caps.alpm_caps.raw,
sizeof(link->dpcd_caps.alpm_caps.raw));
+
+ /*
+ * Read REPLAY info
+ */
+ core_link_read_dpcd(link, DP_SINK_PR_PIXEL_DEVIATION_PER_LINE,
+ &link->dpcd_caps.pr_info.pixel_deviation_per_line,
+ sizeof(link->dpcd_caps.pr_info.pixel_deviation_per_line));
+ core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE,
+ &link->dpcd_caps.pr_info.max_deviation_line,
+ sizeof(link->dpcd_caps.pr_info.max_deviation_line));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
@@ -2165,7 +2176,9 @@ static bool dp_verify_link_cap(
link,
&irq_data))
(*fail_count)++;
-
+ } else if (status == LINK_TRAINING_LINK_LOSS) {
+ success = true;
+ (*fail_count)++;
} else {
(*fail_count)++;
}
@@ -2188,6 +2201,7 @@ bool dp_verify_link_cap_with_retries(
int i = 0;
bool success = false;
int fail_count = 0;
+ struct dc_link_settings last_verified_link_cap = fail_safe_link_settings;
dp_trace_detect_lt_init(link);
@@ -2204,10 +2218,14 @@ bool dp_verify_link_cap_with_retries(
if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
link->verified_link_cap = fail_safe_link_settings;
break;
- } else if (dp_verify_link_cap(link, known_limit_link_setting,
- &fail_count) && fail_count == 0) {
- success = true;
- break;
+ } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) {
+ last_verified_link_cap = link->verified_link_cap;
+ if (fail_count == 0) {
+ success = true;
+ break;
+ }
+ } else {
+ link->verified_link_cap = last_verified_link_cap;
}
fsleep(10 * 1000);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index ef8739df91bc..e047bbeaa49a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -182,6 +182,68 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
return false;
}
+static bool handle_hpd_irq_replay_sink(struct dc_link *link)
+{
+ union dpcd_replay_configuration replay_configuration;
+ /* The AMD Replay implementation reuses DP_PSR_ERROR_STATUS for the REPLAY_ERROR status. */
+ union psr_error_status replay_error_status;
+
+ if (!link->replay_settings.replay_feature_enabled)
+ return false;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_SINK_PR_REPLAY_STATUS,
+ &replay_configuration.raw,
+ sizeof(replay_configuration.raw));
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_ERROR_STATUS,
+ &replay_error_status.raw,
+ sizeof(replay_error_status.raw));
+
+ link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
+ replay_error_status.bits.LINK_CRC_ERROR;
+ link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
+ replay_configuration.bits.DESYNC_ERROR_STATUS;
+ link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR =
+ replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS;
+
+ if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR ||
+ link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR ||
+ link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) {
+ bool allow_active;
+
+ /* Acknowledge and clear configuration bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_SINK_PR_REPLAY_STATUS,
+ &replay_configuration.raw,
+ sizeof(replay_configuration.raw));
+
+ /* Acknowledge and clear error bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_ERROR_STATUS,/*DpcdAddress_REPLAY_Error_Status*/
+ &replay_error_status.raw,
+ sizeof(replay_error_status.raw));
+
+ /* Replay error, disable and re-enable Replay */
+ if (link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ }
+ }
+ return true;
+}
+
void dp_handle_link_loss(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
@@ -360,6 +422,10 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
/* PSR-related error was detected and handled */
return true;
+ if (handle_hpd_irq_replay_sink(link))
+ /* Replay-related error was detected and handled */
+ return true;
+
/* If PSR-related error handled, Main link may be off,
* so do not handle as a normal sink status change interrupt.
*/
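
The recovery path in handle_hpd_irq_replay_sink() above amounts to toggling Replay off and back on once a CRC, desync, or state-transition error is latched; as a compact sketch (the helper name is ours, not the patch's, and the call signature matches the one used in the hunk):

/* Sketch only: restart Replay after a reported sink-side error. */
static void restart_replay_after_error(struct dc_link *link)
{
	bool allow_active = false;

	edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
	allow_active = true;
	edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
}
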
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index e011df4bdaf2..90339c2dfd84 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1699,13 +1699,20 @@ bool perform_link_training_with_retries(
} else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
uint32_t req_bw;
uint32_t link_bw;
+ enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED;
decide_fallback_link_setting(link, &max_link_settings,
&cur_link_settings, status);
+
+ if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
+ link_encoding = DC_LINK_ENCODING_DP_8b_10b;
+ else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING)
+ link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+
/* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
* minimum link bandwidth.
*/
- req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding);
link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings);
is_link_bw_low = (req_bw > link_bw);
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 15faaf645b14..fd8f6f198146 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -36,6 +36,7 @@
#include "link_dpcd.h"
#include "link_dp_phy.h"
#include "link_dp_capability.h"
+#include "link_ddc.h"
#define DC_LOGGER \
link->ctx->logger
@@ -46,42 +47,20 @@ void dp_fixed_vs_pe_read_lane_adjust(
{
const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
- const uint8_t offset = dp_parse_lttpr_repeater_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- uint32_t vendor_lttpr_write_address = 0xF004F;
- uint32_t vendor_lttpr_read_address = 0xF0053;
uint8_t dprx_vs = 0;
uint8_t dprx_pe = 0;
uint8_t lane;
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- vendor_lttpr_read_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
/* W/A to read lane settings requested by DPRX */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_vs,
- 1);
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_pe,
- 1);
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+
+ link_query_fixed_vs_pe_retimer(link->ddc, &dprx_vs, 1);
+
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
+
+ link_query_fixed_vs_pe_retimer(link->ddc, &dprx_pe, 1);
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3;
@@ -95,19 +74,11 @@ void dp_fixed_vs_pe_set_retimer_lane_settings(
const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
uint8_t lane_count)
{
- const uint8_t offset = dp_parse_lttpr_repeater_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- uint32_t vendor_lttpr_write_address = 0xF004F;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
uint8_t lane = 0;
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
for (lane = 0; lane < lane_count; lane++) {
vendor_lttpr_write_data_vs[3] |=
dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
@@ -116,21 +87,14 @@ void dp_fixed_vs_pe_set_retimer_lane_settings(
}
/* Force LTTPR to output desired VS and PE */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
+
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
}
static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence(
@@ -236,7 +200,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
uint32_t pre_disable_intercept_delay_ms = 0;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint32_t vendor_lttpr_write_address = 0xF004F;
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -244,10 +212,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
uint8_t toggle_rate;
uint8_t rate;
- if (link->local_sink)
- pre_disable_intercept_delay_ms =
- link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
-
/* Only 8b/10b is supported */
ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
@@ -258,37 +222,27 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
}
if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ if (offset == 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
/* Certain display and cable configuration require extra delay */
- if (offset > 2)
- pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
+ } else if (offset > 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
}
/* Vendor specific: Reset lane settings */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* Vendor specific: Enable intercept */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
+
/* 1. set link rate, lane count and spread. */
@@ -339,6 +293,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
+ }
+
/* 2. Perform link training */
/* Perform Clock Recovery Sequence */
@@ -351,7 +318,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- enum dc_status dpcd_status = DC_OK;
uint8_t i = 0;
retries_cr = 0;
@@ -386,18 +352,12 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
for (i = 0; i < max_vendor_dpcd_retries; i++) {
if (pre_disable_intercept_delay_ms != 0)
msleep(pre_disable_intercept_delay_ms);
- dpcd_status = core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ if (link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis));
-
- if (dpcd_status == DC_OK)
+ sizeof(vendor_lttpr_write_data_intercept_dis)))
break;
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_en[0],
sizeof(vendor_lttpr_write_data_intercept_en));
}
@@ -413,16 +373,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
dpcd_set_lane_settings(
link,
@@ -518,16 +472,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* 2. update DPCD*/
if (!retries_ch_eq)
@@ -596,10 +544,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E};
const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01};
const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68};
+ uint32_t pre_disable_intercept_delay_ms = 0;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint32_t pre_disable_intercept_delay_ms = 0;
- uint32_t vendor_lttpr_write_address = 0xF004F;
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -607,10 +559,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
uint8_t toggle_rate;
uint8_t rate;
- if (link->local_sink)
- pre_disable_intercept_delay_ms =
- link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
-
/* Only 8b/10b is supported */
ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
@@ -621,37 +569,26 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
}
if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ if (offset == 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
/* Certain display and cable configurations require extra delay */
- if (offset > 2)
- pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
+ } else if (offset > 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
}
/* Vendor specific: Reset lane settings */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* Vendor specific: Enable intercept */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
/* 1. set link rate, lane count and spread. */
@@ -702,6 +639,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
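+ /* Vendor specific: additional retimer programming is required for 4-lane configurations */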
+ if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
+ }
+
/* 2. Perform link training */
/* Perform Clock Recovery Sequence */
@@ -714,7 +664,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- enum dc_status dpcd_status = DC_OK;
uint8_t i = 0;
retries_cr = 0;
@@ -749,18 +698,12 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
for (i = 0; i < max_vendor_dpcd_retries; i++) {
if (pre_disable_intercept_delay_ms != 0)
msleep(pre_disable_intercept_delay_ms);
- dpcd_status = core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ if (link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis));
-
- if (dpcd_status == DC_OK)
+ sizeof(vendor_lttpr_write_data_intercept_dis)))
break;
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_en[0],
sizeof(vendor_lttpr_write_data_intercept_en));
}
@@ -776,16 +719,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
dpcd_set_lane_settings(
link,
@@ -858,17 +795,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_adicora_eq1[0],
sizeof(vendor_lttpr_write_data_adicora_eq1));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_adicora_eq2[0],
sizeof(vendor_lttpr_write_data_adicora_eq2));
+
/* Note: also check that TPS4 is a supported feature*/
tr_pattern = lt_settings->pattern_for_eq;
@@ -892,16 +826,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* 2. update DPCD*/
if (!retries_ch_eq) {
@@ -914,11 +842,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
lt_settings,
tr_pattern, 0);
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_adicora_eq3[0],
- sizeof(vendor_lttpr_write_data_adicora_eq3));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_adicora_eq3[0],
+ sizeof(vendor_lttpr_write_data_adicora_eq3));
+
} else
dpcd_set_lane_settings(link, lt_settings, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 2039a345f23a..98e715aa6d8e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -34,9 +34,13 @@
#include "dm_helpers.h"
#include "dal_asic_id.h"
#include "dce/dmub_psr.h"
+#include "dc/dc_dmub_srv.h"
+#include "dce/dmub_replay.h"
#include "abm.h"
#define DC_LOGGER_INIT(logger)
+#define DP_SINK_PR_ENABLE_AND_CONFIGURATION 0x37B
+
/* Travis */
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
/* Nutmeg */
@@ -46,43 +50,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
{
union dpcd_edp_config edp_config_set;
bool panel_mode_edp = false;
+ enum dc_status result;
memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
- if (panel_mode != DP_PANEL_MODE_DEFAULT) {
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
- switch (panel_mode) {
- case DP_PANEL_MODE_EDP:
- case DP_PANEL_MODE_SPECIAL:
- panel_mode_edp = true;
- break;
+ default:
+ break;
+ }
- default:
- break;
- }
+ /* set eDP panel mode in receiver */
+ result = core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
- /*set edp panel mode in receiver*/
- core_link_read_dpcd(
+ if (result == DC_OK &&
+ edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
link,
DP_EDP_CONFIGURATION_SET,
&edp_config_set.raw,
sizeof(edp_config_set.raw));
- if (edp_config_set.bits.PANEL_MODE_EDP
- != panel_mode_edp) {
- enum dc_status result;
-
- edp_config_set.bits.PANEL_MODE_EDP =
- panel_mode_edp;
- result = core_link_write_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- ASSERT(result == DC_OK);
- }
+ ASSERT(result == DC_OK);
}
+
link->panel_mode = panel_mode;
DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
"eDP panel mode enabled: %d \n",
@@ -164,6 +167,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
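+ /* Cache the requested level so it can be restored later (see set_cached_brightness_aux) */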
+ link->backlight_settings.backlight_millinits = backlight_millinits;
if (!link->dpcd_caps.panel_luminance_control) {
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
@@ -251,10 +255,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *) backlight_millinits,
- sizeof(uint32_t)))
- return false;
+ if (!link->dpcd_caps.panel_luminance_control) {
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+ } else {
+ /* set to 0 as a precaution, since target_luminance_value is 3 bytes */
+ memset(backlight_millinits, 0, sizeof(uint32_t));
+
+ if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ (uint8_t *)backlight_millinits,
+ sizeof(struct target_luminance_value)))
+ return false;
+ }
return true;
}
@@ -276,6 +290,16 @@ bool set_default_brightness_aux(struct dc_link *link)
return false;
}
+bool set_cached_brightness_aux(struct dc_link *link)
+{
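+ /* Restore the last cached backlight level; fall back to the panel default if nothing was cached */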
+ if (link->backlight_settings.backlight_millinits)
+ return edp_set_backlight_level_nits(link, true,
+ link->backlight_settings.backlight_millinits, 0);
+ else
+ return set_default_brightness_aux(link);
+}
+
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
@@ -309,7 +333,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
core_link_read_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, sizeof(lane_count_set));
- req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link));
if (!crtc_timing->flags.DSC)
edp_decide_link_settings(link, &link_setting, req_bw);
@@ -807,6 +831,167 @@ bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_
return true;
}
+bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (replay == NULL && force_static)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ /* Set power optimization flag */
+ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
+ if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
+ replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
+ link->replay_settings.replay_power_opt_active = *power_opts;
+ }
+ }
+
+ /* Activate or deactivate Replay */
+ if (allow_active && link->replay_settings.replay_allow_active != *allow_active) {
+ // TODO: Handle mux change case if force_static is set
+ // If force_static is set, just change the replay_allow_active state directly
+ if (replay != NULL && link->replay_settings.replay_feature_enabled)
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
+ link->replay_settings.replay_allow_active = *allow_active;
+ }
+
+ return true;
+}
+
+bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+ enum replay_state pr_state = REPLAY_STATE_0;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if (replay != NULL && link->replay_settings.replay_feature_enabled)
+ replay->funcs->replay_get_state(replay, &pr_state, panel_inst);
+ *state = pr_state;
+
+ return true;
+}
+
+bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+ /* To-do: Setup Replay */
+ struct dc *dc;
+ struct dmub_replay *replay;
+ int i;
+ unsigned int panel_inst;
+ struct replay_context replay_context = { 0 };
+ unsigned int lineTimeInNs = 0;
+
+ union replay_enable_and_configuration replay_config;
+ union dpcd_alpm_configuration alpm_config;
+
+ if (!link)
+ return false;
+
+ dc = link->ctx->dc;
+ replay = dc->res_pool->replay;
+
+ if (!replay)
+ return false;
+
+ replay_context.controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel;
+ replay_context.digbe_inst = link->link_enc->transmitter;
+ replay_context.digfe_inst = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu subtracts 1 from all controller id values,
+ * therefore +1 here
+ */
+ replay_context.controllerId =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
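+ /* Line time in ns: h_total * 1000000 divided by the pixel clock in kHz
+ * (pix_clk_100hz / 10); +1 rounds up.
+ */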
+ lineTimeInNs =
+ ((stream->timing.h_total * 1000000) /
+ (stream->timing.pix_clk_100hz / 10)) + 1;
+
+ replay_context.line_time_in_ns = lineTimeInNs;
+
+ link->replay_settings.replay_feature_enabled =
+ replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
+ if (link->replay_settings.replay_feature_enabled) {
+
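+ /* Enable Panel Replay in the sink: FreeSync Panel Replay mode, desync error
+ * verification (per panel support) and state transition error detection
+ */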
+ replay_config.bits.FREESYNC_PANEL_REPLAY_MODE = 1;
+ replay_config.bits.TIMING_DESYNC_ERROR_VERIFICATION =
+ link->replay_settings.config.replay_timing_sync_supported;
+ replay_config.bits.STATE_TRANSITION_ERROR_DETECTION = 1;
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_SINK_PR_ENABLE_AND_CONFIGURATION,
+ (uint8_t *)&(replay_config.raw), sizeof(uint8_t));
+
+ memset(&alpm_config, 0, sizeof(alpm_config));
+ alpm_config.bits.ENABLE = 1;
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_RECEIVER_ALPM_CONFIG,
+ &alpm_config.raw,
+ sizeof(alpm_config.raw));
+ }
+ return true;
+}
+
+bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!replay)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
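+ /* Program a new coasting vtotal only when the value actually changes */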
+ if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst);
+ link->replay_settings.coasting_vtotal = coasting_vtotal;
+ }
+
+ return true;
+}
+
+bool edp_replay_residency(const struct dc_link *link,
+ unsigned int *residency, const bool is_start, const bool is_alpm)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
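+ /* Query DMUB Replay residency; report 0 when Replay is not enabled on this link */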
+ if (replay != NULL && link->replay_settings.replay_feature_enabled)
+ replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm);
+ else
+ *residency = 0;
+
+ return true;
+}
+
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 28f552080558..0a5bbda8c739 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,6 +30,7 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+bool set_cached_brightness_aux(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
@@ -52,6 +53,14 @@ bool edp_setup_psr(struct dc_link *link,
bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link,
uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts);
+bool edp_setup_replay(struct dc_link *link,
+ const struct dc_stream_state *stream);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_replay_residency(const struct dc_link *link,
+ unsigned int *residency, const bool is_start, const bool is_alpm);
+bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);