Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c  1490
1 file changed, 1356 insertions, 134 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 330edd666b7d..54662d74c65a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
#include "dm_services.h"
#include "dc.h"
#include "dc_link_dp.h"
@@ -14,6 +36,7 @@
#include "dpcd_defs.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "inc/dc_link_dpia.h"
#include "inc/link_enc_cfg.h"
/*Travis*/
@@ -39,6 +62,43 @@ enum {
POST_LT_ADJ_REQ_TIMEOUT = 200
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dp_lt_fallback_entry {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+};
+
+static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
+ /* This link training fallback array is ordered by
+ * link bandwidth from highest to lowest.
+ * The DP spec makes it a normative policy to always
+ * choose the next highest link bandwidth during
+ * link training fallback.
+ */
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR20},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR10},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH2},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH2},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH3},
+ {LANE_COUNT_FOUR, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH2},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH},
+ {LANE_COUNT_TWO, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_LOW},
+};
+#endif
+
static bool decide_fallback_link_setting(
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
@@ -52,15 +112,27 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
{
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 100;
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
+#else
core_link_read_dpcd(
link,
DP_TRAINING_AUX_RD_INTERVAL,
(uint8_t *)&training_rd_interval,
sizeof(training_rd_interval));
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+#endif
return wait_in_micro_secs;
}
@@ -68,6 +140,36 @@ static uint32_t get_eq_training_aux_rd_interval(
struct dc_link *link,
const struct dc_link_settings *link_settings)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union training_aux_rd_interval training_rd_interval;
+
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ core_link_read_dpcd(
+ link,
+ DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ }
+
+ switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
+ case 0: return 400;
+ case 1: return 4000;
+ case 2: return 8000;
+ case 3: return 12000;
+ case 4: return 16000;
+ case 5: return 32000;
+ case 6: return 64000;
+ default: return 400;
+ }
+#else
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 400;
@@ -87,13 +189,21 @@ static uint32_t get_eq_training_aux_rd_interval(
}
return wait_in_micro_secs;
+#endif
}
void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (wait_in_micro_secs > 16000)
+ msleep(wait_in_micro_secs/1000);
+ else
+ udelay(wait_in_micro_secs);
+#else
udelay(wait_in_micro_secs);
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
@@ -121,6 +231,17 @@ enum dpcd_training_patterns
case DP_TRAINING_PATTERN_SEQUENCE_4:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS1;
+ break;
+ case DP_128b_132b_TPS2:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2;
+ break;
+ case DP_128b_132b_TPS2_CDS:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
+ break;
+#endif
case DP_TRAINING_PATTERN_VIDEOIDLE:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
break;
@@ -159,13 +280,57 @@ static void dpcd_set_training_pattern(
static enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings)
{
- return DP_TRAINING_PATTERN_SEQUENCE_1;
+ switch (dp_get_link_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ default:
+ return DP_TRAINING_PATTERN_SEQUENCE_1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_ENCODING:
+ return DP_128b_132b_TPS1;
+#endif
+ }
}
static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct encoder_feature_support *enc_caps;
+ struct dpcd_caps *rx_caps = &link->dpcd_caps;
+ enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+
+ /* Access link encoder capability based on whether it is statically
+ * or dynamically assigned to a link.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+ enc_caps = &link_enc->features;
+
+ switch (dp_get_link_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
+ rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+ else if (enc_caps->flags.bits.IS_TPS3_CAPABLE &&
+ rx_caps->max_ln_count.bits.TPS3_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_3;
+ else
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ case DP_128b_132b_ENCODING:
+ pattern = DP_128b_132b_TPS2;
+ break;
+ default:
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ }
+ return pattern;
+#else
enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
struct encoder_feature_support *features;
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
@@ -175,7 +340,7 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -196,8 +361,39 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
return DP_TRAINING_PATTERN_SEQUENCE_3;
return DP_TRAINING_PATTERN_SEQUENCE_2;
+#endif
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
+{
+ uint8_t link_rate = 0;
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings);
+
+ if (encoding == DP_128b_132b_ENCODING)
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ link_rate = 0x1;
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = 0x2;
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = 0x4;
+ break;
+ default:
+ link_rate = 0;
+ break;
+ }
+ else if (encoding == DP_8b_10b_ENCODING)
+ link_rate = (uint8_t) link_settings->link_rate;
+ else
+ link_rate = 0;
+
+ return link_rate;
+}
+#endif
+
enum dc_status dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
@@ -247,7 +443,11 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
&lt_settings->link_settings.link_rate_set, 1);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+#else
rate = (uint8_t) (lt_settings->link_settings.link_rate);
+#endif
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
@@ -289,6 +489,10 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
disable_scrabled_data_symbols = 1;
break;
case DP_TRAINING_PATTERN_SEQUENCE_4:
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ case DP_128b_132b_TPS2:
+#endif
disable_scrabled_data_symbols = 0;
break;
default:
@@ -317,7 +521,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
uint8_t dpcd_lt_buffer[5] = {0};
union dpcd_training_pattern dpcd_pattern = { {0} };
- uint32_t lane;
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
@@ -350,24 +553,8 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_base_lt_offset,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
- /*****************************************************************
- * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
- *****************************************************************/
- for (lane = 0; lane <
- (uint32_t)(lt_settings->link_settings.lane_count); lane++) {
- dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
-
- dpcd_lane[lane].bits.MAX_SWING_REACHED =
- (lt_settings->lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (lt_settings->lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
+ dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->lane_settings, dpcd_lane);
/* concatenate everything into one buffer*/
@@ -380,6 +567,18 @@ static void dpcd_set_lt_pattern_and_lane_settings(
size_in_bytes);
if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
@@ -390,6 +589,16 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lane[0].bits.MAX_SWING_REACHED,
dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
dpcd_base_lt_offset,
@@ -414,6 +623,15 @@ static void dpcd_set_lt_pattern_and_lane_settings(
(uint8_t *)(dpcd_lane),
size_in_bytes);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ } else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ sizeof(dpcd_lt_buffer));
+#endif
} else
/* write it all in (1 + number-of-lanes)-byte burst*/
core_link_write_dpcd(
@@ -421,8 +639,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_base_lt_offset,
dpcd_lt_buffer,
size_in_bytes + sizeof(dpcd_pattern.raw));
-
- link->cur_lane_setting = lt_settings->lane_settings[0];
}
bool dp_is_cr_done(enum dc_lane_count ln_count,
@@ -464,6 +680,37 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint8_t lane = 0;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane_settings[lane].bits.MAX_SWING_REACHED =
+ (hw_lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (hw_lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
+ hw_lane_settings[lane].FFE_PRESET.settings.level;
+ }
+#endif
+ }
+}
+
void dp_update_drive_settings(
struct link_training_settings *dest,
struct link_training_settings src)
@@ -484,6 +731,13 @@ void dp_update_drive_settings(
dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2;
else
dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dest->ffe_preset == NULL)
+ dest->lane_settings[lane].FFE_PRESET = src.lane_settings[lane].FFE_PRESET;
+ else
+ dest->lane_settings[lane].FFE_PRESET = *dest->ffe_preset;
+#endif
}
}
@@ -529,6 +783,10 @@ static void find_max_drive_settings(
lane_settings[0].PRE_EMPHASIS;
/*max_requested.postCursor2 =
* link_training_setting->laneSettings[0].postCursor2;*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ max_requested.FFE_PRESET =
+ link_training_setting->lane_settings[0].FFE_PRESET;
+#endif
/* Determine what the maximum of the requested settings are*/
for (lane = 1; lane < link_training_setting->link_settings.lane_count;
@@ -554,6 +812,13 @@ static void find_max_drive_settings(
link_training_setting->laneSettings[lane].postCursor2;
}
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link_training_setting->lane_settings[lane].FFE_PRESET.settings.level >
+ max_requested.FFE_PRESET.settings.level)
+ max_requested.FFE_PRESET.settings.level =
+ link_training_setting->
+ lane_settings[lane].FFE_PRESET.settings.level;
+#endif
}
/* make sure the requested settings are
@@ -567,6 +832,10 @@ static void find_max_drive_settings(
if (max_requested.postCursor2 > PostCursor2_MaxLevel)
max_requested.postCursor2 = PostCursor2_MaxLevel;
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
+ max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
+#endif
/* make sure the pre-emphasis matches the voltage swing*/
if (max_requested.PRE_EMPHASIS >
@@ -604,6 +873,10 @@ static void find_max_drive_settings(
/*max_lt_setting->laneSettings[lane].postCursor2 =
* max_requested.postCursor2;
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ max_lt_setting->lane_settings[lane].FFE_PRESET =
+ max_requested.FFE_PRESET;
+#endif
}
}
@@ -701,12 +974,28 @@ enum dc_status dp_get_lane_status_and_drive_settings(
(uint32_t)(link_training_setting->link_settings.lane_count);
lane++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ request_settings.lane_settings[lane].FFE_PRESET.raw =
+ dpcd_lane_adjust[lane].tx_ffe.PRESET_VALUE;
+ } else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ request_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ request_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ }
+#else
request_settings.lane_settings[lane].VOLTAGE_SWING =
(enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
VOLTAGE_SWING_LANE);
request_settings.lane_settings[lane].PRE_EMPHASIS =
(enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
PRE_EMPHASIS_LANE);
+#endif
}
/*Note: for postcursor2, read adjusted
@@ -731,7 +1020,6 @@ enum dc_status dpcd_set_lane_settings(
uint32_t offset)
{
union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
- uint32_t lane;
unsigned int lane0_set_address;
enum dc_status status;
@@ -741,25 +1029,9 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->
- link_settings.lane_count);
- lane++) {
- dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(link_training_setting->
- lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(link_training_setting->
- lane_settings[lane].PRE_EMPHASIS);
- dpcd_lane[lane].bits.MAX_SWING_REACHED =
- (link_training_setting->
- lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (link_training_setting->
- lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
+ dp_hw_to_dpcd_lane_settings(link_training_setting,
+ link_training_setting->lane_settings,
+ dpcd_lane);
status = core_link_write_dpcd(link,
lane0_set_address,
@@ -786,6 +1058,18 @@ enum dc_status dpcd_set_lane_settings(
*/
if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ lane0_set_address,
+ dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
@@ -797,6 +1081,16 @@ enum dc_status dpcd_set_lane_settings(
dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ lane0_set_address,
+ dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
@@ -805,7 +1099,6 @@ enum dc_status dpcd_set_lane_settings(
dpcd_lane[0].bits.MAX_SWING_REACHED,
dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
}
- link->cur_lane_setting = link_training_setting->lane_settings[0];
return status;
}
@@ -932,6 +1225,14 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval
case 0x04:
aux_rd_interval_us = 16000;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case 0x05:
+ aux_rd_interval_us = 32000;
+ break;
+ case 0x06:
+ aux_rd_interval_us = 64000;
+ break;
+#endif
default:
break;
}
@@ -972,8 +1273,13 @@ static enum link_training_result perform_channel_equalization_sequence(
tr_pattern = lt_settings->pattern_for_eq;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#else
if (is_repeater(link, offset))
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#endif
dp_set_hw_training_pattern(link, tr_pattern, offset);
@@ -1125,9 +1431,28 @@ static enum link_training_result perform_clock_recovery_sequence(
return LINK_TRAINING_SUCCESS;
/* 6. max VS reached*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if ((dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) &&
+ dp_is_max_vs_reached(lt_settings))
+ break;
+#else
if (dp_is_max_vs_reached(lt_settings))
break;
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+ lt_settings->lane_settings[0].FFE_PRESET.settings.level ==
+ req_settings.lane_settings[0].FFE_PRESET.settings.level)
+ retries_cr++;
+ else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+ lt_settings->lane_settings[0].VOLTAGE_SWING ==
+ req_settings.lane_settings[0].VOLTAGE_SWING)
+ retries_cr++;
+ else
+ retries_cr = 0;
+#else
/* 7. same lane settings*/
/* Note: settings are the same for all lanes,
* so comparing first lane is sufficient*/
@@ -1138,6 +1463,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr++;
else
retries_cr = 0;
+#endif
/* 8. update VS/PE/PC2 in lt_settings*/
dp_update_drive_settings(lt_settings, req_settings);
@@ -1172,7 +1498,11 @@ static inline enum link_training_result dp_transition_to_video_idle(
* TPS4 must be used instead of POST_LT_ADJ_REQ.
*/
if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
+#else
lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) {
+#endif
/* delay 5ms after Main Link output idle pattern and then check
* DPCD 0202h.
*/
@@ -1268,6 +1598,32 @@ static inline void decide_8b_10b_training_settings(
lt_settings->should_set_fec_ready = true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static inline void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings)
+{
+ memset(lt_settings, 0, sizeof(*lt_settings));
+
+ lt_settings->link_settings = *link_settings;
+ /* TODO: should decide link spread when populating link_settings */
+ lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->eq_pattern_time = 2500;
+ lt_settings->eq_wait_time_limit = 400000;
+ lt_settings->eq_loop_count_limit = 20;
+ lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
+ lt_settings->cds_pattern_time = 2500;
+ lt_settings->cds_wait_time_limit = (dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
+ lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
+ LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
+}
+#endif
+
void dp_decide_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_settings,
@@ -1275,6 +1631,10 @@ void dp_decide_training_settings(
{
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
decide_8b_10b_training_settings(link, link_settings, lt_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING)
+ decide_128b_132b_training_settings(link, link_settings, lt_settings);
+#endif
}
static void override_training_settings(
@@ -1284,12 +1644,6 @@ static void override_training_settings(
{
uint32_t lane;
- /* Override link settings */
- if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
- lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
- if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
- lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
-
/* Override link spread */
if (!link->dp_ss_off && overrides->downspread != NULL)
lt_settings->link_settings.link_spread = *overrides->downspread ?
@@ -1303,6 +1657,11 @@ static void override_training_settings(
lt_settings->pre_emphasis = overrides->pre_emphasis;
if (overrides->post_cursor2 != NULL)
lt_settings->post_cursor2 = overrides->post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (overrides->ffe_preset != NULL)
+ lt_settings->ffe_preset = overrides->ffe_preset;
+#endif
+
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
lt_settings->lane_settings[lane].VOLTAGE_SWING =
lt_settings->voltage_swing != NULL ?
@@ -1362,7 +1721,7 @@ uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
-enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
+static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
{
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
@@ -1373,7 +1732,7 @@ enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
sizeof(repeater_mode));
}
-enum dc_status configure_lttpr_mode_non_transparent(
+static enum dc_status configure_lttpr_mode_non_transparent(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
@@ -1416,6 +1775,13 @@ enum dc_status configure_lttpr_mode_non_transparent(
if (encoding == DP_8b_10b_ENCODING) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Driver does not need to train the first hop. Skip DPCD read and clear
+ * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
+
for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
@@ -1489,6 +1855,17 @@ static void print_status_message(
case LINK_RATE_HIGH3:
link_rate = "HBR3";
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR10:
+ link_rate = "UHBR10";
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = "UHBR13.5";
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = "UHBR20";
+ break;
+#endif
default:
break;
}
@@ -1518,6 +1895,20 @@ static void print_status_message(
case LINK_TRAINING_LINK_LOSS:
lt_result = "Link loss";
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_LT_FAILED:
+ lt_result = "LT_FAILED received";
+ break;
+ case DP_128b_132b_MAX_LOOP_COUNT_REACHED:
+ lt_result = "max loop count reached";
+ break;
+ case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT:
+ lt_result = "channel EQ timeout";
+ break;
+ case DP_128b_132b_CDS_DONE_TIMEOUT:
+ lt_result = "CDS timeout";
+ break;
+#endif
default:
break;
}
@@ -1537,6 +1928,9 @@ static void print_status_message(
}
/* Connectivity log: link training */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
+#endif
CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
link_rate,
lt_settings->link_settings.lane_count,
@@ -1619,9 +2013,23 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
static void dpcd_exit_training_mode(struct dc_link *link)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t sink_status = 0;
+ uint8_t i;
+#endif
/* clear training pattern set */
dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ udelay(1000);
+ }
+#endif
}
enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
@@ -1645,6 +2053,131 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
return status;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
+ uint32_t *interval_in_us)
+{
+ union dp_128b_132b_training_aux_rd_interval dpcd_interval;
+ uint32_t interval_unit = 0;
+
+ dpcd_interval.raw = 0;
+ core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
+ &dpcd_interval.raw, sizeof(dpcd_interval.raw));
+ interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
+ /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
+ * INTERVAL_UNIT. The maximum is 256 ms
+ */
+ *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
+}
+
+static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ uint8_t loop_count = 0;
+ uint32_t aux_rd_interval = 0;
+ uint32_t wait_time = 0;
+ struct link_training_settings req_settings;
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+
+ /* Transmit 128b/132b_TPS1 over Main-Link and Set TRAINING_PATTERN_SET to 01h */
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
+
+ /* Adjust TX_FFE_PRESET_VALUE as requested */
+ dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, &req_settings, DPRX);
+ dp_update_drive_settings(lt_settings, req_settings);
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+
+ /* Transmit 128b/132b_TPS2 over Main-Link and Set TRAINING_PATTERN_SET to 02h */
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX);
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_eq);
+
+ /* poll for channel EQ done */
+ while (status == LINK_TRAINING_SUCCESS) {
+ loop_count++;
+ dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
+ wait_time += aux_rd_interval;
+ dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, &req_settings, DPRX);
+ dp_update_drive_settings(lt_settings, req_settings);
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
+ dpcd_lane_status)) {
+ /* pass */
+ break;
+ } else if (loop_count >= lt_settings->eq_loop_count_limit) {
+ status = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+ }
+ }
+
+ /* poll for EQ interlane align done */
+ while (status == LINK_TRAINING_SUCCESS) {
+ if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (wait_time >= lt_settings->eq_wait_time_limit) {
+ status = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->eq_pattern_time);
+ wait_time += lt_settings->eq_pattern_time;
+ dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, &req_settings, DPRX);
+ }
+ }
+
+ return status;
+}
+
+static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ /* Assumption: hardware has already transmitted the EQ pattern */
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ struct link_training_settings req_settings;
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ uint32_t wait_time = 0;
+
+ /* initiate CDS done sequence */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
+
+ /* poll for CDS interlane align done and symbol lock */
+ while (status == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->cds_pattern_time);
+ wait_time += lt_settings->cds_pattern_time;
+ dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, &req_settings, DPRX);
+ if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+ dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else if (wait_time >= lt_settings->cds_wait_time_limit) {
+ status = DP_128b_132b_CDS_DONE_TIMEOUT;
+ }
+ }
+
+ return status;
+}
+#endif
+
static enum link_training_result dp_perform_8b_10b_link_training(
struct dc_link *link,
struct link_training_settings *lt_settings)
@@ -1701,6 +2234,35 @@ static enum link_training_result dp_perform_8b_10b_link_training(
return status;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum link_training_result dp_perform_128b_132b_link_training(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
+ if (link->dc->debug.legacy_dp2_lt) {
+ struct link_training_settings legacy_settings;
+
+ decide_8b_10b_training_settings(link,
+ &lt_settings->link_settings,
+ &legacy_settings);
+ return dp_perform_8b_10b_link_training(link, &legacy_settings);
+ }
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings);
+
+ return result;
+}
+#endif
+
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
const struct dc_link_settings *link_settings,
@@ -1735,6 +2297,10 @@ enum link_training_result dc_link_dp_perform_link_training(
*/
if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, &lt_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (encoding == DP_128b_132b_ENCODING)
+ status = dp_perform_128b_132b_link_training(link, &lt_settings);
+#endif
else
ASSERT(0);
@@ -1772,16 +2338,19 @@ bool perform_link_training_with_retries(
/* Dynamically assigned link encoders associated with stream rather than
* link.
*/
- if (link->dc->res_pool->funcs->link_encs_assign)
- link_enc = stream->link_enc;
+ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
else
link_enc = link->link_enc;
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
*/
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING) {
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+ }
for (j = 0; j < attempts; ++j) {
@@ -1804,14 +2373,13 @@ bool perform_link_training_with_retries(
if (panel_mode == DP_PANEL_MODE_EDP) {
struct cp_psp *cp_psp = &stream->ctx->cp_psp;
- if (cp_psp && cp_psp->funcs.enable_assr) {
- if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
- /* since eDP implies ASSR on, change panel
- * mode to disable ASSR
- */
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- }
+ if (cp_psp && cp_psp->funcs.enable_assr)
+ /* ASSR is bound to fail with unsigned PSP
+ * verstage used during development phase.
+ * Report and continue with eDP panel mode to
+ * perform eDP link training with right settings
+ */
+ cp_psp->funcs.enable_assr(cp_psp->handle, link);
}
#endif
@@ -1821,10 +2389,22 @@ bool perform_link_training_with_retries(
dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
return true;
} else {
- status = dc_link_dp_perform_link_training(
- link,
- &current_setting,
- skip_video_pattern);
+ /** @todo Consolidate USB4 DP and DPx.x training. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dc_link_dpia_perform_link_training(link,
+ &current_setting,
+ skip_video_pattern);
+
+ /* Transmit idle pattern once training successful. */
+ if (status == LINK_TRAINING_SUCCESS)
+ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
+ NULL, 0);
+ } else {
+ status = dc_link_dp_perform_link_training(link,
+ &current_setting,
+ skip_video_pattern);
+ }
+
if (status == LINK_TRAINING_SUCCESS)
return true;
}
@@ -1840,15 +2420,23 @@ bool perform_link_training_with_retries(
dp_disable_link_phy(link, signal);
/* Abort link training if failure due to sink being unplugged. */
- if (status == LINK_TRAINING_ABORT)
- break;
- else if (do_fallback) {
+ if (status == LINK_TRAINING_ABORT) {
+ enum dc_connection_type type = dc_connection_none;
+
+ dc_link_detect_sink(link, &type);
+ if (type == dc_connection_none)
+ break;
+ } else if (do_fallback) {
+ uint32_t req_bw;
+ uint32_t link_bw;
+
decide_fallback_link_setting(*link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
*/
- if (dc_bandwidth_in_kbps_from_timing(&stream->timing) <
- dc_link_bandwidth_kbps(link, &current_setting))
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ link_bw = dc_link_bandwidth_kbps(link, &current_setting);
+ if (req_bw > link_bw)
break;
}
@@ -1950,8 +2538,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_cs_id, link_settings);
/* Set FEC enable */
- fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
- dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+#endif
+ fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
+ dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+#endif
if (lt_overrides->alternate_scrambler_reset) {
if (*lt_overrides->alternate_scrambler_reset)
@@ -1993,23 +2587,59 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
* Still shouldn't turn off dp_receiver (DPCD:600h)
*/
if (link_down == true) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
dp_disable_link_phy(link, link->connector_signal);
- dp_set_fec_ready(link, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+#endif
+ dp_set_fec_ready(link, false);
}
link->sync_lt_in_progress = false;
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
+{
+ enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
+ lttpr_max_link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+ lttpr_max_link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
+ lttpr_max_link_rate = LINK_RATE_UHBR10;
+
+ return lttpr_max_link_rate;
+}
+#endif
+
bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
+ struct link_encoder *link_enc = NULL;
+
if (!max_link_enc_cap) {
DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
return false;
}
- if (link->link_enc->funcs->get_max_link_cap) {
- link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap);
+ /* Links supporting dynamically assigned link encoders will be assigned the next
+ * available encoder if one is not already assigned.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ } else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+
+ if (link_enc && link_enc->funcs->get_max_link_cap) {
+ link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
return true;
}
@@ -2022,9 +2652,31 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum dc_link_rate lttpr_max_link_rate;
+#endif
+ struct link_encoder *link_enc = NULL;
+
+ /* Links supporting dynamically assigned link encoders will be assigned the next
+ * available encoder if one is not already assigned.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ } else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
/* get max link encoder capability */
- link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+ if (link_enc)
+ link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (max_link_cap.link_rate >= LINK_RATE_UHBR10 &&
+ !link->hpo_dp_link_enc)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+#endif
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2045,8 +2697,15 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lttpr_max_link_rate = get_lttpr_max_link_rate(link);
+
+ if (lttpr_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = lttpr_max_link_rate;
+#else
if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate)
max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
__func__,
@@ -2056,7 +2715,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
-enum dc_status read_hpd_rx_irq_data(
+static enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
{
@@ -2187,17 +2846,32 @@ bool dp_verify_link_cap(
enum link_training_result status;
union hpd_irq_data irq_data;
- if (link->dc->debug.skip_detection_link_training) {
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ max_link_cap = get_max_link_cap(link);
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+
+ /* Accept reported capabilities if link supports flexible encoder mapping or encoder already in use. */
+ if (link->dc->debug.skip_detection_link_training ||
+ link->is_dig_mapping_flexible) {
+ /* TODO - should we check link encoder's max link caps here?
+ * How do we know which link encoder to check from?
+ */
link->verified_link_cap = *known_limit_link_setting;
return true;
+ } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign &&
+ !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine)) {
+ link->verified_link_cap = initial_link_settings;
+ return true;
}
memset(&irq_data, 0, sizeof(irq_data));
success = false;
skip_link_training = false;
- max_link_cap = get_max_link_cap(link);
-
/* Grant extended timeout request */
if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
@@ -2205,6 +2879,10 @@ bool dp_verify_link_cap(
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+ reset_dp_hpo_stream_encoders_for_link(link);
+#endif
/* TODO implement override and monitor patch later */
/* try to train the link from high to low to
@@ -2215,19 +2893,13 @@ bool dp_verify_link_cap(
dp_cs_id = get_clock_source_id(link);
- /* link training starts with the maximum common settings
- * supported by both sink and ASIC.
- */
- initial_link_settings = get_common_supported_link_settings(
- *known_limit_link_setting,
- max_link_cap);
cur_link_setting = initial_link_settings;
/* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training.
*/
- if (link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa) {
dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
dp_disable_link_phy(link, link->connector_signal);
@@ -2314,7 +2986,7 @@ bool dp_verify_link_cap_with_retries(
link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
- &link->reported_link_cap,
+ known_limit_link_setting,
&fail_count) && fail_count == 0) {
success = true;
break;
@@ -2329,11 +3001,21 @@ bool dp_verify_mst_link_cap(
{
struct dc_link_settings max_link_cap = {0};
- max_link_cap = get_max_link_cap(link);
- link->verified_link_cap = get_common_supported_link_settings(
- link->reported_link_cap,
- max_link_cap);
-
+ if (dp_get_link_encoding_format(&link->reported_link_cap) ==
+ DP_8b_10b_ENCODING) {
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&link->reported_link_cap) ==
+ DP_128b_132b_ENCODING) {
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
+ }
+#endif
return true;
}
@@ -2360,7 +3042,17 @@ static struct dc_link_settings get_common_supported_link_settings(
* We map it to the maximum supported link rate that
* is smaller than MAX_LINK_BW in this case.
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link_settings.link_rate > LINK_RATE_UHBR20) {
+ link_settings.link_rate = LINK_RATE_UHBR20;
+ } else if (link_settings.link_rate < LINK_RATE_UHBR20 &&
+ link_settings.link_rate > LINK_RATE_UHBR13_5) {
+ link_settings.link_rate = LINK_RATE_UHBR13_5;
+ } else if (link_settings.link_rate < LINK_RATE_UHBR10 &&
+ link_settings.link_rate > LINK_RATE_HIGH3) {
+#else
if (link_settings.link_rate > LINK_RATE_HIGH3) {
+#endif
link_settings.link_rate = LINK_RATE_HIGH3;
} else if (link_settings.link_rate < LINK_RATE_HIGH3
&& link_settings.link_rate > LINK_RATE_HIGH2) {
@@ -2405,6 +3097,14 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
{
switch (link_rate) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_HIGH3;
+#endif
case LINK_RATE_HIGH3:
return LINK_RATE_HIGH2;
case LINK_RATE_HIGH2:
@@ -2439,11 +3139,55 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
return LINK_RATE_HIGH2;
case LINK_RATE_HIGH2:
return LINK_RATE_HIGH3;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR20;
+#endif
default:
return LINK_RATE_UNKNOWN;
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool decide_fallback_link_setting_max_bw_policy(
+ const struct dc_link_settings *max,
+ struct dc_link_settings *cur)
+{
+ uint8_t cur_idx = 0, next_idx;
+ bool found = false;
+
+ while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find current index */
+ if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
+ dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
+ break;
+ else
+ cur_idx++;
+
+ next_idx = cur_idx + 1;
+
+ while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find next index */
+ if (dp_lt_fallbacks[next_idx].lane_count <= max->lane_count &&
+ dp_lt_fallbacks[next_idx].link_rate <= max->link_rate)
+ break;
+ else
+ next_idx++;
+
+ if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
+ cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
+ cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
+ found = true;
+ }
+
+ return found;
+}
+#endif
+
/*
* function: set link rate and lane count fallback based
* on current link setting and last link training result
@@ -2459,6 +3203,11 @@ static bool decide_fallback_link_setting(
{
if (!current_link_setting)
return false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING)
+ return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,
+ current_link_setting);
+#endif
switch (training_result) {
case LINK_TRAINING_CR_FAIL_LANE0:
@@ -2724,7 +3473,7 @@ void decide_link_settings(struct dc_stream_state *stream,
}
/*************************Short Pulse IRQ***************************/
-static bool allow_hpd_rx_irq(const struct dc_link *link)
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
{
/*
* Don't handle RX IRQ unless one of following is met:
@@ -2831,9 +3580,15 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
union phy_test_pattern dpcd_test_pattern;
union lane_adjust dpcd_lane_adjustment[2];
unsigned char dpcd_post_cursor_2_adjustment = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ unsigned char test_pattern_buffer[
+ (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+#else
unsigned char test_pattern_buffer[
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+#endif
unsigned int test_pattern_size = 0;
enum dp_test_pattern test_pattern;
struct dc_link_training_settings link_settings;
@@ -2899,6 +3654,35 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
case PHY_TEST_PATTERN_CP2520_3:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case PHY_TEST_PATTERN_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case PHY_TEST_PATTERN_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case PHY_TEST_PATTERN_PRBS9:
+ test_pattern = DP_TEST_PATTERN_PRBS9;
+ break;
+ case PHY_TEST_PATTERN_PRBS11:
+ test_pattern = DP_TEST_PATTERN_PRBS11;
+ break;
+ case PHY_TEST_PATTERN_PRBS15:
+ test_pattern = DP_TEST_PATTERN_PRBS15;
+ break;
+ case PHY_TEST_PATTERN_PRBS23:
+ test_pattern = DP_TEST_PATTERN_PRBS23;
+ break;
+ case PHY_TEST_PATTERN_PRBS31:
+ test_pattern = DP_TEST_PATTERN_PRBS31;
+ break;
+ case PHY_TEST_PATTERN_264BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PULSE:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
+ break;
+#endif
default:
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
break;
@@ -2914,6 +3698,27 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
+ test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
+ core_link_read_dpcd(
+ link,
+ DP_PHY_SQUARE_PATTERN,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
+ core_link_read_dpcd(
+ link,
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+#endif
+
/* prepare link training settings */
link_settings.link = link->cur_link_settings;
@@ -2922,6 +3727,24 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
lane++) {
dpcd_lane_adjust.raw =
get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link_settings.lane_settings[lane].FFE_PRESET.raw =
+ dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+ } else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_8b_10b_ENCODING) {
+ link_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_settings.lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ }
+#else
link_settings.lane_settings[lane].VOLTAGE_SWING =
(enum dc_voltage_swing)
(dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
@@ -2931,6 +3754,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
link_settings.lane_settings[lane].POST_CURSOR2 =
(enum dc_post_cursor2)
((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+#endif
}
for (i = 0; i < 4; i++)
@@ -3158,7 +3982,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
}
}
-static void handle_automated_test(struct dc_link *link)
+void dc_link_dp_handle_automated_test(struct dc_link *link)
{
union test_request test_request;
union test_response test_response;
@@ -3207,17 +4031,50 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ break;
+ }
+
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ core_link_disable_stream(pipe_ctx);
+ }
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ core_link_enable_stream(link->dc->current_state, pipe_ctx);
+ }
+ }
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
{
union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
enum dc_status result;
bool status = false;
- struct pipe_ctx *pipe_ctx;
- int i;
if (out_link_loss)
*out_link_loss = false;
+
+ if (has_left_work)
+ *has_left_work = false;
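+	/* When defer_handling is set, interrupt sources that need follow-up
+	 * work (automated test, MST messages, link loss) are not serviced
+	 * here; *has_left_work tells the caller that deferred handling
+	 * remains to be done.
+	 */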
/* For use cases related to down stream connection status change,
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/
@@ -3249,11 +4106,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
&device_service_clear.raw,
sizeof(device_service_clear.raw));
device_service_clear.raw = 0;
- handle_automated_test(link);
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_automated_test(link);
return false;
}
- if (!allow_hpd_rx_irq(link)) {
+ if (!dc_link_dp_allow_hpd_rx_irq(link)) {
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
@@ -3267,12 +4127,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* so do not handle as a normal sink status change interrupt.
*/
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
return true;
+ }
/* check if we have MST msg and return since we poll for it */
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
return false;
+ }
/* For now we only handle 'Downstream port status' case.
* If we got sink count changed it means
@@ -3289,29 +4155,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
sizeof(hpd_irq_dpcd_data),
"Status: ");
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- break;
- }
-
- if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
- return false;
-
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_disable_stream(pipe_ctx);
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_link_loss(link);
status = false;
if (out_link_loss)
@@ -3535,6 +4382,43 @@ static void get_active_converter_info(
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ union dp_dfp_cap_ext dfp_cap_ext;
+		memset(&dfp_cap_ext, '\0', sizeof(dfp_cap_ext));
+ core_link_read_dpcd(
+ link,
+ DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
+ dfp_cap_ext.raw,
+ sizeof(dfp_cap_ext.raw));
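+		/* Multi-byte DFP capability fields are assembled from
+		 * consecutive DPCD bytes, low byte first.
+		 */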
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
+ dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
+ (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
+ dfp_cap_ext.fields.max_video_h_active_width[0] +
+ (dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
+ dfp_cap_ext.fields.max_video_v_active_height[0] +
+ (dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
+ dfp_cap_ext.fields.encoding_format_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
+ dfp_cap_ext.fields.rgb_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr444_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr422_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr420_color_depth_caps;
+ DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
+ DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
+ DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
+ }
+#endif
}
static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
@@ -3594,7 +4478,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t lttpr_dpcd_data[8];
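+	/* Buffer for the LTTPR capability registers starting at
+	 * DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV; the larger
+	 * size also covers the 128b/132b channel coding and rate caps.
+	 */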
+ bool allow_lttpr_non_transparent_mode = 0;
+#else
uint8_t lttpr_dpcd_data[6];
+#endif
bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
enum dc_status status = DC_ERROR_UNEXPECTED;
@@ -3602,6 +4491,16 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
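+	/* Non-transparent LTTPR mode can be allowed independently for
+	 * DP 2.0 (128b/132b capable) and DP 1.4a sinks via separate
+	 * config bits.
+	 */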
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
+ link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
+ allow_lttpr_non_transparent_mode = 1;
+ } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ allow_lttpr_non_transparent_mode = 1;
+ }
+#endif
+
/*
* Logic to determine LTTPR mode
*/
@@ -3609,17 +4508,31 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
if (vbios_lttpr_enable && vbios_lttpr_interop)
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (allow_lttpr_non_transparent_mode)
+#else
if (link->dc->config.allow_lttpr_non_transparent_mode)
+#endif
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
else
link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
} else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
+#else
if (!link->dc->config.allow_lttpr_non_transparent_mode
|| !link->dc->caps.extended_aux_timeout_support)
+#endif
link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
else
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+#endif
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
/* By reading LTTPR capability, RX assumes that we will enable
@@ -3659,8 +4572,19 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+#endif
+
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
@@ -3709,6 +4633,8 @@ static bool retrieve_link_cap(struct dc_link *link)
LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
is_lttpr_present = dp_retrieve_lttpr_cap(link);
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
status = core_link_read_dpcd(link, DP_SET_POWER,
&dpcd_power_state, sizeof(dpcd_power_state));
@@ -3909,16 +4835,82 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_DSC_SUPPORT,
link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+ DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1);
+			DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
+ }
+#else
status = core_link_read_dpcd(
link,
DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+#endif
}
if (!dpcd_read_sink_ext_caps(link))
link->dpcd_sink_ext_caps.raw = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
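+	/* MAIN_LINK_CHANNEL_CODING_CAP is part of the receiver capability
+	 * block already fetched into dpcd_data[] starting at DP_DPCD_REV.
+	 */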
+ link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV];
+
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+
+ core_link_read_dpcd(link,
+ DP_128b_132b_SUPPORTED_LINK_RATES,
+ &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
+ sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw));
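+		/* Report the highest UHBR rate advertised by the sink. */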
+ if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR10;
+ else
+ dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__);
+		DC_LOG_DP2("128b/132b supported link rates are read at link %d", link->link_index);
+ DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz",
+ link->reported_link_cap.link_rate / 100,
+ link->reported_link_cap.link_rate % 100);
+
+ core_link_read_dpcd(link,
+ DP_SINK_VIDEO_FALLBACK_FORMATS,
+ &link->dpcd_caps.fallback_formats.raw,
+ sizeof(link->dpcd_caps.fallback_formats.raw));
+ DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index);
+ if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.raw == 0) {
+ DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported");
+ link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1;
+ }
+
+ core_link_read_dpcd(link,
+ DP_FEC_CAPABILITY_1,
+ &link->dpcd_caps.fec_cap1.raw,
+ sizeof(link->dpcd_caps.fec_cap1.raw));
+ DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index);
+ if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
+ DC_LOG_DP2("\tFEC aggregated error counters are supported");
+ }
+#endif
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -4349,7 +5341,7 @@ bool dc_link_dp_set_test_pattern(
* MuteAudioEndpoint(pPathMode->pDisplayPath, true);
*/
/* Blank stream */
- pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
}
dp_set_hw_test_pattern(link, test_pattern,
@@ -4389,6 +5381,35 @@ bool dc_link_dp_set_test_pattern(
case DP_TEST_PATTERN_CP2520_3:
pattern = PHY_TEST_PATTERN_CP2520_3;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ pattern = PHY_TEST_PATTERN_PRBS9;
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ pattern = PHY_TEST_PATTERN_PRBS11;
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ pattern = PHY_TEST_PATTERN_PRBS15;
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ pattern = PHY_TEST_PATTERN_PRBS23;
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ pattern = PHY_TEST_PATTERN_PRBS31;
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PULSE:
+ pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
+ break;
+#endif
default:
return false;
}
@@ -4651,7 +5672,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -4671,7 +5692,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
link_enc->funcs->fec_set_ready(link_enc, true);
link->fec_state = dc_link_fec_ready;
} else {
- link_enc->funcs->fec_set_ready(link->link_enc, false);
+ link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
dm_error("dpcd write failed to set fec_ready");
}
@@ -4698,8 +5719,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(
- link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -4964,6 +5984,208 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings
if ((link_settings->link_rate >= LINK_RATE_LOW) &&
(link_settings->link_rate <= LINK_RATE_HIGH3))
return DP_8b_10b_ENCODING;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
+ (link_settings->link_rate <= LINK_RATE_UHBR20))
+ return DP_128b_132b_ENCODING;
+#endif
return DP_UNKNOWN_ENCODING;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
+static void get_lane_status(
+ struct dc_link *link,
+ uint32_t lane_count,
+ union lane_status *status,
+ union lane_align_status_updated *status_updated)
+{
+ unsigned int lane;
+ uint8_t dpcd_buf[3] = {0};
+
+ if (status == NULL || status_updated == NULL) {
+ return;
+ }
+
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ dpcd_buf,
+ sizeof(dpcd_buf));
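+	/* dpcd_buf[0..1] hold the per-lane status nibbles (LANE0_1_STATUS and
+	 * LANE2_3_STATUS); dpcd_buf[2] holds LANE_ALIGN_STATUS_UPDATED.
+	 */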
+
+ for (lane = 0; lane < lane_count; lane++) {
+ status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane);
+ }
+
+ status_updated->raw = dpcd_buf[2];
+}
+
+bool dpcd_write_128b_132b_sst_payload_allocation_table(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ struct link_mst_stream_allocation_table *proposed_table,
+ bool allocate)
+{
+ const uint8_t vc_id = 1; /// VC ID always 1 for SST
+ const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST
+ bool result = false;
+ uint8_t req_slot_count = 0;
+ struct fixed31_32 avg_time_slots_per_mtp = { 0 };
+ union payload_table_update_status update_status = { 0 };
+ const uint32_t max_retries = 30;
+ uint32_t retries = 0;
+
+ if (allocate) {
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ } else {
+ /// Leave req_slot_count = 0 if allocate is false.
+ }
+
+ /// Write DPCD 2C0 = 1 to start updating
+ update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ /// Program the changes in DPCD 1C0 - 1C2
+ ASSERT(vc_id == 1);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_SET,
+ &vc_id,
+ 1);
+
+ ASSERT(start_time_slot == 0);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_START_TIME_SLOT,
+ &start_time_slot,
+ 1);
+
+ ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); /// Validation should filter out modes that exceed link BW
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
+ &req_slot_count,
+ 1);
+
+	/// Poll until DPCD 2C0 reads 1
+	/// Try for at least 150ms (30 retries, with a 5ms delay after each attempt)
+
+ while (retries < max_retries) {
+ if (core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1) == DC_OK) {
+ if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) {
+ DC_LOG_DP2("SST Update Payload: downstream payload table updated.");
+ result = true;
+ break;
+ }
+ } else {
+ union dpcd_rev dpcdRev;
+
+ if (core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ &dpcdRev.raw,
+ 1) != DC_OK) {
+ DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision "
+ "of sink while polling payload table "
+ "updated status bit.");
+ break;
+ }
+ }
+ retries++;
+ udelay(5000);
+ }
+
+ if (!result && retries == max_retries) {
+ DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, "
+ "continue on. Something is wrong with the branch.");
+ // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
+ }
+
+ proposed_table->stream_count = 1; /// Always 1 stream for SST
+ proposed_table->stream_allocations[0].slot_count = req_slot_count;
+ proposed_table->stream_allocations[0].vcp_id = vc_id;
+
+ return result;
+}
+
+bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
+{
+ /*
+ * wait for ACT handled
+ */
+ int i;
+ const int act_retries = 30;
+ enum act_return_status result = ACT_FAILED;
+ union payload_table_update_status update_status = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated lane_status_updated;
+
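+	/* Each poll iteration first verifies the link is still trained; a
+	 * link loss aborts the wait for ACT handled.
+	 */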
+ for (i = 0; i < act_retries; i++) {
+ get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
+
+ if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_interlane_aligned(lane_status_updated)) {
+ DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
+ "polling for ACT handled.");
+ result = ACT_LINK_LOST;
+ break;
+ }
+ core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ if (update_status.bits.ACT_HANDLED == 1) {
+ DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
+ result = ACT_SUCCESS;
+ break;
+ }
+
+ udelay(5000);
+ }
+
+ if (result == ACT_FAILED) {
+ DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
+ "continue on. Something is wrong with the branch.");
+ }
+
+ return (result == ACT_SUCCESS);
+}
+
+struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link)
+{
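+	/*
+	 * avg_time_slots_per_mtp = stream bandwidth /
+	 *                          (link bandwidth / MAX_MTP_SLOT_COUNT),
+	 * i.e. the average number of MTP time slots this stream requires.
+	 */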
+ struct fixed31_32 link_bw_effective =
+ dc_fixpt_from_int(
+ dc_link_bandwidth_kbps(link, &link->cur_link_settings));
+ struct fixed31_32 timeslot_bw_effective =
+ dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
+ struct fixed31_32 timing_bw =
+ dc_fixpt_from_int(
+ dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ struct fixed31_32 avg_time_slots_per_mtp =
+ dc_fixpt_div(timing_bw, timeslot_bw_effective);
+
+ return avg_time_slots_per_mtp;
+}
+
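+/* A pipe carries a 128b/132b (DP 2.0) signal when it is assigned an HPO DP
+ * stream encoder and its link has an HPO DP link encoder.
+ */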
+bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->stream->link->hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+#endif