author		Meenakshikumar Somasundaram <meenakshikumar.somasundaram@amd.com>	2023-12-05 08:01:15 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-03-01 15:34:57 +0300
commit		c3682b63c60fdef04fc503d36d08bb84ee9758ad (patch)
tree		afdb5dc500f5d65a84cbe5d082d65162f6f18727 /drivers
parent		b45df837fe8710944e18d4a422c558c95aa77d80 (diff)
download	linux-c3682b63c60fdef04fc503d36d08bb84ee9758ad.tar.xz
drm/amd/display: Add dpia display mode validation logic
[ Upstream commit 59f1622a5f05d948a7c665a458a3dd76ba73015e ]

[Why]
If the bandwidth allocation feature is enabled, the connection manager won't limit the DP tunnel bandwidth, so display mode validation must be performed for streams on DPIA links to avoid oversubscribing the DP tunnel bandwidth.

[How]
- Read the non-reduced link rate and lane count, and update the reported link capability.
- Calculate the bandwidth required for the streams of each host router's DPIA links, and validate it against the bandwidth allocated to that host router.

Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Reviewed-by: PeiChen Huang <peichen.huang@amd.com>
Reviewed-by: Aric Cyr <aric.cyr@amd.com>
Acked-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
Signed-off-by: Meenakshikumar Somasundaram <meenakshikumar.somasundaram@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Stable-dep-of: 0484e05d048b ("drm/amd/display: fixed integer types and null check locations")
Signed-off-by: Sasha Levin <sashal@kernel.org>
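For orientation, the sketch below models the validation flow this patch adds in dpia_validate_usb4_bw(): each DPIA's required bandwidth is rounded up to its allocation granularity, the rounded demands are summed per host router, and each sum is checked against that host router's total DP tunnel bandwidth. This is a minimal standalone sketch; struct dpia, the fixed kbps budgets, and validate_dpia_bw() are illustrative placeholders, not the driver's actual structures.

#include <stdbool.h>
#include <stdio.h>

#define MAX_HR_NUM 2

/* Simplified, hypothetical stand-in for the driver's per-DPIA state. */
struct dpia {
	int hr_index;          /* host router this DPIA hangs off        */
	int granularity_kbps;  /* allocation granularity, e.g. 0.25 Gbps */
	int bw_needed_kbps;    /* bandwidth the requested mode needs     */
};

/* Round bw up to the next multiple of the allocation granularity,
 * mirroring the round-up expression in dpia_validate_usb4_bw().
 */
static int round_up_to_granularity(int bw, int granularity)
{
	return (bw / granularity) * granularity +
	       ((bw % granularity) ? granularity : 0);
}

static bool validate_dpia_bw(const struct dpia *dpias, unsigned int num_dpias,
			     const int hr_total_bw_kbps[MAX_HR_NUM])
{
	int bw_needed_per_hr[MAX_HR_NUM] = { 0 };
	unsigned int i;

	/* Accumulate granularity-aligned demand per host router. */
	for (i = 0; i < num_dpias; ++i)
		bw_needed_per_hr[dpias[i].hr_index] +=
			round_up_to_granularity(dpias[i].bw_needed_kbps,
						dpias[i].granularity_kbps);

	/* Reject the mode set if any host router's budget is exceeded. */
	for (i = 0; i < MAX_HR_NUM; ++i)
		if (bw_needed_per_hr[i] > hr_total_bw_kbps[i])
			return false;

	return true;
}

int main(void)
{
	/* Two DPIAs on host router 0; all numbers are assumed examples. */
	const struct dpia dpias[2] = {
		{ .hr_index = 0, .granularity_kbps = 250000, .bw_needed_kbps = 8100000 },
		{ .hr_index = 0, .granularity_kbps = 250000, .bw_needed_kbps = 2500000 },
	};
	const int hr_total_bw_kbps[MAX_HR_NUM] = { 10800000, 10800000 };

	/* 8100000 rounds up to 8250000; 8250000 + 2500000 <= 10800000. */
	printf("mode set %s\n",
	       validate_dpia_bw(dpias, 2, hr_total_bw_kbps) ? "fits" : "exceeds budget");
	return 0;
}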
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c	2
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dc.h	4
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dc_dp_types.h	6
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dc_types.h	2
-rw-r--r--	drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c	130
5 files changed, 104 insertions, 40 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index ed94187c2afa..f365773d5714 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -497,7 +497,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
{
return dc->link_srv->validate_dpia_bandwidth(streams, count);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3f33740e2f65..5f2eac868b74 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -2116,11 +2116,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*
* @dc: pointer to dc struct
* @stream: pointer to all possible streams
- * @num_streams: number of valid DPIA streams
+ * @count: number of valid DPIA streams
*
* return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
*/
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
const unsigned int count);
/* Sink Interfaces - A sink corresponds to a display output device */
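As a usage note for the renamed entry point, here is a hedged sketch of a call site. Only the dc_link_dp_dpia_validate() signature and its return semantics come from the header above; the wrapper and how its stream array is gathered are hypothetical.

/* Hypothetical caller: given the candidate DPIA streams for a new
 * topology, ask DC whether their combined bandwidth fits the DP
 * tunnels. dc_link_dp_dpia_validate() returns true if the BW used
 * by the DPIAs does not exceed the available BW.
 */
static bool mode_fits_dp_tunnels(struct dc *dc,
				 const struct dc_stream_state *dpia_streams,
				 unsigned int dpia_stream_count)
{
	if (dpia_stream_count == 0)
		return true; /* nothing routed over a DP tunnel */

	return dc_link_dp_dpia_validate(dc, dpia_streams, dpia_stream_count);
}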
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index cfaa39c5dd16..83719f5bea49 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1433,6 +1433,12 @@ struct dp_trace {
#ifndef DP_TUNNELING_STATUS
#define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
#endif
+#ifndef DP_TUNNELING_MAX_LINK_RATE
+#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_MAX_LANE_COUNT
+#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */
+#endif
#ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
#define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index accffba5a683..19b7314811ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1121,6 +1121,8 @@ struct dc_dpia_bw_alloc {
int bw_granularity; // BW Granularity
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
bool response_ready; // Response ready from the CM side
+ uint8_t nrd_max_lane_count; // Non-reduced max lane count
+ uint8_t nrd_max_link_rate; // Non-reduced max link rate
};
#define MAX_SINKS_PER_LINK 4
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index d6e1f969bfd5..a7aa8c9da868 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -59,6 +59,7 @@ static void reset_bw_alloc_struct(struct dc_link *link)
link->dpia_bw_alloc_config.estimated_bw = 0;
link->dpia_bw_alloc_config.bw_granularity = 0;
link->dpia_bw_alloc_config.response_ready = false;
+ link->dpia_bw_alloc_config.sink_allocated_bw = 0;
}
#define BW_GRANULARITY_0 4 // 0.25 Gbps
@@ -104,6 +105,32 @@ static int get_estimated_bw(struct dc_link *link)
return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}
+static int get_non_reduced_max_link_rate(struct dc_link *link)
+{
+ uint8_t nrd_max_link_rate = 0;
+
+ core_link_read_dpcd(
+ link,
+ DP_TUNNELING_MAX_LINK_RATE,
+ &nrd_max_link_rate,
+ sizeof(uint8_t));
+
+ return nrd_max_link_rate;
+}
+
+static int get_non_reduced_max_lane_count(struct dc_link *link)
+{
+ uint8_t nrd_max_lane_count = 0;
+
+ core_link_read_dpcd(
+ link,
+ DP_TUNNELING_MAX_LANE_COUNT,
+ &nrd_max_lane_count,
+ sizeof(uint8_t));
+
+ return nrd_max_lane_count;
+}
+
/*
* Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
* granularity, Driver_ID, CM_Group, & populate the BW allocation structs
@@ -111,13 +138,20 @@ static int get_estimated_bw(struct dc_link *link)
*/
static void init_usb4_bw_struct(struct dc_link *link)
{
- // Init the known values
+ reset_bw_alloc_struct(link);
+
+ /* init the known values */
link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+ link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
+ link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.bw_granularity,
link->dpia_bw_alloc_config.estimated_bw);
+ DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
+ __func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
+ link->dpia_bw_alloc_config.nrd_max_lane_count);
}
static uint8_t get_lowest_dpia_index(struct dc_link *link)
@@ -142,39 +176,50 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
}
/*
- * Get the Max Available BW or Max Estimated BW for each Host Router
+ * Get the maximum dp tunnel bandwidth of host router
*
- * @link: pointer to the dc_link struct instance
- * @type: ESTIMATD BW or MAX AVAILABLE BW
+ * @dc: pointer to the dc struct instance
+ * @hr_index: host router index
*
- * return: response_ready flag from dc_link struct
+ * return: host router maximum dp tunnel bandwidth
*/
-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
{
- const struct dc *dc_struct = link->dc;
- uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
- uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
- struct dc_link *link_temp;
+ uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
+ uint8_t hr_index_temp = 0;
+ struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
- int i;
-
- for (i = 0; i < MAX_PIPES * 2; ++i) {
- if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
+ for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
- link_temp = dc_struct->links[i];
- if (!link_temp || !link_temp->hpd_status)
+ if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
- idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
-
- if (idx_temp == idx) {
-
- if (type == HOST_ROUTER_BW_ESTIMATED)
- total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
- else if (type == HOST_ROUTER_BW_ALLOCATED)
- total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
+ hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
+
+ if (hr_index_temp == hr_index) {
+ link_dpia_primary = dc->links[i];
+ link_dpia_secondary = dc->links[i + 1];
+
+ /**
+ * If BW allocation enabled on both DPIAs, then
+ * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
+ * otherwise HR BW = Estimated(bw alloc enabled dpia)
+ */
+ if ((link_dpia_primary->hpd_status &&
+ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
+ (link_dpia_secondary->hpd_status &&
+ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
+ total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+ link_dpia_secondary->dpia_bw_alloc_config.sink_allocated_bw;
+ } else if (link_dpia_primary->hpd_status &&
+ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
+ total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
+ } else if (link_dpia_secondary->hpd_status &&
+ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
+ total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
+ }
+ break;
}
}
@@ -194,7 +239,6 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
if (link) {
DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
__func__, link->link_index);
- link->dpia_bw_alloc_config.sink_allocated_bw = 0;
reset_bw_alloc_struct(link);
}
}
@@ -397,7 +441,7 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
if (!timeout)
ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
- ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ ret = link->dpia_bw_alloc_config.sink_allocated_bw;
}
//2. Cold Unplug
else if (!link->hpd_status)
@@ -439,29 +483,41 @@ out:
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
{
bool ret = true;
- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
- uint8_t lowest_dpia_index = 0, dpia_index = 0;
- uint8_t i;
+ int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
+ uint8_t lowest_dpia_index, i, hr_index;
if (!num_dpias || num_dpias > MAX_DPIA_NUM)
return ret;
- //Get total Host Router BW & Validate against each Host Router max BW
+ lowest_dpia_index = get_lowest_dpia_index(link[0]);
+
+ /* get total Host Router BW with granularity for the given modes */
for (i = 0; i < num_dpias; ++i) {
+ int granularity_Gbps = 0;
+ int bw_granularity = 0;
if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
continue;
- lowest_dpia_index = get_lowest_dpia_index(link[i]);
if (link[i]->link_index < lowest_dpia_index)
continue;
- dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
- bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
- if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
+ granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
+ bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
+ ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
- ret = false;
- break;
+ hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
+ bw_needed_per_hr[hr_index] += bw_granularity;
+ }
+
+ /* validate against each Host Router max BW */
+ for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
+ if (bw_needed_per_hr[hr_index]) {
+ host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
+ if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
+ ret = false;
+ break;
+ }
}
}
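As a quick worked check of the round-up arithmetic in the loop above, a minimal sketch, assuming Kbps_TO_Gbps is the 1000 * 1000 kbps-per-Gbps factor used earlier in this file and a bw_granularity of 4 (0.25 Gbps steps):

#include <stdio.h>

int main(void)
{
	/* Assumed values: Kbps_TO_Gbps = 1000 * 1000, bw_granularity = 4. */
	const int granularity_Gbps = (1000 * 1000) / 4; /* 250000 kbps */
	const int bw_needed_per_dpia = 8460000;         /* example demand */

	/* Same round-up expression as in dpia_validate_usb4_bw(). */
	int bw_aligned = (bw_needed_per_dpia / granularity_Gbps) * granularity_Gbps +
			 ((bw_needed_per_dpia % granularity_Gbps) ? granularity_Gbps : 0);

	/* Prints "8460000 kbps -> 8500000 kbps": each DPIA's demand is
	 * rounded UP to the next 0.25 Gbps multiple before it is summed
	 * into bw_needed_per_hr[] for its host router.
	 */
	printf("%d kbps -> %d kbps\n", bw_needed_per_dpia, bw_aligned);
	return 0;
}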