path: root/drivers/gpu/drm/amd/display/dc/core/dc.c
author    Arnaldo Carvalho de Melo <acme@redhat.com>    2020-08-06 14:15:47 +0300
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2020-08-06 14:15:47 +0300
commit    94fb1afb14c4f0ceb8c5508ddddac6819f662e95 (patch)
tree      4988e5769dc7482caa7f441475ae31f50bbd37ef /drivers/gpu/drm/amd/display/dc/core/dc.c
parent    c4735d990268399da9133b0ad445e488ece009ad (diff)
parent    47ec5303d73ea344e84f46660fff693c57641386 (diff)
Merge remote-tracking branch 'torvalds/master' into perf/core
To sync headers, for instance, in this case tools/perf was ahead of upstream till Linus merged tip/perf/core to get the PERF_RECORD_TEXT_POKE changes:

  Warning: Kernel ABI header at 'tools/include/uapi/linux/perf_event.h' differs from latest version at 'include/uapi/linux/perf_event.h'
  diff -u tools/include/uapi/linux/perf_event.h include/uapi/linux/perf_event.h

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core/dc.c')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 140
1 file changed, 132 insertions, 8 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d016f50e187c..ef0b5941bc50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -68,6 +68,8 @@
#include "dmub/dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
+
#define CTX \
dc->ctx
@@ -186,9 +188,10 @@ static bool create_links(
bool should_destory_link = false;
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (dc->config.edp_not_connected)
- should_destory_link = true;
- else if (dc->debug.remove_disconnect_edp) {
+ if (dc->config.edp_not_connected) {
+ if (!IS_DIAG_DC(dc->ctx->dce_environment))
+ should_destory_link = true;
+ } else {
enum dc_connection_type type;
dc_link_detect_sink(link, &type);
if (type == dc_connection_none)
@@ -728,6 +731,9 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
+#endif
if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
@@ -1244,7 +1250,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
- disable_dangling_plane(dc, context);
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1260,6 +1265,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
@@ -1382,6 +1388,43 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dc_acquire_release_mpc_3dlut(
+ struct dc *dc, bool acquire,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int pipe_idx;
+ bool ret = false;
+ bool found_pipe_idx = false;
+ const struct resource_pool *pool = dc->res_pool;
+ struct resource_context *res_ctx = &dc->current_state->res_ctx;
+ int mpcc_id = 0;
+
+ if (pool && res_ctx) {
+ if (acquire) {
+ /*find pipe idx for the given stream*/
+ for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
+ if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
+ found_pipe_idx = true;
+ mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
+ break;
+ }
+ }
+ } else
+ found_pipe_idx = true;/*for release pipe_idx is not required*/
+
+ if (found_pipe_idx) {
+ if (acquire && pool->funcs->acquire_post_bldn_3dlut)
+ ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
+ else if (acquire == false && pool->funcs->release_post_bldn_3dlut)
+ ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
+ }
+ }
+ return ret;
+}
+#endif
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
int i;
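
Illustrative only, not part of the commit: a minimal sketch of how a DCN3.0 caller might use the new dc_acquire_release_mpc_3dlut() helper added above. example_program_3dlut is a hypothetical function name; it assumes the caller already holds a valid dc and stream.

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
static void example_program_3dlut(struct dc *dc, struct dc_stream_state *stream)
{
	struct dc_3dlut *lut = NULL;
	struct dc_transfer_func *shaper = NULL;

	/* acquire a post-blend 3D LUT/shaper pair for the pipe driving 'stream' */
	if (!dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper))
		return;

	/* ... fill 'lut' and 'shaper' and commit the stream update here ... */

	/* release the pair once it is no longer needed */
	dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
}
#endif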
@@ -2280,9 +2323,22 @@ static void commit_planes_for_stream(struct dc *dc,
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
- top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
- top_pipe_to_program->stream_res.tg);
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+ }
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, true);
@@ -2452,7 +2508,20 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+
+ if (stream && should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
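
Illustrative only, not part of the commit: the two hunks above take the DIG double-buffer lock through the DMUB firmware when should_use_dmub_lock() says so (dmub_hw_lock_mgr_cmd with lock = true), and drop it the same way later (lock = false), falling back to the timing generator's own enable/disable hooks otherwise. The sketch below folds both halves into a single hypothetical helper, example_dig_doublebuffer_lock, purely to show the symmetry; the real code keeps the two paths inline in commit_planes_for_stream().

/* hypothetical wrapper, for illustration only */
static void example_dig_doublebuffer_lock(struct dc *dc,
					  struct pipe_ctx *top_pipe_to_program,
					  struct dc_stream_state *stream,
					  bool lock)
{
	struct timing_generator *tg = top_pipe_to_program->stream_res.tg;

	if (should_use_dmub_lock(stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_dig = 1;
		inst_flags.dig_inst = tg->inst;

		/* DMUB firmware takes or drops the DIG lock on our behalf */
		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, lock, &hw_locks, &inst_flags);
	} else if (lock) {
		tg->funcs->lock_doublebuffer_enable(tg);
	} else {
		tg->funcs->lock_doublebuffer_disable(tg);
	}
}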
@@ -2614,6 +2683,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
dal_irq_service_ack(dc->res_pool->irqs, src);
}
+void dc_power_down_on_boot(struct dc *dc)
+{
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
+ dc->hwss.power_down_on_boot)
+ dc->hwss.power_down_on_boot(dc);
+}
+
void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state)
@@ -2842,3 +2918,51 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
if (dc->hwss.get_clock)
dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+void dc_allow_idle_optimizations(struct dc *dc, bool allow)
+{
+ if (dc->debug.disable_idle_power_optimizations)
+ return;
+
+ if (allow == dc->idle_optimizations_allowed)
+ return;
+
+ if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
+ dc->idle_optimizations_allowed = allow;
+}
+
+/*
+ * blank all streams, and set min and max memory clock to
+ * lowest and highest DPM level, respectively
+ */
+void dc_unlock_memory_clock_frequency(struct dc *dc)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_PIPES; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
+
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
+/*
+ * set min memory clock to the min required for current mode,
+ * max to maxDPM, and unblank streams
+ */
+void dc_lock_memory_clock_frequency(struct dc *dc)
+{
+ unsigned int i;
+
+ dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+
+ for (i = 0; i < MAX_PIPES; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+}
+#endif
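
Illustrative only, not part of the commit: a minimal sketch of how the new DCN3.0 entry points above might be paired. example_memclk_window is a hypothetical caller; it assumes the surrounding driver code already provides any locking the DC interface requires.

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* hypothetical caller, for illustration only */
static void example_memclk_window(struct dc *dc)
{
	/* blank all streams and open the memory clock to the full
	 * min..max DPM range
	 */
	dc_unlock_memory_clock_frequency(dc);

	/* ... perform whatever work required the unlocked memory clock ... */

	/* pin memclk back to what the current mode needs and unblank streams */
	dc_lock_memory_clock_frequency(dc);

	/* optionally allow idle power optimizations again */
	dc_allow_idle_optimizations(dc, true);
}
#endif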