From d63e31f66892f67d8f1e279c57c4c0aee789fc66 Mon Sep 17 00:00:00 2001 From: Josip Pavic Date: Fri, 14 May 2021 14:04:02 -0400 Subject: drm/amd/display: copy dmub caps to dc on dcn31 [Why & How] Add code path to copy dmub caps to dc, which is missing on dcn31 Acked-by: Qingqing Zhuo Signed-off-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 7ac6e69cff37..62ce36c75c4d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -295,6 +295,10 @@ void dcn31_init_hw(struct dc *dc) if (dc->res_pool->hubbub->funcs->init_crb) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); #endif + + // Get DMCUB capabilities + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; } void dcn31_dsc_pg_control( -- cgit v1.2.3 From e97cc04fe0fb33e489583dff79f6b1d6919fcc66 Mon Sep 17 00:00:00 2001 From: Josip Pavic Date: Wed, 15 Feb 2023 15:47:59 -0500 Subject: drm/amd/display: refactor dmub commands into single function [Why & How] Consolidate dmub access to a single interface. This makes it easier to add code in the future that needs to run every time a dmub command is requested (e.g. instrumentation, locking etc). Reviewed-by: Nicholas Kazlauskas Acked-by: Qingqing Zhuo Signed-off-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 +- .../gpu/drm/amd/display/dc/bios/command_table2.c | 25 +--- .../amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 4 +- .../amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 4 +- .../amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 4 +- .../amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 4 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 23 +-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 164 ++++++++------------- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 13 +- drivers/gpu/drm/amd/display/dc/dc_helper.c | 28 +--- drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 28 +--- .../gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 4 +- drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c | 4 +- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 28 +--- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 7 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 8 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 16 +- .../amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 8 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 10 +- .../drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 4 +- .../gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c | 4 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 12 +- drivers/gpu/drm/amd/display/dc/dm_services.h | 7 + drivers/gpu/drm/amd/display/dc/dm_services_types.h | 6 + .../display/dc/link/protocols/link_dp_capability.c | 2 +- .../amd/display/dc/link/protocols/link_dp_dpia.c | 2 +- 26 files changed, 148 insertions(+), 283 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8b03c8d8f0b8..af3efb245610 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ 
-10311,7 +10311,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, input->cea_total_length = total_length; memcpy(input->payload, data, length); - res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); + res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { DRM_ERROR("EDID CEA parser failed\n"); return false; @@ -10761,3 +10761,13 @@ bool check_seamless_boot_capability(struct amdgpu_device *adev) return false; } + +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); +} + +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); +} diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 1ef9e4053bb7..90a02d7bd3da 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -123,9 +123,7 @@ static void encoder_control_dmcub( sizeof(cmd.digx_encoder_control.header); cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result encoder_control_digx_v1_5( @@ -261,9 +259,7 @@ static void transmitter_control_dmcub( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_6( @@ -325,9 +321,7 @@ static void transmitter_control_dmcub_v1_7( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_7( @@ -435,9 +429,7 @@ static void set_pixel_clock_dmcub( sizeof(cmd.set_pixel_clock.header); cmd.set_pixel_clock.pixel_clock.clk = *clk; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result set_pixel_clock_v7( @@ -804,9 +796,7 @@ static void enable_disp_power_gating_dmcub( sizeof(cmd.enable_disp_power_gating.header); cmd.enable_disp_power_gating.power_gating.pwr = *pwr; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_disp_power_gating_v2_1( @@ -1016,10 +1006,7 @@ static void enable_lvtma_control_dmcub( panel_instance; cmd.lvtma_control.data.bypass_panel_control_wait = bypass_panel_control_wait; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); - + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_lvtma_control( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index f9e2e0c3095e..3c743cd3d3f0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -250,9 +250,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 5cb44f838bde..4d5cd59f6433 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -286,9 +286,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index b737cbc468f5..300c6a5872d0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -234,9 +234,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 93db4dbee713..538126cefd4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -254,9 +254,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 52564b93f7eb..61eec5aa4067 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -515,8 +515,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, 
cmd.secure_display.roi_info.y_end = rect->y + rect->height; } - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } static inline void @@ -3309,7 +3308,6 @@ void dc_dmub_update_dirty_rect(struct dc *dc, struct dc_state *context) { union dmub_rb_cmd cmd; - struct dc_context *dc_ctx = dc->ctx; struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; unsigned int i, j; unsigned int panel_inst = 0; @@ -3350,8 +3348,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; - dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } } } @@ -4606,7 +4603,6 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, { uint8_t action; union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; ASSERT(payload->length <= 16); @@ -4654,9 +4650,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, ); } - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -4700,7 +4694,6 @@ bool dc_process_dmub_set_config_async(struct dc *dc, struct dmub_notification *notify) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; bool is_cmd_complete = true; /* prepare SET_CONFIG command */ @@ -4711,7 +4704,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc, cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) { + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { /* command is not processed by dmub */ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; return is_cmd_complete; @@ -4746,7 +4739,6 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, uint8_t *mst_slots_in_use) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; /* prepare MST_ALLOC_SLOTS command */ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; @@ -4755,7 +4747,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) /* command is not processed by dmub */ return DC_ERROR_UNEXPECTED; @@ -4789,14 +4781,11 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; cmd.dpia_hpd_int_enable.enable = hpd_int_enable; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index a9b9490a532c..954cbfdbc3b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ 
b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -65,47 +65,6 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) } } -void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, - union dmub_rb_cmd *cmd) -{ - struct dmub_srv *dmub = dc_dmub_srv->dmub; - struct dc_context *dc_ctx = dc_dmub_srv->ctx; - enum dmub_status status; - - status = dmub_srv_cmd_queue(dmub, cmd); - if (status == DMUB_STATUS_OK) - return; - - if (status != DMUB_STATUS_QUEUE_FULL) - goto error; - - /* Execute and wait for queue to become empty again. */ - dc_dmub_srv_cmd_execute(dc_dmub_srv); - dc_dmub_srv_wait_idle(dc_dmub_srv); - - /* Requeue the command. */ - status = dmub_srv_cmd_queue(dmub, cmd); - if (status == DMUB_STATUS_OK) - return; - -error: - DC_ERROR("Error queuing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); -} - -void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) -{ - struct dmub_srv *dmub = dc_dmub_srv->dmub; - struct dc_context *dc_ctx = dc_dmub_srv->ctx; - enum dmub_status status; - - status = dmub_srv_cmd_execute(dmub); - if (status != DMUB_STATUS_OK) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - } -} - void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub = dc_dmub_srv->dmub; @@ -159,22 +118,55 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, } } -bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) +bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { + return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); +} + +bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; struct dmub_srv *dmub; enum dmub_status status; + int i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; - status = dmub_srv_cmd_with_reply_data(dmub, cmd); + for (i = 0 ; i < count; i++) { + // Queue command + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + return false; + } + } + + status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { - DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } + // Wait for DMUB to process command + if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { + status = dmub_srv_wait_for_idle(dmub, 100000); + + if (status != DMUB_STATUS_OK) { + DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + return false; + } + + // Copy data back from ring buffer into command + if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); + } + return true; } @@ -267,9 +259,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. 
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) @@ -283,9 +273,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream) @@ -378,21 +366,14 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header); // Send the command to the DMCUB. - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } -void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) +void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) { union dmub_rb_cmd cmd = { 0 }; - enum dmub_status status; - - if (!dmub) { - return; - } memset(&cmd, 0, sizeof(cmd)); @@ -402,15 +383,10 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) cmd.query_feature_caps.header.ret_status = 1; cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); - /* Send command to fw */ - status = dmub_srv_cmd_with_reply_data(dmub, &cmd); - - ASSERT(status == DMUB_STATUS_OK); - /* If command was processed, copy feature caps to dmub srv */ - if (status == DMUB_STATUS_OK && + if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_feature_caps.header.ret_status == 0) { - memcpy(&dmub->feature_caps, + memcpy(&dc_dmub_srv->dmub->feature_caps, &cmd.query_feature_caps.query_feature_caps_data, sizeof(struct dmub_feature_caps)); } @@ -419,7 +395,6 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx) { union dmub_rb_cmd cmd = { 0 }; - enum dmub_status status; unsigned int panel_inst = 0; dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst); @@ -433,13 +408,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data); cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; - // Send command to fw - status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd); - - ASSERT(status == DMUB_STATUS_OK); - // If command was processed, copy feature caps to dmub srv - if (status == DMUB_STATUS_OK && + if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.visual_confirm_color.header.ret_status == 0) { memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, &cmd.visual_confirm_color.visual_confirm_color_data, @@ -797,9 +767,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? 
wm_val_refclk : 0xFFFF; } - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) @@ -982,14 +951,6 @@ static void dc_build_cursor_update_payload0( payload->panel_inst = panel_inst; } -static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv, - union dmub_rb_cmd *cmd) -{ - dc_dmub_srv_cmd_queue(dmub_srv, cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); -} - static void dc_build_cursor_position_update_payload0( struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx, const struct hubp *hubp, const struct dpp *dpp) @@ -1032,9 +993,11 @@ static void dc_build_cursor_attribute_update_payload1( void dc_send_update_cursor_info_to_dmu( struct pipe_ctx *pCtx, uint8_t pipe_idx) { - union dmub_rb_cmd cmd = { 0 }; - union dmub_cmd_update_cursor_info_data *update_cursor_info = - &cmd.update_cursor_info.update_cursor_info_data; + union dmub_rb_cmd cmd[2]; + union dmub_cmd_update_cursor_info_data *update_cursor_info_0 = + &cmd[0].update_cursor_info.update_cursor_info_data; + + memset(cmd, 0, sizeof(cmd)); if (!dc_dmub_should_update_cursor_data(pCtx)) return; @@ -1051,31 +1014,28 @@ void dc_send_update_cursor_info_to_dmu( { /* Build Payload#0 Header */ - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; - cmd.update_cursor_info.header.payload_bytes = - sizeof(cmd.update_cursor_info.update_cursor_info_data); - cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */ + cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; + cmd[0].update_cursor_info.header.payload_bytes = + sizeof(cmd[0].update_cursor_info.update_cursor_info_data); + cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd /* Prepare Payload */ - dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0); + dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0); - dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx, + dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); - /* Send update_curosr_info to queue */ - dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd); - } + } { /* Build Payload#1 Header */ - memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data)); - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; - cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); - cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */ + cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; + cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); + cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command. 
dc_build_cursor_attribute_update_payload1( - &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, + &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); /* Combine 2nd cmds update_curosr_info to DMU */ - dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd); + dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index d34f5563df2e..22f7b2704c8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -26,7 +26,7 @@ #ifndef _DMUB_DC_SRV_H_ #define _DMUB_DC_SRV_H_ -#include "os_types.h" +#include "dm_services_types.h" #include "dmub/dmub_srv.h" struct dmub_srv; @@ -52,16 +52,13 @@ struct dc_dmub_srv { void *dm; }; -void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, - union dmub_rb_cmd *cmd); - -void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); - void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); -bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd); +bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + +bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type); bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask); @@ -77,7 +74,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst); bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context); -void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub); +void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx); void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index f43cce16bb6c..a21948267c0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -41,19 +41,13 @@ static inline void submit_dmub_read_modify_write( const struct dc_context *ctx) { struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; - bool gather = false; offload->should_burst_write = (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); cmd_buf->header.payload_bytes = sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -66,17 +60,11 @@ static inline void submit_dmub_burst_write( const struct dc_context *ctx) { struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; - bool gather = false; cmd_buf->header.payload_bytes = sizeof(uint32_t) * 
offload->reg_seq_count; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -88,17 +76,11 @@ static inline void submit_dmub_reg_wait( const struct dc_context *ctx) { struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; - bool gather = false; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); offload->reg_seq_count = 0; - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; } struct dc_reg_value_masks { @@ -151,7 +133,6 @@ static void dmub_flush_buffer_execute( const struct dc_context *ctx) { submit_dmub_read_modify_write(offload, ctx); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } static void dmub_flush_burst_write_buffer_execute( @@ -159,7 +140,6 @@ static void dmub_flush_burst_write_buffer_execute( const struct dc_context *ctx) { submit_dmub_burst_write(offload, ctx); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, @@ -691,8 +671,6 @@ void reg_sequence_start_execute(const struct dc_context *ctx) default: return; } - - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 4055d271ac57..e152c68edfd1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -75,9 +75,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_abm_init(struct abm *abm, uint32_t backlight) @@ -156,9 +154,7 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask) cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -180,9 +176,7 @@ void dmub_abm_set_ambient_level(struct abm *abm, unsigned int ambient_lux, uint8 cmd.abm_set_ambient_level.abm_set_ambient_level_data.panel_mask = panel_mask; cmd.abm_set_ambient_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_ambient_level_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } #endif @@ -212,9 +206,7 @@ void dmub_abm_init_config(struct abm *abm, cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - 
dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -231,9 +223,7 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -253,9 +243,7 @@ bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -277,9 +265,7 @@ bool dmub_abm_set_backlight_level(struct abm *abm, cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 3f32e9c3fbaf..2aa0e01a6891 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -47,9 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, if (!lock) cmd.lock_hw.lock_hw_data.should_release = 1; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c index fff1d07d865d..d8009b2dc56a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c @@ -48,7 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv) sizeof(cmd.outbox1_enable.header); cmd.outbox1_enable.enable = true; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 9705d8f88382..4000a834592c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -168,9 +168,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state * cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -198,9 +196,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8 
cmd.psr_enable.header.payload_bytes = 0; // Send header only - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Below loops 1000 x 500us = 500 ms. * Exit PSR may need to wait 1-2 frames to power up. Timeout after at @@ -248,9 +244,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -269,9 +263,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub, cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle; cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -290,9 +282,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -422,9 +412,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->relock_delay_frame_cnt = 2; copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -445,9 +433,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst) cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC; cmd.psr_enable.header.payload_bytes = 0; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 58e459c7e7d3..f976fac8dc3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -667,7 +667,6 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip static void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) { - struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); union dmub_rb_cmd cmd; @@ -690,11 +689,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp, cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; PERF_TRACE(); // TODO: remove after performance is stable. - dc_dmub_srv_cmd_queue(dmcub, &cmd); - PERF_TRACE(); // TODO: remove after performance is stable. - dc_dmub_srv_cmd_execute(dmcub); - PERF_TRACE(); // TODO: remove after performance is stable. 
- dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); PERF_TRACE(); // TODO: remove after performance is stable. } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 1c6477d73c8e..55a464a39529 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -152,9 +152,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -173,9 +171,7 @@ static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8263a07f265f..3303c9aae068 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -632,7 +632,7 @@ void dcn30_init_hw(struct dc *dc) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); // Get DMCUB capabilities - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } @@ -736,8 +736,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ; cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -859,9 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.cursor_height = cursor_attr.height; cmd.mall.cursor_pitch = cursor_attr.pitch; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Use copied cursor, and it's okay to not switch back */ cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part; @@ -877,8 +874,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.tmr_scale = tmr_scale; cmd.mall.debug_bits = dc->debug.mall_error_as_fatal; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -895,9 +891,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, 
&cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 745a5d187a98..bd62502380d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -117,7 +117,6 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc, union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; memset(cmd, 0, sizeof(*cmd)); cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; @@ -126,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc, cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd)) + if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return false; return true; @@ -425,7 +424,6 @@ static bool link_dpia_control(struct dc_context *dc_ctx, struct dmub_cmd_dig_dpia_control_data *dpia_control) { union dmub_rb_cmd cmd; - struct dc_dmub_srv *dmub = dc_ctx->dmub_srv; memset(&cmd, 0, sizeof(cmd)); @@ -438,9 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx, cmd.dig1_dpia_control.dpia_control = *dpia_control; - dc_dmub_srv_cmd_queue(dmub, &cmd); - dc_dmub_srv_cmd_execute(dmub); - dc_dmub_srv_wait_idle(dmub); + dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 62ce36c75c4d..e0c74868d2ee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -297,7 +297,7 @@ void dcn31_init_hw(struct dc *dc) #endif // Get DMCUB capabilities - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; } @@ -442,9 +442,7 @@ void dcn31_z10_save_init(struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_z10_restore(const struct dc *dc) @@ -462,9 +460,7 @@ void dcn31_z10_restore(const struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 11ea9d13e312..217acd4e292a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub 
cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data); cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst; - return dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd); + return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); } static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) @@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; cmd.panel_cntl.data.bl_pwm_ref_div2 = panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2; - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) + if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return 0; panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 40c488b26901..6fb3f64e3057 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -417,9 +417,7 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool cmd.domain_control.data.inst = hubp_inst; cmd.domain_control.data.power_gate = !power_on; - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); - dc_dmub_srv_wait_idle(ctx->dmub_srv); + dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); PERF_TRACE(); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 26791e3d162f..9ce11ed769a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -274,8 +274,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -309,8 +308,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); cmd.cab.cab_alloc_ways = ways; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -326,9 +324,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -946,7 +942,7 @@ void dcn32_init_hw(struct dc *dc) // Get DMCUB capabilities if (dc->ctx->dmub_srv) { - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 9a3f2a44f882..d33d595405a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ 
b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,6 +40,7 @@ struct dmub_srv; struct dc_dmub_srv; +union dmub_rb_cmd; irq_handler_idx dm_register_interrupt( struct dc_context *ctx, @@ -273,6 +274,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc #define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__, CTX) #define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX) +/* + * DMUB Interfaces + */ +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + /* * Debug and verification hooks */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index b52ba6ffabe1..facf269c4326 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -269,4 +269,10 @@ struct dtn_min_clk_info { uint32_t min_memory_clock_khz; }; +enum dm_dmub_wait_type { + DM_DMUB_WAIT_TYPE_NO_WAIT, + DM_DMUB_WAIT_TYPE_WAIT, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY, +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 2914fca7dab3..5ff5d4e64902 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1394,7 +1394,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( link->dc, link->link_enc->transmitter); - if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && + if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.cable_id.header.ret_status == 1) { cable_id->raw = cmd.cable_id.data.output_raw; DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 4626fabc0a96..0bb749133909 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -90,7 +90,7 @@ bool dpia_query_hpd_status(struct dc_link *link) cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; /* Return HPD status reported by DMUB if query successfully executed. */ - if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) + if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) is_hpd_high = cmd.query_hpd.data.result; DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", -- cgit v1.2.3 From 7052a801d6bc8cd203e1708313e4996630208a6e Mon Sep 17 00:00:00 2001 From: Michael Mityushkin Date: Thu, 30 Mar 2023 12:46:58 -0400 Subject: drm/amd/display: Correct output color space during HW reinitialize [Why] Calling core_link_disable_stream or set_dpms_off when reinitializing hardware causes an issue that reproduces with an external display connected. This is unnecessary; blanking the pixel data should be sufficient. [How] Call disable_pixel_data while reinitializing hardware instead of core_link_disable_stream or set_dpms_off.
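A minimal sketch of the intended init-time behaviour (illustrative only; it assumes the dc->hwss.disable_pixel_data(dc, pipe_ctx, blank) hook wired up by the dcn31/dcn314 function-table changes below, and the loop itself is not the exact hwseq code):

	/* Blank pixel data on active pipes instead of tearing down the stream/link
	 * while hardware is reinitialized, so an attached external display keeps
	 * its link and output color space.
	 */
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->stream)
			continue;

		if (dc->hwss.disable_pixel_data)
			dc->hwss.disable_pixel_data(dc, pipe_ctx, true); /* blank only */
		else
			dc->link_srv->set_dpms_off(pipe_ctx); /* previous behaviour */
	}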
Reviewed-by: Nicholas Kazlauskas Acked-by: Qingqing Zhuo Signed-off-by: Michael Mityushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++++ drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 1 + drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 1 + 3 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 422fbf79da64..5800acf6aae1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -313,6 +313,10 @@ void dcn20_init_blank( } opp = dc->res_pool->opps[opp_id_src0]; + /* don't override the blank pattern if already enabled with the correct one. */ + if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp)) + return; + if (num_opps == 2) { otg_active_width = otg_active_width / 2; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 3a32810bbe38..8598ea233ef3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -58,6 +58,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 5267e901a35c..ce53339b2e10 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -60,6 +60,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, -- cgit v1.2.3 From 4335077a76095ff75dc0ffb031aeae93f9f5e80f Mon Sep 17 00:00:00 2001 From: Meenakshikumar Somasundaram Date: Wed, 12 Jan 2022 19:58:04 -0500 Subject: drm/amd/display: Adjust dmub outbox notification enable [Why] Currently the driver enables dmub outbox notification before the outbox ISR is registered. During boot, dmub sometimes issues hpd outbox messages before the driver registers the ISR, and those messages are missed. [How] Enable dmub outbox notification only after the outbox ISR is registered. Also, restructured the outbox enable code to be called from the dm layer and renamed the APIs.
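A hedged sketch of the reordered bring-up on the dm side (the helper names register_outbox_irq_handlers() and dc_enable_dmub_outbox() are assumptions used for illustration and may not match the renamed APIs exactly):

	/* Register the outbox IRQ handler first so no HPD notification raised by
	 * DMUB during boot is lost.
	 */
	if (register_outbox_irq_handlers(adev)) {
		DRM_ERROR("DM: Failed to initialize outbox IRQ\n");
		return -EINVAL;
	}

	/* Only after the ISR is in place, allow the firmware to send outbox
	 * notifications (previously enabled from dcn31_init_hw, before the ISR
	 * was registered).
	 */
	if (adev->dm.dc->res_pool->usb4_dpia_count)
		dc_enable_dmub_outbox(adev->dm.dc);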
Reviewed-by: Rodrigo Siqueira Signed-off-by: Meenakshikumar Somasundaram Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index e0c74868d2ee..890268d95495 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -197,10 +197,6 @@ void dcn31_init_hw(struct dc *dc) } } - /* Enables outbox notifications for usb4 dpia */ - if (dc->res_pool->usb4_dpia_count) - dmub_enable_outbox_notification(dc->ctx->dmub_srv); - /* we want to turn off all dp displays before doing detection */ dc->link_srv->blank_all_dp_displays(dc); -- cgit v1.2.3 From 4f63b7a59926eb7fb50091e796170a10a8ef4091 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 17:08:53 -0400 Subject: drm/amd/display: Add FAMS capability to DCN31 DCN31 supports FAMS, but this was not correctly set in the hardware setup sequence. This commit fixes this issue by setting the MCLK switch capability based on the feature capability retrieved from the DMUB. Reviewed-by: Rodrigo Siqueira Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 890268d95495..55494730e500 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -295,6 +295,7 @@ void dcn31_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } void dcn31_dsc_pg_control( -- cgit v1.2.3 From 9ba90d760e9354c124fa9bbea08017d96699a82c Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Fri, 9 Dec 2022 15:13:18 -0500 Subject: drm/amd/display: add pixel rate based CRB allocation support This feature is meant to unblock PSTATE for certain high-end display configs on dcn315. This is achieved by allocating CRB to the detile buffer based on display requirements to meet pstate latency hiding needs.
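As a rough illustration of the sizing idea behind the detile-buffer allocation (not the exact DML formula used by dcn_get_approx_det_segs_required_for_pstate(); the 35 us latency below is an assumed example value): the buffer must hold roughly pixel_rate * bytes_per_pixel * pstate_latency worth of data, rounded up to 64 KB CRB segments. For a ~600 MHz pixel clock at 4 bytes per pixel and 35 us of latency, that is about 600e6 * 4 * 35e-6 = 84 KB, i.e. 2 segments.

	/* Illustrative helper only -- not the implementation used by this patch. */
	static unsigned int approx_det_segments(unsigned long long pix_clk_hz,
						unsigned int bytes_per_pixel,
						unsigned int pstate_latency_us)
	{
		unsigned long long bytes_needed =
			pix_clk_hz * bytes_per_pixel * pstate_latency_us / 1000000ULL;

		/* Round up to 64 KB CRB segments (DCN3_15_CRB_SEGMENT_SIZE_KB). */
		return (unsigned int)((bytes_needed + 64 * 1024 - 1) / (64 * 1024));
	}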
Tested-by: Daniel Wheeler Reviewed-by: Charlene Liu Acked-by: Rodrigo Siqueira Signed-off-by: Dmytro Laktyushkin Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 1 + .../drm/amd/display/dc/dcn315/dcn315_resource.c | 97 +++++++++++++++++++++- .../gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 25 +++++- .../gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 3 + .../amd/display/dc/dml/dcn31/display_mode_vba_31.c | 39 ++++++--- .../gpu/drm/amd/display/dc/dml/display_mode_vba.c | 6 ++ 6 files changed, 154 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 7e7cd5b64e6a..7445ed27852a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne default: break; } + DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments); /* Should never be hit, if it is we have an erroneous hw config*/ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 41c972c8eb19..42a0157fd813 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -136,6 +136,9 @@ #define DCN3_15_MAX_DET_SIZE 384 #define DCN3_15_CRB_SEGMENT_SIZE_KB 64 +#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB) +/* Minimum 2 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */ +#define MIN_RESERVED_DET_SEGS 2 enum dcn31_clk_src_array_id { DCN31_CLK_SRC_PLL0, @@ -1636,21 +1639,57 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } +static int source_format_to_bpp (enum source_format_class SourcePixelFormat) +{ + if (SourcePixelFormat == dm_444_64) + return 8; + else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) + return 2; + else if (SourcePixelFormat == dm_444_8) + return 1; + else if (SourcePixelFormat == dm_rgbe_alpha) + return 5; + else if (SourcePixelFormat == dm_420_8) + return 3; + else if (SourcePixelFormat == dm_420_12) + return 6; + else + return 4; +} + +static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) +{ + int i; + struct resource_context *res_ctx = &context->res_ctx; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /*Don't apply if MPO to avoid transition issues*/ + if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state) + return false; + } + return true; +} + static int dcn315_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) { - int i, pipe_cnt; + int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; + int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; + bool 
pixel_rate_crb = allow_pixel_rate_crb(dc, context); DC_FP_START(); dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); DC_FP_END(); - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; if (!res_ctx->pipe_ctx[i].stream) @@ -1671,6 +1710,23 @@ static int dcn315_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dsc_input_bpc = 0; DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); + if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { + int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); + /* Ceil to crb segment size */ + int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( + &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB); + if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) { + bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS; + split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc); + split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + if (split_required) + approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2; + pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate; + remaining_det_segs -= approx_det_segs_required_for_pstate; + } else + remaining_det_segs = -1; + crb_pipes++; + } DC_FP_END(); if (pipes[pipe_cnt].dout.dsc_enable) { @@ -1689,16 +1745,49 @@ static int dcn315_populate_dml_pipes_from_context( break; } } - pipe_cnt++; } + /* Spread remaining unreserved crb evenly among all pipes, use default policy if not enough det or single pipe */ + if (pixel_rate_crb) { + for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &res_ctx->pipe_ctx[i]; + if (!pipe->stream) + continue; + + if (!pipe->top_pipe && !pipe->prev_odm_pipe) { + bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) + || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + if (remaining_det_segs < 0 || crb_pipes == 1) + pipes[pipe_cnt].pipe.src.det_size_override = 0; + if (remaining_det_segs > MIN_RESERVED_DET_SEGS) + pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 
1 : 0); + if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { + /* Clamp to 2 pipe split max det segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); + pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; + } + if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { + /* If we are splitting we must have an even number of segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; + pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; + } + /* Convert segments into size for DML use */ + pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + crb_idx++; + } + pipe_cnt++; + } + } + if (pipe_cnt) context->bw_ctx.dml.ip.det_buffer_size_kbytes = (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB; if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE) context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE; - ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE); + dc->config.enable_4to1MPC = false; if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { if (is_dual_plane(pipe->plane_state->format) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 59836570603a..19d034341e64 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp( int pipe_cnt, int vlevel) { - int i, pipe_idx, active_hubp_count = 0; + int i, pipe_idx, total_det = 0, active_hubp_count = 0; double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; dc_assert_fp_enabled(); @@ -563,6 +563,18 @@ void dcn31_calculate_wm_and_dlg_fp( if (context->res_ctx.pipe_ctx[i].stream) context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; } + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = + get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384) + context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2; + total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb; + pipe_idx++; + } + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det; } void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -815,3 +827,14 @@ int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc) { return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0); } + +int dcn_get_approx_det_segs_required_for_pstate( + struct _vcs_dpi_soc_bounding_box_st *soc, + int pix_clk_100hz, int bpp, int seg_size_kb) +{ + /* Roughly calculate required crb to hide latency. 
In practice there is slightly + * more buffer available for latency hiding + */ + return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp + / 10240000 + seg_size_kb - 1) / seg_size_kb; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index 687d3522cc33..8f9c8faed260 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -47,6 +47,9 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc); +int dcn_get_approx_det_segs_required_for_pstate( + struct _vcs_dpi_soc_bounding_box_st *soc, + int pix_clk_100hz, int bpp, int seg_size_kb); int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 330b089d6a86..01603abd75bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -532,7 +532,8 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - unsigned int DETBufferSizeInKByte, + bool DETSharedByAllDPP, + unsigned int DETBufferSizeInKByte[], double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -3118,7 +3119,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SurfaceWidthC[k], v->SurfaceHeightY[k], v->SurfaceHeightC[k], - v->DETBufferSizeInKByte[0] * 1024, + v->DETBufferSizeInKByte[k] * 1024, v->BlockHeight256BytesY[k], v->BlockHeight256BytesC[k], v->SurfaceTiling[k], @@ -3313,7 +3314,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) CalculateSwathAndDETConfiguration( false, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, dummy1, dummy2, v->SourceScan, @@ -3779,14 +3781,16 @@ static noinline void CalculatePrefetchSchedulePerPlane( &v->VReadyOffsetPix[k]); } -static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte) +static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[]) { int i, total_pipes = 0; for (i = 0; i < NumberOfActivePlanes; i++) total_pipes += NoOfDPPThisState[i]; - *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64; - if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE) - *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE; + DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64; + if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE) + DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE; + for (i = 1; i < NumberOfActivePlanes; i++) + DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0]; } @@ -4026,7 +4030,8 @@ void 
dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateSwathAndDETConfiguration( true, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -4166,6 +4171,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l || (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) { v->DISPCLK_DPPCLK_Support[i][j] = false; } + if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) { + v->MPCCombine[i][j][k] = true; + v->NoOfDPP[i][j][k] = 2; + } } v->TotalNumberOfActiveDPP[i][j] = 0; v->TotalNumberOfSingleDPPPlanes[i][j] = 0; @@ -4642,12 +4651,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k]; } - if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315) - PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]); + if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0]) + PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte); CalculateSwathAndDETConfiguration( false, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -6611,7 +6621,8 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - unsigned int DETBufferSizeInKByte, + bool DETSharedByAllDPP, + unsigned int DETBufferSizeInKByteA[], double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -6695,6 +6706,10 @@ static void CalculateSwathAndDETConfiguration( *ViewportSizeSupport = true; for (k = 0; k < NumberOfActivePlanes; ++k) { + unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k]; + + if (DETSharedByAllDPP && DPPPerPlane[k]) + DETBufferSizeInKByte /= DPPPerPlane[k]; if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16 || SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) { if (SurfaceTiling[k] == dm_sw_linear diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index f9653f511baa..2f63ae954826 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -571,6 +571,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate; mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy; mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override; + if (src->det_size_override) + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override; + else + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes; //TODO: Need to assign 
correct values to dp_multistream vars mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en; mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id; @@ -785,6 +789,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[k] = mode_lib->vba.NumberOfActivePlanes; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++; + if (src_k->det_size_override) + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override; if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] == dm_horz) { mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] += -- cgit v1.2.3 From d205a800a66e46430ab93c0d450393233d39931a Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Wed, 12 Apr 2023 14:02:01 -0400 Subject: drm/amd/display: Add visual confirm color support for MCLK switch [Why && How] We would like to have visual confirm color support for MCLK switch. 1. Set visual confirm color to yellow: Vblank MCLK switch. 2. Set visual confirm color to cyan: FPO + Vblank MCLK switch. 3. Set visual confirm color to pink: Vactive MCLK switch. Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Leo (Hanghong) Ma Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 47 +++++++++++++++++--- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 50 ++++++++++++++++++++-- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 22 +++------- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 1 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 26 +---------- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 5 --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c | 4 +- .../gpu/drm/amd/display/dc/dcn201/dcn201_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 2 +- .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 7 +++ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 + drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 9 +++- 19 files changed, 125 insertions(+), 65 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c26dfdd48dd9..2c639cd346d7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1119,6 +1119,33 @@ static void phantom_pipe_blank( hws->funcs.wait_for_blank_complete(opp); } +static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +{ + if (dc->ctx->dce_version >= DCN_VERSION_1_0) { + memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) + get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) + get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) + get_surface_tile_visual_confirm_color(pipe_ctx, 
&(pipe_ctx->visual_confirm_color)); + else { + if (dc->ctx->dce_version < DCN_VERSION_2_0) + color_space_to_black_color( + dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); + } + if (dc->ctx->dce_version >= DCN_VERSION_2_0) { + if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) + get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) + get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) + get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + } + } +} + static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; @@ -1189,6 +1216,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); @@ -3456,6 +3486,14 @@ static void commit_planes_for_stream(struct dc *dc, } } + if (dc->debug.visual_confirm) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + } + if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; struct pipe_ctx *odm_pipe; @@ -3539,15 +3577,14 @@ static void commit_planes_for_stream(struct dc *dc, for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP && + if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || + dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && pipe_ctx->stream && pipe_ctx->plane_state) { - /* Only update visual confirm for SUBVP here. + /* Only update visual confirm for SUBVP and Mclk switching here. * The bar appears on all pipes, so we need to update the bar on all displays, * so the information doesn't get stale. 
*/ - struct mpcc_blnd_cfg blnd_cfg = { 0 }; - - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, pipe_ctx->plane_res.hubp->inst); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 2acbf692193f..8a98b8dd008e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -421,6 +421,7 @@ void get_hdr_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { @@ -428,15 +429,17 @@ void get_subvp_visual_confirm_color( bool enable_subvp = false; int i; - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx) + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) return; for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { /* SubVP enable - red */ + color->color_g_y = 0; + color->color_b_cb = 0; color->color_r_cr = color_value; enable_subvp = true; @@ -448,12 +451,51 @@ void get_subvp_visual_confirm_color( if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { color->color_r_cr = 0; - if (pipe_ctx->stream->ignore_msa_timing_param == 1) + if (pipe_ctx->stream->allow_freesync == 1) { /* SubVP enable and DRR on - green */ + color->color_b_cb = 0; color->color_g_y = color_value; - else + } else { /* SubVP enable and No DRR - blue */ + color->color_g_y = 0; + color->color_b_cb = color_value; + } + } +} + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) + return; + + if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != + dm_dram_clock_change_unsupported) { + /* MCLK switching is supported */ + if (!pipe_ctx->has_vactive_margin) { + /* In Vblank - yellow */ + color->color_r_cr = color_value; + color->color_g_y = color_value; + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + /* FPO + Vblank - cyan */ + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = color_value; + } + } else { + /* In Vactive - pink */ + color->color_r_cr = color_value; color->color_b_cb = color_value; + } + /* SubVP */ + get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1ebb8d3573f4..8be2e6d6d888 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -426,6 +426,7 @@ enum visual_confirm { VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, VISUAL_CONFIRM_SUBVP = 14, + VISUAL_CONFIRM_MCLK_SWITCH = 16, }; enum dc_psr_power_opts { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a7ad1d7bc43e..905246a2ece4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2602,23 +2602,15 @@ static void dcn10_update_dpp(struct 
dpp *dpp, struct dc_plane_state *plane_state dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) +void dcn10_update_visual_confirm_color(struct dc *dc, + struct pipe_ctx *pipe_ctx, + int mpcc_id) { struct mpc *mpc = dc->res_pool->mpc; - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else - color_space_to_black_color( - dc, pipe_ctx->stream->output_color_space, color); - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); + memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color)); + mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id); } } @@ -2671,7 +2663,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2693,7 +2685,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 0ef7bf7ddb75..ef6d56da417c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -202,7 +202,6 @@ void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits); void dcn10_update_visual_confirm_color( struct dc *dc, struct pipe_ctx *pipe_ctx, - struct tg_color *color, int mpcc_id); #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index f49c1c0d6274..b3e187b1347d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2580,28 +2580,6 @@ void dcn20_reset_hw_ctx_wrap( } } -void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) -{ - struct mpc *mpc = dc->res_pool->mpc; - - // input to MPCC is always RGB, by default leave black_color at 0 - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) - get_mpctree_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) - 
get_subvp_visual_confirm_color(dc, pipe_ctx, color); - - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); - } -} - void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2657,7 +2635,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) if (!pipe_ctx->plane_state->update_flags.bits.full_update && !pipe_ctx->update_flags.bits.mpcc) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2679,7 +2657,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 33a36c02b2f8..01901b08644c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -150,10 +150,5 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc, const struct tg_color *solid_color, int width, int height, int offset); -void dcn20_update_visual_confirm_color(struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct tg_color *color, - int mpcc_id); - #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 7c5817c426fa..4192c522e59a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -102,7 +102,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn20_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index 1aeb04fbd89d..75472d53ff52 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -496,7 +496,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); return; } @@ -521,7 +521,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) dc->res_pool->mpc, mpcc_id); /* Call MPC to insert new plane */ - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, mpc_tree_params, &blnd_cfg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c index 9c16633e473a..92dd4cddbab8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c @@ -91,7 +91,7 @@ static const struct hw_sequencer_funcs dcn201_funcs = { .enable_dp_link_output = dce110_enable_dp_link_output, .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn201_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index fe1a8e2e08ef..8b58ce1db035 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .is_abm_supported = dcn21_is_abm_supported, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn21_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 3216d10c58ba..18e94d8ae54f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, .is_abm_supported = dcn21_is_abm_supported }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index 6192851c59ed..257df8660b4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -107,7 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 8598ea233ef3..ba9e7dee6e5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -110,7 +110,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn31_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index ed8a1b94c006..7a28c7bb25d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -112,7 +112,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn314_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 8085f2acb1a9..24a890d879b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -109,7 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 8c60b88c7d1a..d8d8fcd5ef1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1324,6 +1324,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, int i, pipe_idx, active_hubp_count = 0; bool usr_retraining_support = false; bool unbounded_req_enabled = false; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; dc_assert_fp_enabled(); @@ -1405,6 +1406,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0) + context->res_ctx.pipe_ctx[i].has_vactive_margin = true; + else + context->res_ctx.pipe_ctx[i].has_vactive_margin = false; + /* MALL Allocation Sizes */ /* count from active, top pipes per plane only */ if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && @@ -2015,6 +2021,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = true; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank; } else { /* Restore FCLK latency and re-run validation to go back to original validation * output if we find that enabling FPO does not give us any benefit (i.e. 
lower diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 2eb597a24425..b4c1cc6dc857 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -426,6 +426,8 @@ struct pipe_ctx { struct dwbc *dwbc; struct mcif_wb *mcif_wb; union pipe_update_flags update_flags; + struct tg_color visual_confirm_color; + bool has_vactive_margin; }; /* Data used for dynamic link encoder assignment. diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 88ac723d10aa..df160c6a630c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -257,7 +257,6 @@ struct hw_sequencer_funcs { void (*update_visual_confirm_color)(struct dc *dc, struct pipe_ctx *pipe_ctx, - struct tg_color *color, int mpcc_id); void (*update_phantom_vp_position)(struct dc *dc, @@ -294,6 +293,7 @@ void get_surface_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); @@ -306,4 +306,11 @@ void get_mpctree_visual_confirm_color( void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + #endif /* __DC_HW_SEQUENCER_H__ */ -- cgit v1.2.3 From 25879d7b4986beba3f0d84762fe40d09fdc8b219 Mon Sep 17 00:00:00 2001 From: Qingqing Zhuo Date: Thu, 16 Mar 2023 09:05:58 -0400 Subject: drm/amd/display: Clean FPGA code in dc [Why] Drop dead code for Linux. [How] Remove all IS_FPGA_MAXIMUS_DC and IS_DIAG_DC Reviewed-by: Ariel Bernstein Acked-by: Tom Chung Signed-off-by: Qingqing Zhuo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | 20 +- .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 10 +- .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 58 +++--- .../amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 22 +-- .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 73 ++++--- .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 17 +- .../amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 32 ++- .../drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 50 ++--- .../amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 38 ++-- .../amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 38 ++-- .../amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 38 ++-- .../amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 49 ++--- .../amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 1 - drivers/gpu/drm/amd/display/dc/core/dc.c | 40 ++-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 30 --- drivers/gpu/drm/amd/display/dc/dc_helper.c | 6 +- drivers/gpu/drm/amd/display/dc/dc_types.h | 7 - .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 38 ---- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 8 +- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 3 - .../amd/display/dc/dce112/dce112_hw_sequencer.c | 3 - .../amd/display/dc/dce120/dce120_hw_sequencer.c | 3 - .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 126 +++++------- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 8 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 5 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 51 +++-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 4 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 7 +- 
.../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 15 +- .../gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c | 79 +++----- .../gpu/drm/amd/display/dc/dcn201/dcn201_optc.c | 7 +- .../drm/amd/display/dc/dcn201/dcn201_resource.c | 5 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 4 - .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 54 ++--- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 4 - drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 7 +- .../gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 8 +- .../drm/amd/display/dc/dcn301/dcn301_resource.c | 10 +- .../drm/amd/display/dc/dcn302/dcn302_resource.c | 5 +- .../drm/amd/display/dc/dcn303/dcn303_resource.c | 5 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 106 ++++------ drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 4 - .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 17 +- .../gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 4 - .../drm/amd/display/dc/dcn314/dcn314_resource.c | 10 +- .../drm/amd/display/dc/dcn315/dcn315_resource.c | 17 +- .../drm/amd/display/dc/dcn316/dcn316_resource.c | 17 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 4 - .../gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 13 +- .../drm/amd/display/dc/dcn321/dcn321_resource.c | 13 +- .../gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c | 70 ++++--- .../gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 219 ++++++++++---------- .../display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 8 - .../gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 6 +- .../dc/dml/dcn314/display_rq_dlg_calc_314.c | 8 - .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 120 ++++++----- .../gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 120 ++++++----- .../gpu/drm/amd/display/dc/dml/display_mode_lib.c | 1 - .../gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1 - .../amd/display/dc/link/hwss/link_hwss_hpo_dp.c | 83 +------- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 220 ++++++++++----------- .../display/dc/link/protocols/link_dp_capability.c | 3 +- 63 files changed, 770 insertions(+), 1292 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 934e6423dc1a..1f36ad8a7de4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -111,12 +111,10 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) bp->funcs->set_dce_clock(bp, &dce_clk_params); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 1000 / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); } clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; @@ -153,12 +151,10 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 1000 / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + 
if (clk_mgr->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); } clk_mgr->dfs_bypass_disp_clk = actual_clock; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 450eaead4f20..89b79dd39628 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -135,12 +135,10 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di VBIOSSMC_MSG_SetDispclkFreq, khz_to_mhz_ceil(requested_dispclk_khz)); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_dispclk_set_mhz / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_dispclk_set_mhz / 7); } return actual_dispclk_set_mhz * 1000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 650f3b4b562e..c435f7632e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -531,6 +531,11 @@ void dcn20_clk_mgr_construct( struct pp_smu_funcs *pp_smu, struct dccg *dccg) { + int dprefclk_did; + int target_div; + uint32_t pll_req_reg; + struct fixed31_32 pll_req; + clk_mgr->base.ctx = ctx; clk_mgr->pp_smu = pp_smu; clk_mgr->base.funcs = &dcn2_funcs; @@ -547,42 +552,34 @@ void dcn20_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->base.dentist_vco_freq_khz = 3850000; + /* DFS Slice 2 should be used for DPREFCLK */ + dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL); + /* Convert DPREFCLK DFS Slice DID to actual divider */ + target_div = dentist_get_divider_from_did(dprefclk_did); + /* get FbMult value */ + pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ); - } else { - /* DFS Slice 2 should be used for DPREFCLK */ - int dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL); - /* Convert DPREFCLK DFS Slice DID to actual divider*/ - int target_div = dentist_get_divider_from_did(dprefclk_did); - - /* get FbMult value */ - uint32_t pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ); - struct fixed31_32 pll_req; - - /* set up a fixed-point number - * this works because the int part is on the right edge of the register - * and the frac part is on the left edge - */ + /* set up a fixed-point number + * this works because the int part is on the right edge of the register + * and the frac part is on the left edge + */ - pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int); - pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac; + pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int); + pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac; - /* multiply by REFCLK period */ - pll_req = dc_fixpt_mul_int(pll_req, 100000); + /* multiply by REFCLK period */ + pll_req = dc_fixpt_mul_int(pll_req, 100000); - /* integer part is now VCO frequency in kHz */ - clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); + /* 
integer part is now VCO frequency in kHz */ + clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3850000; + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3850000; - /* Calculate the DPREFCLK in kHz.*/ - clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; - } + /* Calculate the DPREFCLK in kHz.*/ + clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; //Integrated_info table does not exist on dGPU projects so should not be referenced //anywhere in code for dGPUs. //Also there is no plan for now that DFS BYPASS will be used on NV10/12/14. @@ -590,4 +587,3 @@ void dcn20_clk_mgr_construct( dce_clock_read_ss_info(clk_mgr); } - diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index 811720749faf..694fe4271b4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -190,23 +190,17 @@ void dcn201_clk_mgr_construct(struct dc_context *ctx, clk_mgr->dprefclk_ss_divider = 1000; clk_mgr->ss_on_dprefclk = false; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn201_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->base.dprefclk_khz = 600000; - clk_mgr->base.dentist_vco_freq_khz = 3000000; - } else { - clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT); - clk_mgr->base.dprefclk_khz *= 100; + clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT); + clk_mgr->base.dprefclk_khz *= 100; - if (clk_mgr->base.dprefclk_khz == 0) - clk_mgr->base.dprefclk_khz = 600000; + if (clk_mgr->base.dprefclk_khz == 0) + clk_mgr->base.dprefclk_khz = 600000; - REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz); - clk_mgr->base.dentist_vco_freq_khz *= 100000; + REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz); + clk_mgr->base.dentist_vco_freq_khz *= 100000; - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3000000; - } + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3000000; if (!debug->disable_dfs_bypass && bp->integrated_info) if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index bd9fd0b54f46..0c6a4ab72b1d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -705,6 +705,7 @@ void rn_clk_mgr_construct( struct dpm_clocks clock_table = { 0 }; enum pp_smu_status status = 0; int is_green_sardine = 0; + struct clk_log_info log_info = {0}; #if defined(CONFIG_DRM_AMD_DC_FP) is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); @@ -725,48 +726,41 @@ void rn_clk_mgr_construct( clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; + clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr); + + /* SMU Version 55.51.0 and up no 
longer have an issue + * that needs to limit minimum dispclk */ + if (clk_mgr->smu_ver >= SMU_VER_55_51_0) + debug->min_disp_clk_khz = 0; + + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3600000; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr); - - /* SMU Version 55.51.0 and up no longer have an issue - * that needs to limit minimum dispclk */ - if (clk_mgr->smu_ver >= SMU_VER_55_51_0) - debug->min_disp_clk_khz = 0; - - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3600000; - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { - if (clk_mgr->periodic_retraining_disabled) { - rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; - } else { - if (is_green_sardine) - rn_bw_params.wm_table = lpddr4_wm_table_gs; - else - rn_bw_params.wm_table = lpddr4_wm_table_rn; - } + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { + if (clk_mgr->periodic_retraining_disabled) { + rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; } else { if (is_green_sardine) - rn_bw_params.wm_table = ddr4_wm_table_gs; - else { - if (ctx->dc->config.is_single_rank_dimm) - rn_bw_params.wm_table = ddr4_1R_wm_table_rn; - else - rn_bw_params.wm_table = ddr4_wm_table_rn; - } + rn_bw_params.wm_table = lpddr4_wm_table_gs; + else + rn_bw_params.wm_table = lpddr4_wm_table_rn; + } + } else { + if (is_green_sardine) + rn_bw_params.wm_table = ddr4_wm_table_gs; + else { + if (ctx->dc->config.is_single_rank_dimm) + rn_bw_params.wm_table = ddr4_1R_wm_table_rn; + else + rn_bw_params.wm_table = ddr4_wm_table_rn; } - /* Saved clocks configured at boot for debug purposes */ - rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); } + /* Saved clocks configured at boot for debug purposes */ + rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); clk_mgr->base.dprefclk_khz = 600000; dce_clock_read_ss_info(clk_mgr); @@ -786,9 +780,8 @@ void rn_clk_mgr_construct( } } - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { - /* enable powerfeatures when displaycount goes to 0 */ + /* enable powerfeatures when displaycount goes to 0 */ + if (clk_mgr->smu_ver >= 0x00371500) rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); - } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 27fbe906682f..8c9d45e5b13b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -147,17 +147,14 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis VBIOSSMC_MSG_SetDispclkFreq, khz_to_mhz_ceil(requested_dispclk_khz)); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) - 
dmcu->funcs->set_psr_wait_loop(dmcu, - actual_dispclk_set_mhz / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_dispclk_set_mhz / 7); } // pmfw always set clock more than or equal requested clock - if (!IS_DIAG_DC(dc->ctx->dce_environment)) - ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz)); + ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz)); return actual_dispclk_set_mhz * 1000; } @@ -221,15 +218,13 @@ void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phy int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz) { int actual_dppclk_set_mhz = -1; - struct dc *dc = clk_mgr->base.ctx->dc; actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, khz_to_mhz_ceil(requested_dpp_khz)); - if (!IS_DIAG_DC(dc->ctx->dce_environment)) - ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz)); + ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz)); return actual_dppclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3908e7cfd6cb..3271c8c7905d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -521,6 +521,8 @@ void dcn3_clk_mgr_construct( struct pp_smu_funcs *pp_smu, struct dccg *dccg) { + struct clk_state_registers_and_bypass s = { 0 }; + clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn3_funcs; clk_mgr->regs = &clk_mgr_regs; @@ -537,27 +539,19 @@ void dcn3_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.funcs = &dcn3_fpga_funcs; - clk_mgr->base.dentist_vco_freq_khz = 3650000; - - } else { - struct clk_state_registers_and_bypass s = { 0 }; + /* integer part is now VCO frequency in kHz */ + clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr); - /* integer part is now VCO frequency in kHz */ - clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3650000; - /* Convert dprefclk units from MHz to KHz */ - /* Value already divided by 10, some resolution lost */ + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3650000; + /* Convert dprefclk units from MHz to KHz */ + /* Value already divided by 10, some resolution lost */ - /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */ - //ASSERT(s.dprefclk != 0); - if (s.dprefclk != 0) - clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; - } + /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */ + //ASSERT(s.dprefclk != 0); + if (s.dprefclk != 0) + clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; clk_mgr->dfs_bypass_enabled = false; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 01383aac6b41..a5489fe6875f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -117,7 +117,7 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base, display_count = vg_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ - if (display_count == 0 && !IS_DIAG_DC(dc->ctx->dce_environment)) { + if (display_count == 0) { union display_idle_optimization_u idle_info = { 0 }; idle_info.idle_info.df_request_disabled = 1; @@ -151,10 +151,8 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -664,6 +662,7 @@ void vg_clk_mgr_construct( struct dccg *dccg) { struct smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &vg_funcs; @@ -703,32 +702,25 @@ void vg_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - vg_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->base.base.dentist_vco_freq_khz = 3600000; - } else { - struct clk_log_info log_info = {0}; + clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base); - clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base); + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.base.dentist_vco_freq_khz == 0) - clk_mgr->base.base.dentist_vco_freq_khz = 3600000; + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.base.dentist_vco_freq_khz == 0) + clk_mgr->base.base.dentist_vco_freq_khz = 3600000; - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - vg_bw_params.wm_table = lpddr5_wm_table; - } else { - vg_bw_params.wm_table = ddr4_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + vg_bw_params.wm_table = lpddr5_wm_table; + } else { + vg_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); @@ -746,12 +738,6 @@ void vg_clk_mgr_construct( if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, smu_dpm_clks.dpm_clks); -/* - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) { - enable powerfeatures when displaycount goes to 0 - dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); - } -*/ } void 
vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 3c743cd3d3f0..7ccd96959256 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -205,10 +205,8 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -672,6 +670,7 @@ void dcn31_clk_mgr_construct( struct dccg *dccg) { struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn31_funcs; @@ -711,29 +710,22 @@ void dcn31_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base); + clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base); - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn31_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn31_bw_params.wm_table = ddr5_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn31_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn31_bw_params.wm_table = ddr5_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 2a0c696f5861..2f7c8996b19d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -241,10 +241,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -724,6 +722,7 @@ void dcn314_clk_mgr_construct( struct dccg *dccg) { struct dcn314_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn314_funcs; @@ -763,29 +762,22 @@ void dcn314_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base); + clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base); - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) - dcn314_bw_params.wm_table = lpddr5_wm_table; - else - dcn314_bw_params.wm_table = ddr5_wm_table; + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) + dcn314_bw_params.wm_table = lpddr5_wm_table; + else + dcn314_bw_params.wm_table = ddr5_wm_table; - /* Saved clocks configured at boot for debug purposes */ - dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); - - } + /* Saved clocks configured at boot for debug purposes */ + dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 300c6a5872d0..b2c4f97afc8b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -184,12 +184,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; - if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; - } + if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) + new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; + if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) + new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -600,6 +598,7 @@ void dcn315_clk_mgr_construct( struct dccg *dccg) { struct dcn315_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn315_funcs; @@ -639,26 +638,19 @@ void dcn315_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); + clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); - if (clk_mgr->base.smu_ver > 0) - clk_mgr->base.smu_present = true; - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn315_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn315_bw_params.wm_table = ddr5_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn315_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn315_bw_params.wm_table = ddr5_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 538126cefd4d..d7de756301cf 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -207,12 +207,10 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - if (new_clocks->dispclk_khz < 100000) - new_clocks->dispclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; + if (new_clocks->dispclk_khz < 100000) + new_clocks->dispclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -616,6 +614,7 @@ void dcn316_clk_mgr_construct( struct dccg *dccg) { struct dcn316_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn316_funcs; @@ -655,35 +654,27 @@ void dcn316_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - clk_mgr->base.base.dentist_vco_freq_khz = 2500000; - } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base); + clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base); - if (clk_mgr->base.smu_ver > 0) - clk_mgr->base.smu_present = true; + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; - // Skip this for now as it did not work on DCN315, renable during bring up - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + // Skip this for now as it did not work on DCN315, renable during bring up + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.base.dentist_vco_freq_khz == 0) - clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.base.dentist_vco_freq_khz == 0) + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn316_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn316_bw_params.wm_table = ddr4_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); - + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn316_bw_params.wm_table = lpddr5_wm_table; + } else { + dcn316_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 1df623b298a9..20bff6a346ba 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -941,7 +941,6 @@ void dcn32_clk_mgr_construct( clk_mgr->base.clks.ref_dtbclk_khz = 268750; } - /* integer part is now VCO frequency in kHz */ clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 9f8efee27721..af929ee11af5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c 
@@ -3588,29 +3588,27 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - for (i = 0; i < surface_count; i++) { - struct dc_plane_state *plane_state = srf_updates[i].surface; - /*set logical flag for lock/unlock use*/ - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (!pipe_ctx->plane_state) - continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) - continue; - pipe_ctx->plane_state->triplebuffer_flips = false; - if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ - pipe_ctx->plane_state->triplebuffer_flips = true; - } - } - if (update_type == UPDATE_TYPE_FULL) { - /* force vsync flip when reconfiguring pipes to prevent underflow */ - plane_state->flip_immediate = false; + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + /*set logical flag for lock/unlock use*/ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + if (!pipe_ctx->plane_state) + continue; + if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + continue; + pipe_ctx->plane_state->triplebuffer_flips = false; + if (update_type == UPDATE_TYPE_FAST && + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; } } + if (update_type == UPDATE_TYPE_FULL) { + /* force vsync flip when reconfiguring pipes to prevent underflow */ + plane_state->flip_immediate = false; + } } // Update Type FULL, Surface updates diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 72b261ad9587..0d3ec50b1385 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -490,25 +490,6 @@ bool dc_stream_add_writeback(struct dc *dc, struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; dwb->otg_inst = stream_status->primary_otg_inst; } - if (IS_DIAG_DC(dc->ctx->dce_environment)) { - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { - dm_error("DC: update_bandwidth failed!\n"); - return false; - } - - /* enable writeback */ - if (dc->hwss.enable_writeback) { - struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; - - if (dwb->funcs->is_enabled(dwb)) { - /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, wb_info, dc->current_state); - } else { - /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, wb_info, dc->current_state); - } - } - } return true; } @@ -553,17 +534,6 @@ bool dc_stream_remove_writeback(struct dc *dc, } stream->num_wb_info = j; - if (IS_DIAG_DC(dc->ctx->dce_environment)) { - /* recalculate and apply DML parameters */ - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { - dm_error("DC: update_bandwidth failed!\n"); - return false; - } - - /* disable writeback */ - if (dc->hwss.disable_writeback) - dc->hwss.disable_writeback(dc, dwb_pipe_inst); - } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 3db7a2e11af5..3907eeff560c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ 
b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -464,8 +464,7 @@ void generic_reg_wait(const struct dc_context *ctx, field_value = get_reg_field_value_ex(reg_val, mask, shift); if (field_value == condition_value) { - if (i * delay_between_poll_us > 1000 && - !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) + if (i * delay_between_poll_us > 1000) DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", delay_between_poll_us * i / 1000, func_name, line); @@ -477,8 +476,7 @@ void generic_reg_wait(const struct dc_context *ctx, delay_between_poll_us, time_out_num_tries, func_name, line); - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - BREAK_TO_DEBUGGER(); + BREAK_TO_DEBUGGER(); } void generic_write_indirect_reg(const struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 150c19286d67..6b4731b5e975 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -69,13 +69,6 @@ enum dce_environment { DCE_ENV_VIRTUAL_HW }; -/* Note: use these macro definitions instead of direct comparison! */ -#define IS_FPGA_MAXIMUS_DC(dce_environment) \ - (dce_environment == DCE_ENV_FPGA_MAXIMUS) - -#define IS_DIAG_DC(dce_environment) \ - (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG)) - struct dc_perf_trace { unsigned long read_count; unsigned long write_count; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 462c7a3ec3cc..ed8936405dfa 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -920,25 +920,6 @@ static bool dce112_program_pix_clk( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; - if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { - unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - unsigned dp_dto_ref_100hz = 7000000; - unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; - - /* Set DTO values: phase = target clock, modulo = reference clock */ - REG_WRITE(PHASE[inst], clock_100hz); - REG_WRITE(MODULO[inst], dp_dto_ref_100hz); - - /* Enable DTO */ - if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) - REG_UPDATE_2(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1, - PIPE0_DTO_SRC_SEL, 1); - else - REG_UPDATE(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1); - return true; - } /* First disable SS * ATOMBIOS will enable by default SS on PLL for DP, * do not disable it here @@ -1015,25 +996,6 @@ static bool dcn31_program_pix_clk( REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); } else { - if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { - unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - unsigned dp_dto_ref_100hz = 7000000; - unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; - - /* Set DTO values: phase = target clock, modulo = reference clock */ - REG_WRITE(PHASE[inst], clock_100hz); - REG_WRITE(MODULO[inst], dp_dto_ref_100hz); - - /* Enable DTO */ - if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) - REG_UPDATE_2(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1, - PIPE0_DTO_SRC_SEL, 1); - else - REG_UPDATE(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1); - return true; - } if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) REG_UPDATE(PIXEL_RATE_CNTL[inst], diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index e74266cc0098..63009db8b5a7 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -1093,11 +1093,9 @@ static void dcn21_dmcu_construct( dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); - dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); - dmcu_dce->base.psp_version = psp_version; - } + psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); + dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); + dmcu_dce->base.psp_version = psp_version; } struct dmcu *dce_dmcu_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 1a0be40d125c..c6fe2c00aedb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -209,9 +209,6 @@ static bool dce110_enable_display_power_gating( struct dc_context *ctx = dc->ctx; unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 19873ee1f78d..690caaaff019 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -120,9 +120,6 @@ static bool dce112_enable_display_power_gating( enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index d4afe6c824d2..45e08c4d5861 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -159,9 +159,6 @@ static bool dce120_enable_display_power_gating( enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 905246a2ece4..13b4e5118459 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1012,31 +1012,29 @@ static void dcn10_reset_back_end_for_pipe( return; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - link = pipe_ctx->stream->link; - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. 
- */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); - - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } } @@ -1499,28 +1497,6 @@ void dcn10_init_hw(struct dc *dc) if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init) dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) hws->funcs.disable_vga(dc->hwseq); @@ -1532,23 +1508,21 @@ void dcn10_init_hw(struct dc *dc) res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { + if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + 
&res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); @@ -3070,15 +3044,13 @@ void dcn10_prepare_bandwidth( if (dc->debug.sanity_checks) hws->funcs.verify_allow_pstate_change_high(dc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) - context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; + if (context->stream_count == 0) + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - false); - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, @@ -3110,15 +3082,13 @@ void dcn10_optimize_bandwidth( if (dc->debug.sanity_checks) hws->funcs.verify_allow_pstate_change_high(dc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) - context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; + if (context->stream_count == 0) + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index c9e53dc49c92..e1975991e075 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -653,11 +653,9 @@ void optc1_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - /* Should be fast, status does not update on maximus */ - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 21ec1ba5ed75..a0625209c86d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1651,9 +1651,8 @@ static bool dcn10_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto fail; + &res_create_funcs)) + goto fail; dcn10_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index e74c3ce561ab..b0a13eb8318c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2480,36 +2480,31 @@ static void dcn20_reset_back_end_for_pipe( return; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. 
- */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); - - /* free acquired resources */ - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } } - else if (pipe_ctx->stream_res.dsc) { - dc->link_srv->set_dsc_enable(pipe_ctx, false); - } /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 4192c522e59a..e4b44e691ce6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -145,8 +145,4 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn20_funcs; dc->hwseq->funcs = dcn20_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index a08c335b7383..e0edc163d767 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -391,10 +391,9 @@ void optc2_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); } void optc2_triplebuffer_unlock(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 1d8c5805ef20..7dcae3183e07 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2488,15 +2488,9 @@ static bool dcn20_resource_construct( dc->caps.dp_hdmi21_pcon_support = true; - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = 
debug_defaults_drv; - } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - pool->base.pipe_count = 4; - pool->base.mpcc_count = pool->base.pipe_count; - dc->debug = debug_defaults_diags; - } else { - dc->debug = debug_defaults_diags; - } + //dcn2.0x dc->work_arounds.dedcn20_305_wa = true; @@ -2734,9 +2728,8 @@ static bool dcn20_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn20_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index 75472d53ff52..9e027db6d752 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -231,52 +231,39 @@ void dcn201_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); - REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - - hws->funcs.dccg_init(hws); - - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); - } else { - hws->funcs.bios_golden_init(dc); - - if (dc->ctx->dc_bios->fw_info_valid) { - res_pool->ref_clocks.xtalin_clock_inKhz = - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); - - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } - } - } else - ASSERT_CRITICAL(false); - for (i = 0; i < dc->link_count; i++) { - /* Power up AND update implementation according to the - * required signal (which may be different from the - * default signal on connector). 
- */ - struct dc_link *link = dc->links[i]; - - link->link_enc->funcs->hw_init(link->link_enc); + hws->funcs.bios_golden_init(dc); + + if (dc->ctx->dc_bios->fw_info_valid) { + res_pool->ref_clocks.xtalin_clock_inKhz = + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; + + if (res_pool->dccg && res_pool->hubbub) { + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); + + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } - if (hws->fb_offset.quad_part == 0) - read_mmhub_vm_setup(hws); + } else + ASSERT_CRITICAL(false); + for (i = 0; i < dc->link_count; i++) { + /* Power up AND update implementation according to the + * required signal (which may be different from the + * default signal on connector). + */ + struct dc_link *link = dc->links[i]; + + link->link_enc->funcs->hw_init(link->link_enc); } + if (hws->fb_offset.quad_part == 0) + read_mmhub_vm_setup(hws); /* Blank pixel data with OPP DPG */ for (i = 0; i < res_pool->timing_generator_count; i++) { @@ -362,10 +349,6 @@ void dcn201_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } - /* end of FPGA. Below if real ASIC */ - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - return; - for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c index 730875dfd8b4..70fcbec03fb6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c @@ -55,10 +55,9 @@ static void optc201_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); } static void optc201_triplebuffer_unlock(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 6ea70da28aaa..212c475d95cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -1272,9 +1272,8 @@ static bool dcn201_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn201_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 8b58ce1db035..f024157bd6eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -151,8 +151,4 @@ void dcn21_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn21_funcs; dc->hwseq->funcs = dcn21_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 19aaa557b2db..76268a7b7934 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1503,11 +1503,6 @@ static bool dcn21_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - pool->base.pipe_count = 4; - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1721,9 +1716,8 @@ static bool dcn21_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn21_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 3303c9aae068..ad4e1023b00e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -330,10 +330,6 @@ void dcn30_enable_writeback( DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\ __func__, wb_info->dwb_pipe_inst,\ wb_info->mpcc_inst); - if (IS_DIAG_DC(dc->ctx->dce_environment)) { - /*till diags switch to warmup interface*/ - dcn30_mmhubbub_warmup(dc, 1, wb_info); - } /* Update writeback pipe */ dcn30_set_writeback(dc, wb_info, context); @@ -447,28 +443,6 @@ void dcn30_init_hw(struct dc *dc) if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); hws->funcs.disable_vga(dc->hwseq); @@ -491,23 +465,21 @@ void dcn30_init_hw(struct dc *dc) res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { + if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - 
&res_pool->ref_clocks.dccg_ref_clock_inKhz); + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 18e94d8ae54f..3d19acaa12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -151,8 +151,4 @@ void dcn30_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn30_funcs; dc->hwseq->funcs = dcn30_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 34b08d90dc1d..c6f5f3df8061 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -55,10 +55,9 @@ void optc3_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 67a34cda3774..cd94b5f5fdb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -2376,10 +2376,7 @@ static bool dcn30_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2577,8 +2574,7 @@ static bool dcn30_resource_construct( /* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5ac2a272c380..1cf84a086fec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1513,10 +1513,7 @@ static bool dcn301_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -1710,9 +1707,8 @@ static bool dcn301_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn301_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 9f93c43115ba..efd98d64588d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -1309,8 +1309,6 @@ static bool dcn302_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1489,8 +1487,7 @@ static bool dcn302_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 7f72ef882ca4..fcd126602178 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1232,8 +1232,6 @@ static bool dcn303_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1400,8 +1398,7 @@ static bool dcn303_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 55494730e500..2a7f47642a44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -117,28 +117,6 @@ void dcn31_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); if (hws->funcs.disable_vga) @@ -154,23 +132,21 @@ void dcn31_init_hw(struct dc *dc) res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { - - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); - - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + if (res_pool->dccg && res_pool->hubbub) { + + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); + + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); @@ -553,35 +529,31 @@ static void dcn31_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - link = pipe_ctx->stream->link; - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. 
- */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); - - /* free acquired resources */ - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } - } else if (pipe_ctx->stream_res.dsc) { - dc->link_srv->set_dsc_enable(pipe_ctx, false); } pipe_ctx->stream = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index ba9e7dee6e5e..fc25cc300a17 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -154,8 +154,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn31_funcs; dc->hwseq->funcs = dcn31_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index ff8cd5076434..9c637c895f91 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1341,13 +1341,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
- * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1988,10 +1981,7 @@ static bool dcn31_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2195,9 +2185,8 @@ static bool dcn31_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 90be62c05822..86d6a514dec0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -160,8 +160,4 @@ void dcn314_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn314_funcs; dc->hwseq->funcs = dcn314_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index abeeede38fb3..2483d37e425d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -1375,13 +1375,6 @@ static struct dce_hwseq *dcn314_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. - * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -2101,8 +2094,7 @@ static bool dcn314_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 95fd3d087ea3..e6d87c162d26 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1342,13 +1342,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
- * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1947,10 +1940,7 @@ static bool dcn315_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2131,9 +2121,8 @@ static bool dcn315_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index 9ead347a33e9..a3be61cc541f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -1340,13 +1340,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. - * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1844,10 +1837,7 @@ static bool dcn316_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2028,9 +2018,8 @@ static bool dcn316_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 24a890d879b8..6f9a165c1eab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -161,8 +161,4 @@ void dcn32_hw_sequencer_init_functions(struct dc *dc) dc->hwss = dcn32_funcs; dc->hwseq->funcs = dcn32_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 98c394f9f8cf..33abc8c9d4be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -2260,10 +2260,7 @@ static bool dcn32_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2319,8 +2316,7 @@ static bool dcn32_resource_construct( } /* DML */ - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); + dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); /* IRQ Service */ init_data.ctx = dc->ctx; @@ -2457,9 +2453,8 @@ static bool dcn32_resource_construct( /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer init functions and Plane caps */ dcn32_hw_sequencer_init_functions(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index f4cd9749ffdf..af0bb3e94250 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1801,10 +1801,7 @@ static bool dcn321_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -1860,8 +1857,7 @@ static bool dcn321_resource_construct( } /* DML */ - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); /* IRQ Service */ init_data.ctx = dc->ctx; @@ -1993,9 +1989,8 @@ static bool dcn321_resource_construct( /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer init functions and Plane caps */ dcn32_hw_sequencer_init_functions(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c index 422f17aefd4a..6ce90678b33c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -333,45 +333,43 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits)); /* Default clock levels are used for diags, which may lead to overclocking. */ - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn3_01_ip.max_num_dpp = pool->base.pipe_count; - dcn3_01_soc.num_chans = bw_params->num_channels; - - ASSERT(clk_table->num_entries); - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } + dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn3_01_ip.max_num_dpp = pool->base.pipe_count; + dcn3_01_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; } - - s[i].state = i; - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - - s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - s[i].dram_bw_per_chan_gbps = - dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; } - if (clk_table->num_entries) { - dcn3_01_soc.num_states = clk_table->num_entries; - /* duplicate last level */ - s[dcn3_01_soc.num_states] = - dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; - s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; - } + s[i].state = i; + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + + s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + s[i].dram_bw_per_chan_gbps = + dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + 
s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + + if (clk_table->num_entries) { + dcn3_01_soc.num_states = clk_table->num_entries; + /* duplicate last level */ + s[dcn3_01_soc.num_states] = + dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; + s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; } memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 19d034341e64..deb6d162a2d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -582,6 +582,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits; struct clk_limit_table *clk_table = &bw_params->clk_table; unsigned int i, closest_clk_lvl; + int max_dispclk_mhz = 0, max_dppclk_mhz = 0; int j; dc_assert_fp_enabled(); @@ -589,59 +590,55 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits)); // Default clock levels are used for diags, which may lead to overclocking. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - int max_dispclk_mhz = 0, max_dppclk_mhz = 0; + dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_1_soc.num_chans = bw_params->num_channels; - dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; - dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count; - dcn3_1_soc.num_chans = bw_params->num_channels; + ASSERT(clk_table->num_entries); - ASSERT(clk_table->num_entries); + /* Prepass to find max clocks independent of voltage level. */ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } - /* Prepass to find max clocks independent of voltage level. */ - for (i = 0; i < clk_table->num_entries; ++i) { - if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; - if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } } - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } + s[i].state = i; - s[i].state = i; - - /* Clocks dependent on voltage level. */ - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * - 2 * clk_table->entries[i].wck_ratio; - - /* Clocks independent of voltage level. */ - s[i].dispclk_mhz = max_dispclk_mhz ? 
max_dispclk_mhz : - dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - - s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : - dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - - s[i].dram_bw_per_chan_gbps = - dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - if (clk_table->num_entries) { - dcn3_1_soc.num_states = clk_table->num_entries; - } + /* Clocks dependent on voltage level. */ + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * + 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : + dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + s[i].dram_bw_per_chan_gbps = + dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + if (clk_table->num_entries) { + dcn3_1_soc.num_states = clk_table->num_entries; } memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits)); @@ -655,10 +652,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31); - else - dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31); } void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -719,10 +713,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315); - else - dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315); } void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -738,71 +729,68 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits)); // Default clock levels are used for diags, which may lead to overclocking. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - - dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; - dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count; - dcn3_16_soc.num_chans = bw_params->num_channels; + dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_16_soc.num_chans = bw_params->num_channels; - ASSERT(clk_table->num_entries); + ASSERT(clk_table->num_entries); - /* Prepass to find max clocks independent of voltage level. */ - for (i = 0; i < clk_table->num_entries; ++i) { - if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; - if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; - } + /* Prepass to find max clocks independent of voltage level. */ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= - clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } - // Ported from DCN315 - if (clk_table->num_entries == 1) { - /*smu gives one DPM level, let's take the highest one*/ - closest_clk_lvl = dcn3_16_soc.num_states - 1; + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= + clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; } + } + // Ported from DCN315 + if (clk_table->num_entries == 1) { + /*smu gives one DPM level, let's take the highest one*/ + closest_clk_lvl = dcn3_16_soc.num_states - 1; + } - s[i].state = i; + s[i].state = i; - /* Clocks dependent on voltage level. */ - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - if (clk_table->num_entries == 1 && - s[i].dcfclk_mhz < - dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { - /*SMU fix not released yet*/ - s[i].dcfclk_mhz = - dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; - } - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * - 2 * clk_table->entries[i].wck_ratio; - - /* Clocks independent of voltage level. */ - s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : - dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - - s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : - dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - - s[i].dram_bw_per_chan_gbps = - dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - if (clk_table->num_entries) { - dcn3_16_soc.num_states = clk_table->num_entries; + /* Clocks dependent on voltage level. 
*/ + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + if (clk_table->num_entries == 1 && + s[i].dcfclk_mhz < + dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { + /*SMU fix not released yet*/ + s[i].dcfclk_mhz = + dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; } + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * + 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + s[i].dram_bw_per_chan_gbps = + dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + if (clk_table->num_entries) { + dcn3_16_soc.num_states = clk_table->num_entries; } memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits)); @@ -817,10 +805,7 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31); - else - dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31); } int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index fcde8f21b8be..4113ce79c4af 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -1432,14 +1432,6 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); - // hack for FPGA - if (mode_lib->project == DML_PROJECT_DCN31_FPGA) { - if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) { - disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1; - dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n"); - } - } - disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 554152371eb5..318b9c2bc9be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -190,8 +190,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dc_assert_fp_enabled(); // Default clock levels are used for diags, which may lead to 
overclocking. - if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) { - + if (dc->config.use_default_clock_table == false) { dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count; @@ -267,10 +266,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dcn20_patch_bounding_box(dc, &dcn3_14_soc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314); - else - dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA); } static bool is_dual_plane(enum surface_pixel_format format) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 61ba3e33bb11..b3e8dc08030c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -1520,14 +1520,6 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); - // hack for FPGA - if (mode_lib->project == DML_PROJECT_DCN31_FPGA) { - if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) { - disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1; - dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n"); - } - } - disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index d8d8fcd5ef1f..137ff970c9aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2530,80 +2530,78 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa { dc_assert_fp_enabled(); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* Overrides from dc->config options */ - dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - - /* Override from passed dc->bb_overrides if available*/ - if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns - && dc->bb_overrides.sr_exit_time_ns) { - dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; - } + /* Overrides from dc->config options */ + dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000) - != dc->bb_overrides.sr_enter_plus_exit_time_ns - && dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dcn3_2_soc.sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } + /* Override from passed dc->bb_overrides if available*/ + if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns + && dc->bb_overrides.sr_exit_time_ns) { + dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } - if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns - && dc->bb_overrides.urgent_latency_ns) { - dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 
1000.0; - dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } + if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_time_ns + && dc->bb_overrides.sr_enter_plus_exit_time_ns) { + dcn3_2_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } - if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000) - != dc->bb_overrides.dram_clock_change_latency_ns - && dc->bb_overrides.dram_clock_change_latency_ns) { - dcn3_2_soc.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns + && dc->bb_overrides.urgent_latency_ns) { + dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } - if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000) - != dc->bb_overrides.fclk_clock_change_latency_ns - && dc->bb_overrides.fclk_clock_change_latency_ns) { - dcn3_2_soc.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - } + if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000) + != dc->bb_overrides.dram_clock_change_latency_ns + && dc->bb_overrides.dram_clock_change_latency_ns) { + dcn3_2_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } - if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000) - != dc->bb_overrides.dummy_clock_change_latency_ns - && dc->bb_overrides.dummy_clock_change_latency_ns) { - dcn3_2_soc.dummy_pstate_latency_us = - dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000) + != dc->bb_overrides.fclk_clock_change_latency_ns + && dc->bb_overrides.fclk_clock_change_latency_ns) { + dcn3_2_soc.fclk_change_latency_us = + dc->bb_overrides.fclk_clock_change_latency_ns / 1000; + } - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; + if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000) + != dc->bb_overrides.dummy_clock_change_latency_ns + && dc->bb_overrides.dummy_clock_change_latency_ns) { + dcn3_2_soc.dummy_pstate_latency_us = + dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; + } - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_2_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; + /* Override from VBIOS if VBIOS bb_info available */ + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_2_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; + if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_2_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_2_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_2_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dcn3_2_soc.num_chans = 
dc->ctx->dc_bios->vram_info.num_chans; - dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, - dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_2_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; } + } - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* Override from VBIOS for num_chan */ + if (dc->ctx->dc_bios->vram_info.num_chans) { + dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); } + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* DML DSC delay factor workaround */ dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; @@ -2614,7 +2612,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; /* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */ - if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) { + if (bw_params->clk_table.entries[0].memclk_mhz) { if (dc->debug.use_legacy_soc_bb_mechanism) { unsigned int i = 0, j = 0, num_states = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 342a1bcb4927..ffd7c3c1b142 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -471,80 +471,78 @@ static void dcn321_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) { dc_assert_fp_enabled(); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* Overrides from dc->config options */ - dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - - /* Override from passed dc->bb_overrides if available*/ - if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns - && dc->bb_overrides.sr_exit_time_ns) { - dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; - } + /* Overrides from dc->config options */ + dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000) - != dc->bb_overrides.sr_enter_plus_exit_time_ns - && dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dcn3_21_soc.sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } + /* Override from passed dc->bb_overrides if available*/ + if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns + && dc->bb_overrides.sr_exit_time_ns) { + dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } - if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns - && dc->bb_overrides.urgent_latency_ns) { - dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } + if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000) + != 
dc->bb_overrides.sr_enter_plus_exit_time_ns + && dc->bb_overrides.sr_enter_plus_exit_time_ns) { + dcn3_21_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } - if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000) - != dc->bb_overrides.dram_clock_change_latency_ns - && dc->bb_overrides.dram_clock_change_latency_ns) { - dcn3_21_soc.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns + && dc->bb_overrides.urgent_latency_ns) { + dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } - if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000) - != dc->bb_overrides.fclk_clock_change_latency_ns - && dc->bb_overrides.fclk_clock_change_latency_ns) { - dcn3_21_soc.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - } + if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000) + != dc->bb_overrides.dram_clock_change_latency_ns + && dc->bb_overrides.dram_clock_change_latency_ns) { + dcn3_21_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } - if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000) - != dc->bb_overrides.dummy_clock_change_latency_ns - && dc->bb_overrides.dummy_clock_change_latency_ns) { - dcn3_21_soc.dummy_pstate_latency_us = - dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; - } + if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000) + != dc->bb_overrides.fclk_clock_change_latency_ns + && dc->bb_overrides.fclk_clock_change_latency_ns) { + dcn3_21_soc.fclk_change_latency_us = + dc->bb_overrides.fclk_clock_change_latency_ns / 1000; + } - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; + if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000) + != dc->bb_overrides.dummy_clock_change_latency_ns + && dc->bb_overrides.dummy_clock_change_latency_ns) { + dcn3_21_soc.dummy_pstate_latency_us = + dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; + } - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_21_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; + /* Override from VBIOS if VBIOS bb_info available */ + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_21_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; + if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_21_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_21_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_21_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, - 
dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_21_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; } + } - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* Override from VBIOS for num_chan */ + if (dc->ctx->dc_bios->vram_info.num_chans) { + dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); } + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* DML DSC delay factor workaround */ dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; @@ -555,7 +553,6 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; /* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */ - if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) { if (dc->debug.use_legacy_soc_bb_mechanism) { unsigned int i = 0, j = 0, num_states = 0; @@ -699,6 +696,5 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); if (dc->current_state) dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); - } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index bdf3ac6cadd5..da0cfbb071e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -113,7 +113,6 @@ void dml_init_instance(struct display_mode_lib *lib, lib->funcs = dml30_funcs; break; case DML_PROJECT_DCN31: - case DML_PROJECT_DCN31_FPGA: case DML_PROJECT_DCN315: lib->funcs = dml31_funcs; break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index a9d49ef58fb5..5edf69fa40d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -41,7 +41,6 @@ enum dml_project { DML_PROJECT_DCN30, DML_PROJECT_DCN31, DML_PROJECT_DCN315, - DML_PROJECT_DCN31_FPGA, DML_PROJECT_DCN314, DML_PROJECT_DCN32, }; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index edd7d026a762..586fe25c1702 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -28,24 +28,6 @@ #include "dccg.h" #include "clk_mgr.h" -static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) -{ - switch (link->link_enc->transmitter) { - case TRANSMITTER_UNIPHY_A: - return PHYD32CLKA; - case TRANSMITTER_UNIPHY_B: - return PHYD32CLKB; - case TRANSMITTER_UNIPHY_C: - return PHYD32CLKC; - case TRANSMITTER_UNIPHY_D: - return PHYD32CLKD; - case TRANSMITTER_UNIPHY_E: - return PHYD32CLKE; - default: - return PHYD32CLKA; - } -} - static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size) 
{ @@ -120,81 +102,26 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); } -static void enable_hpo_dp_fpga_link_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings) -{ - const struct dc *dc = link->dc; - enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(link); - int phyd32clk_freq_khz = link_settings->link_rate == LINK_RATE_UHBR10 ? 312500 : - link_settings->link_rate == LINK_RATE_UHBR13_5 ? 412875 : - link_settings->link_rate == LINK_RATE_UHBR20 ? 625000 : 0; - - dm_set_phyd32clk(dc->ctx, phyd32clk_freq_khz); - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_PHYD32CLK, - true); - dc->res_pool->dccg->funcs->enable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst, - phyd32clk); - link_res->hpo_dp_link_enc->funcs->link_enable( - link_res->hpo_dp_link_enc, - link_settings->lane_count); - -} - static void enable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { - if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) - enable_hpo_dp_fpga_link_output(link, link_res, signal, - clock_source, link_settings); - else - link_res->hpo_dp_link_enc->funcs->enable_link_phy( - link_res->hpo_dp_link_enc, - link_settings, - link->link_enc->transmitter, - link->link_enc->hpd_source); -} - - -static void disable_hpo_dp_fpga_link_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal) -{ - const struct dc *dc = link->dc; - - link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); - dc->res_pool->dccg->funcs->disable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst); - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_SYMCLK, - false); - dm_set_phyd32clk(dc->ctx, 0); + link_res->hpo_dp_link_enc->funcs->enable_link_phy( + link_res->hpo_dp_link_enc, + link_settings, + link->link_enc->transmitter, + link->link_enc->hpd_source); } static void disable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { - if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) { - disable_hpo_dp_fpga_link_output(link, link_res, signal); - } else { link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); - } } static void set_hpo_dp_link_test_pattern(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 2267fb097830..2963267fe74a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -765,7 +765,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) struct dc_stream_state *stream = pipe_ctx->stream; bool result = false; - if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + if (dc_is_virtual_signal(stream->signal)) result = true; else result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); @@ -778,7 +778,6 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) void link_set_dsc_on_stream(struct 
pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -816,8 +815,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) - && !dp_is_128b_132b_signal(pipe_ctx)) { + if (dc_is_dp_signal(stream->signal) && !dp_is_128b_132b_signal(pipe_ctx)) { DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -849,7 +847,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) false, NULL, true); - else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + else { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0); @@ -2271,8 +2269,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) } } - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) return; if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { @@ -2358,6 +2355,8 @@ void link_set_dpms_on( enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + bool apply_edp_fast_boot_optimization = + pipe_ctx->stream->apply_edp_fast_boot_optimization; ASSERT(is_master_pipe_for_link(link, pipe_ctx)); @@ -2375,9 +2374,8 @@ void link_set_dpms_on( } } - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) - return; + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); @@ -2402,138 +2400,126 @@ void link_set_dpms_on( link_hwss->setup_stream_attribute(pipe_ctx); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - bool apply_edp_fast_boot_optimization = - pipe_ctx->stream->apply_edp_fast_boot_optimization; - - pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; - // Enable VPG before building infoframe - if (vpg && vpg->funcs->vpg_poweron) - vpg->funcs->vpg_poweron(vpg); + // Enable VPG before building infoframe + if (vpg && vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); - resource_build_info_frame(pipe_ctx); - dc->hwss.update_info_frame(pipe_ctx); + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); - /* Do not touch link on seamless boot optimization. */ - if (pipe_ctx->stream->apply_seamless_boot_optimization) { - pipe_ctx->stream->dpms_off = false; + /* Do not touch link on seamless boot optimization. 
*/ + if (pipe_ctx->stream->apply_seamless_boot_optimization) { + pipe_ctx->stream->dpms_off = false; - /* Still enable stream features & audio on seamless boot for DP external displays */ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { - enable_stream_features(pipe_ctx); - dc->hwss.enable_audio_stream(pipe_ctx); - } - - update_psp_stream_config(pipe_ctx, false); - return; - } - - /* eDP lit up by bios already, no need to enable again. */ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && - apply_edp_fast_boot_optimization && - !pipe_ctx->stream->timing.flags.DSC && - !pipe_ctx->next_odm_pipe) { - pipe_ctx->stream->dpms_off = false; - update_psp_stream_config(pipe_ctx, false); - return; + /* Still enable stream features & audio on seamless boot for DP external displays */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + enable_stream_features(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); } - if (pipe_ctx->stream->dpms_off) - return; + update_psp_stream_config(pipe_ctx, false); + return; + } - /* Have to setup DSC before DIG FE and BE are connected (which happens before the - * link training). This is to make sure the bandwidth sent to DIG BE won't be - * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag - * will be automatically set at a later time when the video is enabled - * (DP_VID_STREAM_EN = 1). - */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - link_set_dsc_enable(pipe_ctx, true); + /* eDP lit up by bios already, no need to enable again. */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + apply_edp_fast_boot_optimization && + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { + pipe_ctx->stream->dpms_off = false; + update_psp_stream_config(pipe_ctx, false); + return; + } - } + if (pipe_ctx->stream->dpms_off) + return; - status = enable_link(state, pipe_ctx); + /* Have to setup DSC before DIG FE and BE are connected (which happens before the + * link training). This is to make sure the bandwidth sent to DIG BE won't be + * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag + * will be automatically set at a later time when the video is enabled + * (DP_VID_STREAM_EN = 1). + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) + link_set_dsc_enable(pipe_ctx, true); - if (status != DC_OK) { - DC_LOG_WARNING("enabling link %u failed: %d\n", - pipe_ctx->stream->link->link_index, - status); + } - /* Abort stream enable *unless* the failure was due to - * DP link training - some DP monitors will recover and - * show the stream anyway. But MST displays can't proceed - * without link training. 
- */ - if (status != DC_FAIL_DP_LINK_TRAINING || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - if (false == stream->link->link_status.link_active) - disable_link(stream->link, &pipe_ctx->link_res, - pipe_ctx->stream->signal); - BREAK_TO_DEBUGGER(); - return; - } - } + status = enable_link(state, pipe_ctx); - /* turn off otg test pattern if enable */ - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) - pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - COLOR_DEPTH_UNDEFINED); + if (status != DC_OK) { + DC_LOG_WARNING("enabling link %u failed: %d\n", + pipe_ctx->stream->link->link_index, + status); - /* This second call is needed to reconfigure the DIG - * as a workaround for the incorrect value being applied - * from transmitter control. + /* Abort stream enable *unless* the failure was due to + * DP link training - some DP monitors will recover and + * show the stream anyway. But MST displays can't proceed + * without link training. */ - if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || - dp_is_128b_132b_signal(pipe_ctx))) { - if (link_enc) - link_enc->funcs->setup( - link_enc, + if (status != DC_FAIL_DP_LINK_TRAINING || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - } + BREAK_TO_DEBUGGER(); + return; + } + } - dc->hwss.enable_stream(pipe_ctx); + /* turn off otg test pattern if enable */ + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + COLOR_DEPTH_UNDEFINED); - /* Set DPS PPS SDP (AKA "info frames") */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) { - dp_set_dsc_on_rx(pipe_ctx, true); - link_set_dsc_pps_packet(pipe_ctx, true, true); - } + /* This second call is needed to reconfigure the DIG + * as a workaround for the incorrect value being applied + * from transmitter control. 
+ */ + if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || + dp_is_128b_132b_signal(pipe_ctx))) { + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); - else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - dp_is_128b_132b_signal(pipe_ctx)) - update_sst_payload(pipe_ctx, true); + dc->hwss.enable_stream(pipe_ctx); + + /* Set DPS PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) { + dp_set_dsc_on_rx(pipe_ctx, true); + link_set_dsc_pps_packet(pipe_ctx, true, true); + } + } - dc->hwss.unblank_stream(pipe_ctx, - &pipe_ctx->stream->link->cur_link_settings); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + allocate_mst_payload(pipe_ctx); + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + dp_is_128b_132b_signal(pipe_ctx)) + update_sst_payload(pipe_ctx, true); - if (stream->sink_patches.delay_ignore_msa > 0) - msleep(stream->sink_patches.delay_ignore_msa); + dc->hwss.unblank_stream(pipe_ctx, + &pipe_ctx->stream->link->cur_link_settings); - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - enable_stream_features(pipe_ctx); - update_psp_stream_config(pipe_ctx, false); + if (stream->sink_patches.delay_ignore_msa > 0) + msleep(stream->sink_patches.delay_ignore_msa); - dc->hwss.enable_audio_stream(pipe_ctx); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + enable_stream_features(pipe_ctx); + update_psp_stream_config(pipe_ctx, false); - } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - if (dp_is_128b_132b_signal(pipe_ctx)) - dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx); - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - link_set_dsc_enable(pipe_ctx, true); - } + dc->hwss.enable_audio_stream(pipe_ctx); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { set_avmute(pipe_ctx, false); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index f46864630506..3a5e80b57711 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -326,8 +326,7 @@ bool dp_is_fec_supported(const struct dc_link *link) return (dc_is_dp_signal(link->connector_signal) && link_enc && link_enc->features.fec_supported && - link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && - !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE); } bool dp_should_enable_fec(const struct dc_link *link) -- cgit v1.2.3 From 4ba439b0b38456c61505568a7fa4fa364850a236 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:43 +0800 Subject: drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn31/dcn31_resource.c:1356:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn31/dcn31_resource.c:894:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 27 
---------------------- 1 file changed, 27 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 9c637c895f91..f2d589c505a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -891,24 +891,6 @@ static const struct dc_debug_options debug_defaults_drv = { .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1353,15 +1335,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn31_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) { unsigned int i; -- cgit v1.2.3 From 0baae624630788862bbd654741929007971e9d5b Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 18 May 2023 11:30:44 -0400 Subject: drm/amd/display: Refactor fast update to use new HWSS build sequence [Description] - Refactor HW sequencer to use a build / execute sequence - Also move gamma updates to become fast v2: squash in build fix ("drm/amd/display: Fix guarding of 'if (dc->debug.visual_confirm)'") Acked-by: Stylon Wang Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 274 ++++++++++++++++++--- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 255 +++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../drm/amd/display/dc/dce100/dce100_resource.c | 5 + .../drm/amd/display/dc/dce110/dce110_resource.c | 5 + .../drm/amd/display/dc/dce112/dce112_resource.c | 5 + .../drm/amd/display/dc/dce120/dce120_resource.c | 1 + .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 6 + .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 + .../drm/amd/display/dc/dcn201/dcn201_resource.c | 1 + .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1 + .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 1 + .../drm/amd/display/dc/dcn315/dcn315_resource.c | 1 + .../drm/amd/display/dc/dcn316/dcn316_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 24 ++ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h | 2 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 1 + drivers/gpu/drm/amd/display/dc/inc/core_types.h | 11 + drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 126 ++++++++++ 20 files changed, 693 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/amd/display/dc/dcn31') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0e9403f21dae..60da6d834518 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2589,15 +2589,19 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, type); } - if (update_flags->bits.input_csc_change - || update_flags->bits.coeff_reduction_change - || update_flags->bits.lut_3d - || update_flags->bits.gamma_change - || update_flags->bits.gamut_remap_change) { + if (update_flags->bits.lut_3d) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } + if (dc->debug.enable_legacy_fast_update && + (update_flags->bits.gamma_change || + update_flags->bits.gamut_remap_change || + update_flags->bits.input_csc_change || + update_flags->bits.coeff_reduction_change)) { + type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, type); + } return overall_type; } @@ -2630,7 +2634,7 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->integer_scaling_update) su_flags->bits.scaling = 1; - if (stream_update->out_transfer_func) + if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) @@ -2661,6 +2665,12 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; + + /* Output transfer function changes do not require bandwidth recalculation, + * so don't trigger a full update + */ + if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + su_flags->bits.out_tf = 1; } for (i = 0 ; i < surface_count; i++) { @@ -3412,6 +3422,166 @@ void dc_dmub_update_dirty_rect(struct dc *dc, } } +static void build_dmub_update_dirty_rect( + struct dc *dc, + int surface_count, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + union dmub_rb_cmd cmd; + struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; + unsigned int i, j; + unsigned int panel_inst = 0; + + if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) + return; + + if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) + return; + + memset(&cmd, 0x0, sizeof(cmd)); + cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; + cmd.update_dirty_rect.header.sub_type = 0; + cmd.update_dirty_rect.header.payload_bytes = + sizeof(cmd.update_dirty_rect) - + sizeof(cmd.update_dirty_rect.header); + update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; + + if (!srf_updates[i].surface || !flip_addr) + continue; + /* Do not send in immediate flip mode */ + if (srf_updates[i].surface->flip_immediate) + continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; + update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; + memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, + sizeof(flip_addr->dirty_rects)); + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->stream != stream) + continue; + if (pipe_ctx->plane_state != plane_state) + continue; + update_dirty_rect->panel_inst = panel_inst; + update_dirty_rect->pipe_idx = j; + 
dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; + dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + (*dmub_cmd_count)++; + } + } +} + + +/** + * ************************************************************************************************ + * build_dmub_cmd_list: Build an array of DMCUB commands to be sent to DMCUB + * + * @param [in]: dc: Current DC state + * @param [in]: srf_updates: Array of surface updates + * @param [in]: surface_count: Number of surfaces that have an updated + * @param [in]: stream: Correponding stream to be updated in the current flip + * @param [in]: context: New DC state to be programmed + * + * @param [out]: dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB + * @param [out]: dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array + * + * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required + * to build an array of commands and have them sent while the OTG lock is acquired. + * + * @return: void + * ************************************************************************************************ + */ +static void build_dmub_cmd_list(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + // Initialize cmd count to 0 + *dmub_cmd_count = 0; + build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); +} + +static void commit_planes_for_stream_fast(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *context) +{ + int i, j; + struct pipe_ctx *top_pipe_to_program = NULL; + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); + dc_z10_restore(dc); + + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->top_pipe && + !pipe_ctx->prev_odm_pipe && + pipe_ctx->stream && + pipe_ctx->stream == stream) { + top_pipe_to_program = pipe_ctx; + } + } + + if (dc->debug.visual_confirm) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + } + } + + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + /*set logical flag for lock/unlock use*/ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->plane_state) + continue; + if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + continue; + pipe_ctx->plane_state->triplebuffer_flips = false; + if (update_type == UPDATE_TYPE_FAST && + dc->hwss.program_triplebuffer && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; + } + } + } + + build_dmub_cmd_list(dc, + srf_updates, + surface_count, + stream, + context, + context->dc_dmub_cmd, + &(context->dmub_cmd_count)); + hwss_build_fast_sequence(dc, + context->dc_dmub_cmd, + context->dmub_cmd_count, + context->block_sequence, + &(context->block_sequence_steps), + top_pipe_to_program); + hwss_execute_sequence(dc, + context->block_sequence, + 
context->block_sequence_steps); +} + static void commit_planes_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, @@ -3449,21 +3619,6 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { - /* Optimize seamless boot flag keeps clocks and watermarks high until - * first flip. After first flip, optimization is required to lower - * bandwidth. Important to note that it is expected UEFI will - * only light up a single display on POST, therefore we only expect - * one stream with seamless boot flag set. - */ - if (stream->apply_seamless_boot_optimization) { - stream->apply_seamless_boot_optimization = false; - - if (get_seamless_boot_stream_count(context) == 0) - dc->optimized_required = true; - } - } - if (update_type == UPDATE_TYPE_FULL) { dc_allow_idle_optimizations(dc, false); @@ -4046,6 +4201,43 @@ static bool commit_minimal_transition_state(struct dc *dc, return true; } +/** + * ******************************************************************************* + * update_seamless_boot_flags: Helper function for updating seamless boot flags + * + * @param [in]: dc: Current DC state + * @param [in]: context: New DC state to be programmed + * @param [in]: surface_count: Number of surfaces that have an updated + * @param [in]: stream: Correponding stream to be updated in the current flip + * + * Updating seamless boot flags do not need to be part of the commit sequence. This + * helper function will update the seamless boot flags on each flip (if required) + * outside of the HW commit sequence (fast or slow). + * + * @return: void + * ******************************************************************************* + */ +static void update_seamless_boot_flags(struct dc *dc, + struct dc_state *context, + int surface_count, + struct dc_stream_state *stream) +{ + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { + /* Optimize seamless boot flag keeps clocks and watermarks high until + * first flip. After first flip, optimization is required to lower + * bandwidth. Important to note that it is expected UEFI will + * only light up a single display on POST, therefore we only expect + * one stream with seamless boot flag set. 
+ */ + if (stream->apply_seamless_boot_optimization) { + stream->apply_seamless_boot_optimization = false; + + if (get_seamless_boot_stream_count(context) == 0) + dc->optimized_required = true; + } + } +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -4112,14 +4304,25 @@ bool dc_update_planes_and_stream(struct dc *dc, update_type = UPDATE_TYPE_FULL; } - commit_planes_for_stream( - dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); + update_seamless_boot_flags(dc, context, surface_count, stream); + if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } if (dc->current_state != context) { @@ -4244,7 +4447,17 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - commit_planes_for_stream( + update_seamless_boot_flags(dc, context, surface_count, stream); + if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( dc, srf_updates, surface_count, @@ -4252,6 +4465,7 @@ void dc_commit_updates_for_stream(struct dc *dc, stream_update, update_type, context); + } /*update current_State*/ if (dc->current_state != context) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 8a98b8dd008e..182c42c63bc5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -27,6 +27,8 @@ #include "core_types.h" #include "timing_generator.h" #include "hw_sequencer.h" +#include "hw_sequencer_private.h" +#include "basics/dc_common.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) @@ -463,6 +465,259 @@ void get_subvp_visual_confirm_color( } } +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx *pipe_ctx) +{ + struct dc_plane_state *plane = pipe_ctx->plane_state; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dce_hwseq *hws = dc->hwseq; + struct pipe_ctx *current_pipe = NULL; + struct pipe_ctx *current_mpc_pipe = NULL; + unsigned int i = 0; + + *num_steps = 0; // Initialize to 0 + + if (!plane || !stream) + return; + + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = true; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + + for (i = 0; i < dmub_cmd_count; i++) { + 
block_sequence[*num_steps].params.send_dmcub_cmd_params.ctx = dc->ctx; + block_sequence[*num_steps].params.send_dmcub_cmd_params.cmd = &(dc_dmub_cmd[i].dmub_cmd); + block_sequence[*num_steps].params.send_dmcub_cmd_params.wait_type = dc_dmub_cmd[i].wait_type; + block_sequence[*num_steps].func = DMUB_SEND_DMCUB_CMD; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + while (current_mpc_pipe) { + if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; + block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*num_steps)++; + } + if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc; + block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips; + block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*num_steps)++; + } + if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { + block_sequence[*num_steps].params.update_plane_addr_params.dc = dc; + block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR; + (*num_steps)++; + } + + if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) { + block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state; + block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) { + block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) { + block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_SETUP_DPP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) { + block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*num_steps)++; + } + if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) { + block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_output_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_output_transfer_func_params.stream = current_mpc_pipe->stream; + block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = 
current_pipe->next_odm_pipe; + } + + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = false; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + + while (current_mpc_pipe) { + if (!current_mpc_pipe->bottom_pipe && !pipe_ctx->next_odm_pipe && + current_mpc_pipe->stream && current_mpc_pipe->plane_state && + current_mpc_pipe->plane_state->update_flags.bits.addr_update && + !current_mpc_pipe->plane_state->skip_manual_trigger) { + block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; + (*num_steps)++; + } + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = current_pipe->next_odm_pipe; + } +} + +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps) +{ + unsigned int i; + union block_sequence_params *params; + struct dce_hwseq *hws = dc->hwseq; + + for (i = 0; i < num_steps; i++) { + params = &(block_sequence[i].params); + switch (block_sequence[i].func) { + + case DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST: + dc->hwss.subvp_pipe_control_lock_fast(params); + break; + case OPTC_PIPE_CONTROL_LOCK: + dc->hwss.pipe_control_lock(params->pipe_control_lock_params.dc, + params->pipe_control_lock_params.pipe_ctx, + params->pipe_control_lock_params.lock); + break; + case HUBP_SET_FLIP_CONTROL_GSL: + dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, + params->set_flip_control_gsl_params.flip_immediate); + break; + case HUBP_PROGRAM_TRIPLEBUFFER: + dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, + params->program_triplebuffer_params.pipe_ctx, + params->program_triplebuffer_params.enableTripleBuffer); + break; + case HUBP_UPDATE_PLANE_ADDR: + dc->hwss.update_plane_addr(params->update_plane_addr_params.dc, + params->update_plane_addr_params.pipe_ctx); + break; + case DPP_SET_INPUT_TRANSFER_FUNC: + hws->funcs.set_input_transfer_func(params->set_input_transfer_func_params.dc, + params->set_input_transfer_func_params.pipe_ctx, + params->set_input_transfer_func_params.plane_state); + break; + case DPP_PROGRAM_GAMUT_REMAP: + dc->hwss.program_gamut_remap(params->program_gamut_remap_params.pipe_ctx); + break; + case DPP_SETUP_DPP: + hwss_setup_dpp(params); + break; + case DPP_PROGRAM_BIAS_AND_SCALE: + hwss_program_bias_and_scale(params); + break; + case OPTC_PROGRAM_MANUAL_TRIGGER: + hwss_program_manual_trigger(params); + break; + case DPP_SET_OUTPUT_TRANSFER_FUNC: + hws->funcs.set_output_transfer_func(params->set_output_transfer_func_params.dc, + params->set_output_transfer_func_params.pipe_ctx, + params->set_output_transfer_func_params.stream); + break; + case MPC_UPDATE_VISUAL_CONFIRM: + dc->hwss.update_visual_confirm_color(params->update_visual_confirm_params.dc, + 
params->update_visual_confirm_params.pipe_ctx, + params->update_visual_confirm_params.mpcc_id); + break; + case DMUB_SEND_DMCUB_CMD: + hwss_send_dmcub_cmd(params); + break; + default: + ASSERT(false); + break; + } + } +} + +void hwss_send_dmcub_cmd(union block_sequence_params *params) +{ + struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; + union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd; + enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type; + + dm_execute_dmub_cmd(ctx, cmd, wait_type); +} + +void hwss_program_manual_trigger(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; + + if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) + pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); +} + +void hwss_setup_dpp(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->setup_dpp_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + + if (dpp && dpp->funcs->dpp_setup) { + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + } +} + +void hwss_program_bias_and_scale(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dc_bias_and_scale bns_params = {0}; + + //TODO :for CNVC set scale and bias registers if necessary + build_prescale_params(&bns_params, plane_state); + if (dpp->funcs->dpp_program_bias_and_scale) + dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); +} + void get_mclk_switch_visual_confirm_color( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7fd9f5a9e191..7ded574f84ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -896,6 +896,7 @@ struct dc_debug_options { bool disable_dp_plus_plus_wa; uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; + bool enable_legacy_fast_update; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 54805802cbd5..42e9b6a529f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -401,6 +401,10 @@ static const struct dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -1071,6 +1075,7 @@ static bool dce100_resource_construct( dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index a4a45a6ce61e..46eca5a21e1c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -424,6 +424,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct 
dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .per_pixel_alpha = 1, @@ -1368,6 +1372,7 @@ static bool dce110_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index e179e80667d1..808855886183 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -429,6 +429,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -1239,6 +1243,7 @@ static bool dce112_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index af631085e88c..18c5a86d2d61 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -526,6 +526,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, + .enable_legacy_fast_update = true, }; static struct clock_source *dce120_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 5825e6f412bd..3935fd455f0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -418,6 +418,10 @@ static const struct dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE80_REG_LIST() }; @@ -969,6 +973,7 @@ static bool dce80_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * @@ -1369,6 +1374,7 @@ static bool dce83_construct( dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 26ddf73fd5b1..4b02f8443534 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -553,6 +553,7 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 6ef7e2634991..4cc8de2627ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -722,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; void dcn20_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 5ff09bf4bc30..fdba8a9f5c30 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -613,6 +613,7 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = false, + .enable_legacy_fast_update = true, }; static void dcn201_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index da6d42de0554..d693ea42d033 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -653,6 +653,7 @@ static const struct dc_debug_options debug_defaults_drv = { .usbc_combo_phy_reset_wa = true, .dmub_command_table = true, .use_max_lb = true, + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index f2d589c505a8..fc33b5fcabe1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -887,6 +887,7 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .disable_z10 = true, + .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 8570bdc292b4..0cc853964781 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -887,6 +887,7 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index b6e0aa2ab27a..707cf28bbceb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -884,6 +884,7 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 7f5cd8c8d49b..3f11992e380b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -409,6 +409,30 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, } } +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) +{ + struct dc *dc = 
params->subvp_pipe_control_lock_fast_params.dc; + bool lock = params->subvp_pipe_control_lock_fast_params.lock; + struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; + bool subvp_immediate_flip = false; + + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { + if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && + pipe_ctx->plane_state->flip_immediate) + subvp_immediate_flip = true; + } + + // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. + if (subvp_immediate_flip) { + union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; + + hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; + hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; + hw_lock_cmd.bits.lock = lock; + hw_lock_cmd.bits.should_release = !lock; + dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); + } +} bool dcn32_set_mpc_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h index 6dbe929cf599..2d2628f31bed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h @@ -84,6 +84,8 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, struct pipe_ctx *top_pipe_to_program, bool subvp_prev_use); +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params); + void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 8356b31e1d9a..c2490e16a66a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .enable_phantom_streams = dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d8dd143cf6ea..034610b74a37 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -37,6 +37,7 @@ #include "dwb.h" #include "mcif_wb.h" #include "panel_cntl.h" +#include "dmub/inc/dmub_cmd.h" #define MAX_CLOCK_SOURCES 7 #define MAX_SVP_PHANTOM_STREAMS 2 @@ -499,6 +500,11 @@ struct bw_context { struct display_mode_lib dml; }; +struct dc_dmub_cmd { + union dmub_rb_cmd dmub_cmd; + enum dm_dmub_wait_type wait_type; +}; + /** * struct dc_state - The full description of a state requested by users */ @@ -547,6 +553,11 @@ struct dc_state { */ struct bw_context bw_ctx; + struct block_sequence block_sequence[50]; + unsigned int block_sequence_steps; + struct dc_dmub_cmd dc_dmub_cmd[10]; + unsigned int dmub_cmd_count; + /** * @refcount: refcount reference * diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index df160c6a630c..cc0a3a992f7b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -44,6 +44,112 @@ struct 
dc_virtual_addr_space_config; struct dpp; struct dce_hwseq; struct link_resource; +struct dc_dmub_cmd; + +struct subvp_pipe_control_lock_fast_params { + struct dc *dc; + bool lock; + struct pipe_ctx *pipe_ctx; +}; + +struct pipe_control_lock_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool lock; +}; + +struct set_flip_control_gsl_params { + struct pipe_ctx *pipe_ctx; + bool flip_immediate; +}; + +struct program_triplebuffer_params { + const struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool enableTripleBuffer; +}; + +struct update_plane_addr_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_input_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_plane_state *plane_state; +}; + +struct program_gamut_remap_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_manual_trigger_params { + struct pipe_ctx *pipe_ctx; +}; + +struct send_dmcub_cmd_params { + struct dc_context *ctx; + union dmub_rb_cmd *cmd; + enum dm_dmub_wait_type wait_type; +}; + +struct setup_dpp_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_bias_and_scale_params { + struct pipe_ctx *pipe_ctx; +}; + +struct set_output_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + const struct dc_stream_state *stream; +}; + +struct update_visual_confirm_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + int mpcc_id; +}; + +union block_sequence_params { + struct update_plane_addr_params update_plane_addr_params; + struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params; + struct pipe_control_lock_params pipe_control_lock_params; + struct set_flip_control_gsl_params set_flip_control_gsl_params; + struct program_triplebuffer_params program_triplebuffer_params; + struct set_input_transfer_func_params set_input_transfer_func_params; + struct program_gamut_remap_params program_gamut_remap_params; + struct program_manual_trigger_params program_manual_trigger_params; + struct send_dmcub_cmd_params send_dmcub_cmd_params; + struct setup_dpp_params setup_dpp_params; + struct program_bias_and_scale_params program_bias_and_scale_params; + struct set_output_transfer_func_params set_output_transfer_func_params; + struct update_visual_confirm_params update_visual_confirm_params; +}; + +enum block_sequence_func { + DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST = 0, + OPTC_PIPE_CONTROL_LOCK, + HUBP_SET_FLIP_CONTROL_GSL, + HUBP_PROGRAM_TRIPLEBUFFER, + HUBP_UPDATE_PLANE_ADDR, + DPP_SET_INPUT_TRANSFER_FUNC, + DPP_PROGRAM_GAMUT_REMAP, + OPTC_PROGRAM_MANUAL_TRIGGER, + DMUB_SEND_DMCUB_CMD, + DPP_SETUP_DPP, + DPP_PROGRAM_BIAS_AND_SCALE, + DPP_SET_OUTPUT_TRANSFER_FUNC, + MPC_UPDATE_VISUAL_CONFIRM, +}; + +struct block_sequence { + union block_sequence_params params; + enum block_sequence_func func; +}; struct hw_sequencer_funcs { void (*hardware_release)(struct dc *dc); @@ -252,6 +358,7 @@ struct hw_sequencer_funcs { const struct tg_color *solid_color, int width, int height, int offset); + void (*subvp_pipe_control_lock_fast)(union block_sequence_params *params); void (*z10_restore)(const struct dc *dc); void (*z10_save_init)(struct dc *dc); @@ -313,4 +420,23 @@ void get_mclk_switch_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps); + +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx 
*pipe_ctx); + +void hwss_send_dmcub_cmd(union block_sequence_params *params); + +void hwss_program_manual_trigger(union block_sequence_params *params); + +void hwss_setup_dpp(union block_sequence_params *params); + +void hwss_program_bias_and_scale(union block_sequence_params *params); + #endif /* __DC_HW_SEQUENCER_H__ */ -- cgit v1.2.3
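
To make the build/execute split introduced by the last patch easier to follow outside the driver tree, here is a minimal, self-contained C sketch of the same pattern. Every name in it (step_func, step_params, sequence_step, seq_build, seq_execute) is a hypothetical stand-in, not the DC API; it only mirrors the shape of hwss_build_fast_sequence()/hwss_execute_sequence(): the build phase records an ordered array of {function id, parameters} entries without touching hardware, and the execute phase replays them in one pass, which is what lets the driver keep the whole fast update inside a single lock window.

/* Illustrative sketch only: hypothetical names, not the DC interfaces. */
#include <stdio.h>

enum step_func {
	STEP_LOCK,	/* plays the role of OPTC_PIPE_CONTROL_LOCK  */
	STEP_FLIP,	/* plays the role of HUBP_UPDATE_PLANE_ADDR  */
	STEP_UNLOCK,
};

struct lock_params { int pipe; int lock; };
struct flip_params { int pipe; unsigned long addr; };

union step_params {
	struct lock_params lock_params;
	struct flip_params flip_params;
};

struct sequence_step {
	union step_params params;
	enum step_func func;
};

#define MAX_STEPS 16

/* Build phase: record what to do and with which parameters; no HW access. */
static int seq_build(struct sequence_step steps[], int pipe, unsigned long addr)
{
	int n = 0;

	steps[n].func = STEP_LOCK;
	steps[n].params.lock_params = (struct lock_params){ .pipe = pipe, .lock = 1 };
	n++;

	steps[n].func = STEP_FLIP;
	steps[n].params.flip_params = (struct flip_params){ .pipe = pipe, .addr = addr };
	n++;

	steps[n].func = STEP_UNLOCK;
	steps[n].params.lock_params = (struct lock_params){ .pipe = pipe, .lock = 0 };
	n++;

	return n;
}

/* Execute phase: dispatch each recorded step; printf stands in for HW hooks. */
static void seq_execute(const struct sequence_step steps[], int num_steps)
{
	int i;

	for (i = 0; i < num_steps; i++) {
		switch (steps[i].func) {
		case STEP_LOCK:
		case STEP_UNLOCK:
			printf("pipe %d: lock=%d\n",
			       steps[i].params.lock_params.pipe,
			       steps[i].params.lock_params.lock);
			break;
		case STEP_FLIP:
			printf("pipe %d: flip to 0x%lx\n",
			       steps[i].params.flip_params.pipe,
			       steps[i].params.flip_params.addr);
			break;
		}
	}
}

int main(void)
{
	struct sequence_step steps[MAX_STEPS];
	int n = seq_build(steps, 0, 0x1000);

	seq_execute(steps, n);
	return 0;
}

Run as an ordinary user-space program this prints the three recorded steps in order; in the real sequencer the switch dispatches into dc->hwss/hws->funcs callbacks instead of printf(), and the pre-built DMUB command array is sent through the same mechanism as one of the recorded step types.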