As part of the FAMS work, we need supporting code infrastructure in DC.
The dcn30_fpu.c changes went missing during previous upstream activity,
so bring them back here.
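
For reference, a condensed sketch of the flow being restored in
dcn30_fpu_calculate_wm_and_dlg() (simplified from the diff below, not a
literal excerpt):

        context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
        if (!pstate_en) {
                /* natural mclk switch not possible: check whether the fw based
                 * vblank stretch can cover the p-state change instead
                 */
                context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching =
                        dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
                if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
                        /* pick a dummy p-state latency index, restore the original
                         * dram_clock_change_latency_us, then revalidate bandwidth
                         */
                        dummy_latency_index = ...;
                        dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
                }
        }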

Reviewed-by: Rodrigo Siqueira <rodrigo.sique...@amd.com>
Acked-by: Qingqing Zhuo <qingqing.z...@amd.com>
Signed-off-by: Qingqing Zhuo <qingqing.z...@amd.com>
---
 .../drm/amd/display/dc/dml/dcn30/dcn30_fpu.c  | 53 ++++++++++++++++---
 1 file changed, 46 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index 4fa636364793..ecfa395f5fa8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -384,9 +384,34 @@ void dcn30_fpu_calculate_wm_and_dlg(
        int i, pipe_idx;
        double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
        bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+       unsigned int dummy_latency_index = 0;
 
        dc_assert_fp_enabled();
 
+       context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+
+       if (!pstate_en) {
+               /* only when the mclk switch can not be natural, is the fw based vblank stretch attempted */
+               context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching =
+                       dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+
+               if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+                       dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+                               context, pipes, pipe_cnt, vlevel);
+
+                       /* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
+                        * we reinstate the original dram_clock_change_latency_us on the context
+                        * and all variables that may have changed up to this point, except the
+                        * newly found dummy_latency_index
+                        */
+                       context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+                       dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+                       maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+                       dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+                       pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+               }
+       }
+
        if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
                dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
 
@@ -449,15 +474,29 @@ void dcn30_fpu_calculate_wm_and_dlg(
                unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
                unsigned int min_dram_speed_mts_margin = 160;
 
-               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
-                       min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+                       dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
 
-               /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
-               for (i = 3; i > 0; i--)
-                       if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
-                               break;
+               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
+                       dm_dram_clock_change_unsupported) {
+                       int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries - 1;
 
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+                       min_dram_speed_mts =
+                               dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
+               }
+
+               if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+                       /* find largest table entry that is lower than dram speed,
+                        * but lower than DPM0 still uses DPM0
+                        */
+                       for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--)
+                               if (min_dram_speed_mts + min_dram_speed_mts_margin >
+                                       dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts)
+                                       break;
+               }
+
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+                       dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
 
                context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
                context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
-- 
2.34.1
