From: Rodrigo Siqueira <rodrigo.sique...@amd.com>

Replace spaces with tabs in multiple functions and adjust the indentation
in a few other places to improve readability.

Reviewed-by: Aurabindo Pillai <aurabindo.pil...@amd.com>
Signed-off-by: Rodrigo Siqueira <rodrigo.sique...@amd.com>
---
 .../drm/amd/display/dc/dcn32/dcn32_resource.c |  44 ++---
 .../drm/amd/display/dc/dml/dcn20/dcn20_fpu.c  | 170 +++++++++---------
 2 files changed, 109 insertions(+), 105 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index e30d1f60695d..0beb11d95eb7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -324,7 +324,6 @@ static const struct dcn10_link_enc_shift le_shift = {
 
 static const struct dcn10_link_enc_mask le_mask = {
        LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \
-
        //DPCS_DCN31_MASK_SH_LIST(_MASK)
 };
 
@@ -2093,27 +2092,28 @@ static bool dcn32_resource_construct(
        uint32_t pipe_fuses = 0;
        uint32_t num_pipes  = 4;
 
-       #undef REG_STRUCT
-       #define REG_STRUCT bios_regs
-               bios_regs_init();
-
-       #undef REG_STRUCT
-       #define REG_STRUCT clk_src_regs
-               clk_src_regs_init(0, A),
-               clk_src_regs_init(1, B),
-               clk_src_regs_init(2, C),
-               clk_src_regs_init(3, D),
-               clk_src_regs_init(4, E);
-       #undef REG_STRUCT
-       #define REG_STRUCT abm_regs
-               abm_regs_init(0),
-               abm_regs_init(1),
-               abm_regs_init(2),
-               abm_regs_init(3);
-
-       #undef REG_STRUCT
-       #define REG_STRUCT dccg_regs
-               dccg_regs_init();
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+       bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+       clk_src_regs_init(0, A),
+       clk_src_regs_init(1, B),
+       clk_src_regs_init(2, C),
+       clk_src_regs_init(3, D),
+       clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+       abm_regs_init(0),
+       abm_regs_init(1),
+       abm_regs_init(2),
+       abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+       dccg_regs_init();
 
        DC_FP_START();
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 38d1f2be8cf3..6e32dc68f7bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -917,19 +917,19 @@ void dcn20_populate_dml_writeback_from_context(struct dc *dc,
 }
 
 void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params,
-                                struct dc_state *context,
-                                display_e2e_pipe_params_st *pipes,
-                                int pipe_cnt, int i)
+                                struct dc_state *context,
+                                display_e2e_pipe_params_st *pipes,
+                                int pipe_cnt, int i)
 {
-       int k;
+       int k;
 
-       dc_assert_fp_enabled();
+       dc_assert_fp_enabled();
 
-       for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
-               wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       }
-       wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
+       for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
+               wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       }
+       wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
 }
 
 static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
@@ -1037,11 +1037,11 @@ static void dcn20_adjust_freesync_v_startup(
        *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
 }
 
-void dcn20_calculate_dlg_params(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel)
+void dcn20_calculate_dlg_params(struct dc *dc,
+                               struct dc_state *context,
+                               display_e2e_pipe_params_st *pipes,
+                               int pipe_cnt,
+                               int vlevel)
 {
        int i, pipe_idx;
 
@@ -1083,6 +1083,7 @@ void dcn20_calculate_dlg_params(
                pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
                pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
                pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
                if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
                        // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
                        context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
@@ -1091,6 +1092,7 @@ void dcn20_calculate_dlg_params(
                        context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
                        context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
                }
+
                if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
                        context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
                context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
@@ -1118,6 +1120,7 @@ void dcn20_calculate_dlg_params(
                if (!context->res_ctx.pipe_ctx[i].stream)
                        continue;
 
+               /* cstate disabled on 201 */
                if (dc->ctx->dce_version == DCN_VERSION_2_01)
                        cstate_en = false;
 
@@ -1201,11 +1204,10 @@ static void swizzle_to_dml_params(
        }
 }
 
-int dcn20_populate_dml_pipes_from_context(
-               struct dc *dc,
-               struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               bool fast_validate)
+int dcn20_populate_dml_pipes_from_context(struct dc *dc,
+                                         struct dc_state *context,
+                                         display_e2e_pipe_params_st *pipes,
+                                         bool fast_validate)
 {
        int pipe_cnt, i;
        bool synchronized_vblank = true;
@@ -1507,6 +1509,7 @@ int dcn20_populate_dml_pipes_from_context(
                        default:
                                break;
                        }
+
                        pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
                        pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
                        pipes[pipe_cnt].pipe.src.viewport_x_y = scl->viewport.x;
@@ -1615,13 +1618,12 @@ int dcn20_populate_dml_pipes_from_context(
        return pipe_cnt;
 }
 
-void dcn20_calculate_wm(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int *out_pipe_cnt,
-               int *pipe_split_from,
-               int vlevel,
-               bool fast_validate)
+void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
+                       display_e2e_pipe_params_st *pipes,
+                       int *out_pipe_cnt,
+                       int *pipe_split_from,
+                       int vlevel,
+                       bool fast_validate)
 {
        int pipe_cnt, i, pipe_idx;
 
@@ -1733,8 +1735,11 @@ void dcn20_calculate_wm(
        context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 }
 
-void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
-               struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
+void dcn20_update_bounding_box(struct dc *dc,
+                              struct _vcs_dpi_soc_bounding_box_st *bb,
+                              struct pp_smu_nv_clock_table *max_clocks,
+                              unsigned int *uclk_states,
+                              unsigned int num_states)
 {
        int num_calculated_states = 0;
        int min_dcfclk = 0;
@@ -1796,9 +1801,8 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
        bb->clock_limits[num_calculated_states].state = bb->num_states;
 }
 
-void dcn20_cap_soc_clocks(
-               struct _vcs_dpi_soc_bounding_box_st *bb,
-               struct pp_smu_nv_clock_table max_clocks)
+void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
+                         struct pp_smu_nv_clock_table max_clocks)
 {
        int i;
 
@@ -1954,80 +1958,80 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
 }
 
 bool dcn20_validate_bandwidth_fp(struct dc *dc,
-                                struct dc_state *context,
-                                bool fast_validate)
+                                struct dc_state *context,
+                                bool fast_validate)
 {
-       bool voltage_supported = false;
-       bool full_pstate_supported = false;
-       bool dummy_pstate_supported = false;
-       double p_state_latency_us;
+       bool voltage_supported = false;
+       bool full_pstate_supported = false;
+       bool dummy_pstate_supported = false;
+       double p_state_latency_us;
 
-       dc_assert_fp_enabled();
+       dc_assert_fp_enabled();
 
-       p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
-       context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
-               dc->debug.disable_dram_clock_change_vactive_support;
-       context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
-               dc->debug.enable_dram_clock_change_one_display_vactive;
+       p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+       context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
+               dc->debug.disable_dram_clock_change_vactive_support;
+       context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+               dc->debug.enable_dram_clock_change_one_display_vactive;
 
-       /*Unsafe due to current pipe merge and split logic*/
-       ASSERT(context != dc->current_state);
+       /*Unsafe due to current pipe merge and split logic*/
+       ASSERT(context != dc->current_state);
 
-       if (fast_validate) {
-               return dcn20_validate_bandwidth_internal(dc, context, true);
-       }
+       if (fast_validate) {
+               return dcn20_validate_bandwidth_internal(dc, context, true);
+       }
 
-       // Best case, we support full UCLK switch latency
-       voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
-       full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+       // Best case, we support full UCLK switch latency
+       voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+       full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-       if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
-               (voltage_supported && full_pstate_supported)) {
-               context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
-               goto restore_dml_state;
-       }
+       if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
+               (voltage_supported && full_pstate_supported)) {
+               context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
+               goto restore_dml_state;
+       }
 
-       // Fallback: Try to only support G6 temperature read latency
-       context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+       // Fallback: Try to only support G6 temperature read latency
+       context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
 
-       voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
-       dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+       voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+       dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-       if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
-               context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
-               goto restore_dml_state;
-       }
+       if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
+               context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+               goto restore_dml_state;
+       }
 
-       // ERROR: fallback is supposed to always work.
-       ASSERT(false);
+       // ERROR: fallback is supposed to always work.
+       ASSERT(false);
 
 restore_dml_state:
-       context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
-       return voltage_supported;
+       context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+       return voltage_supported;
 }
 
 void dcn20_fpu_set_wm_ranges(int i,
-                            struct pp_smu_wm_range_sets *ranges,
-                            struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
+                            struct pp_smu_wm_range_sets *ranges,
+                            struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
 {
-       dc_assert_fp_enabled();
+       dc_assert_fp_enabled();
 
-       ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
-       ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
+       ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
+       ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
 }
 
 void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
-                            int vlevel,
-                            int max_mpc_comb,
-                            int pipe_idx,
-                            bool is_validating_bw)
+                            int vlevel,
+                            int max_mpc_comb,
+                            int pipe_idx,
+                            bool is_validating_bw)
 {
-       dc_assert_fp_enabled();
+       dc_assert_fp_enabled();
 
-       if (is_validating_bw)
-               v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2;
-       else
-               v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
+       if (is_validating_bw)
+               v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2;
+       else
+               v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
 }
 
 int dcn21_populate_dml_pipes_from_context(struct dc *dc,
-- 
2.34.1
