[PATCH 33/33] drm/amd/display: Enable mem low power control for DCN3.1 sub-IP blocks

2021-09-08 Thread Mikita Lipski
From: Michael Strauss 

[WHY]
Sequences to handle powering down these sub-IP blocks are now ready for use

Reviewed-by: Eric Yang 
Acked-by: Mikita Lipski 
Signed-off-by: Michael Strauss 
---
 .../drm/amd/display/dc/dcn31/dcn31_resource.c| 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index cf6392eadaf2..613d34bde7dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1009,15 +1009,15 @@ static const struct dc_debug_options debug_defaults_drv 
= {
.use_max_lb = true,
.enable_mem_low_power = {
.bits = {
-   .vga = false,
-   .i2c = false,
+   .vga = true,
+   .i2c = true,
.dmcu = false, // This is previously known to cause 
hang on S3 cycles if enabled
-   .dscl = false,
-   .cm = false,
-   .mpc = false,
-   .optc = false,
-   .vpg = false,
-   .afmt = false,
+   .dscl = true,
+   .cm = true,
+   .mpc = true,
+   .optc = true,
+   .vpg = true,
+   .afmt = true,
}
},
.optimize_edp_link_rate = true,
-- 
2.25.1



[PATCH 32/33] drm/amd/display: remove force_enable_edp_fec param.

2021-09-08 Thread Mikita Lipski
From: Ian Chen 

Reviewed-by: Wenjing Liu 
Acked-by: Mikita Lipski 
Signed-off-by: Ian Chen 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 ++--
 drivers/gpu/drm/amd/display/dc/dc.h   | 2 --
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 746b31ba2708..4f4992f5bfd9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -4282,8 +4282,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
-   (link->connector_signal == SIGNAL_TYPE_EDP &&
-   link->dc->debug.force_enable_edp_fec == 
false)) // Disable FEC for eDP
+   (link->connector_signal == SIGNAL_TYPE_EDP
+   ))
is_fec_disable = true;
 
if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && 
!is_fec_disable)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 15b67239266e..8897750bdaea 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -641,8 +641,6 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool optimize_edp_link_rate; /* eDP ILR */
-   /* force enable edp FEC */
-   bool force_enable_edp_fec;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
-- 
2.25.1



[PATCH 31/33] drm/amd/display: Add VPG and AFMT low power support for DCN3.1

2021-09-08 Thread Mikita Lipski
From: Michael Strauss 

[WHY]
Power down VPG and AFMT blocks when not in use

[HOW]
Create afmt31 and vpg31 structs and add necessary fields to reg list

Reviewed-by: Eric Yang 
Acked-by: Mikita Lipski 
Signed-off-by: Michael Strauss 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  10 ++
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  17 ++
 drivers/gpu/drm/amd/display/dc/dc.h   |   2 +
 .../display/dc/dcn10/dcn10_stream_encoder.c   |  10 ++
 .../gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c |  24 ++-
 .../gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h |  24 +++
 .../dc/dcn30/dcn30_dio_stream_encoder.c   |   2 +
 .../gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c  |   2 +-
 .../gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h  |  11 ++
 drivers/gpu/drm/amd/display/dc/dcn31/Makefile |   3 +-
 .../gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c |  92 ++
 .../gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h | 126 ++
 .../drm/amd/display/dc/dcn31/dcn31_resource.c |  50 +++---
 .../gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c  |  87 ++
 .../gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h  | 162 ++
 15 files changed, 593 insertions(+), 29 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8bebfb0ca206..e66ca19e2f73 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -71,6 +71,8 @@
 
 #include "dmub/dmub_srv.h"
 
+#include "dcn30/dcn30_vpg.h"
+
 #include "i2caux_interface.h"
 #include "dce/dmub_hw_lock_mgr.h"
 
@@ -2555,6 +2557,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
enum surface_update_type update_type,
struct dc_state *context)
 {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   struct vpg *vpg;
+#endif
int j;
 
// Stream updates
@@ -2575,6 +2580,11 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vrr_infopacket ||
stream_update->vsc_infopacket ||
stream_update->vsp_infopacket) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   vpg = pipe_ctx->stream_res.stream_enc->vpg;
+   if (vpg && vpg->funcs->vpg_poweron)
+   vpg->funcs->vpg_poweron(vpg);
+#endif
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a87a71b815ad..746b31ba2708 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -51,6 +51,8 @@
 #include "inc/link_enc_cfg.h"
 #include "inc/link_dpcd.h"
 
+#include "dc/dcn30/dcn30_vpg.h"
+
 #define DC_LOGGER_INIT(logger)
 
 #define LINK_INFO(...) \
@@ -3653,6 +3655,7 @@ void core_link_enable_stream(
struct link_encoder *link_enc;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+   struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
 #endif
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
@@ -3744,6 +3747,12 @@ void core_link_enable_stream(
 
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   // Enable VPG before building infoframe
+   if (vpg && vpg->funcs->vpg_poweron)
+   vpg->funcs->vpg_poweron(vpg);
+#endif
+
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
 
@@ -3890,6 +3899,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc  *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+#endif
 
if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
@@ -3973,6 +3985,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)

pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, 
OUT_MUX_DIO);
}
 #endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   if (vpg &

[PATCH 30/33] drm/amd/display: Revert adding degamma coefficients

2021-09-08 Thread Mikita Lipski
From: Jaehyun Chung 

[Why]
Degamma coefficients are calculated in our degamma formula using
the regamma coefficients. We do not need to add separate degamma
coefficients.

[How]
Remove the change to add separate degamma coefficients.
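
As an illustration of the relationship (not part of this patch): the degamma
curve is simply the inverse of the regamma curve, so both directions can be
built from one set of constants. The standalone sketch below uses the standard
sRGB constants in plain floating point; the driver keeps them in the
fixed-point tables touched by this diff.

#include <math.h>
#include <stdio.h>

static const double a0 = 0.0031308; /* linear-domain breakpoint      */
static const double a1 = 12.92;     /* slope of the linear segment   */
static const double a2 = 0.055;     /* offset in the power segment   */
static const double a3 = 0.055;
static const double g  = 2.4;       /* user gamma                    */

static double regamma(double linear)
{
        return (linear <= a0) ? a1 * linear
                              : (1.0 + a2) * pow(linear, 1.0 / g) - a3;
}

static double degamma(double encoded)
{
        /* same constants, inverted; encoded-domain breakpoint is a1 * a0 */
        return (encoded <= a1 * a0) ? encoded / a1
                                    : pow((encoded + a3) / (1.0 + a2), g);
}

int main(void)
{
        double x = 0.5;

        printf("round trip: %f -> %f -> %f\n", x, regamma(x), degamma(regamma(x)));
        return 0;
}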

Reviewed-by: Krunoslav Kovac 
Acked-by: Mikita Lipski 
Signed-off-by: Jaehyun Chung 
---
 .../amd/display/modules/color/color_gamma.c   | 64 ---
 1 file changed, 25 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 2465c88e2879..64a38f08f497 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -54,18 +54,17 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
  * just multiply with 2^gamma which can be computed once, and save the result 
so we
  * recursively compute all the values.
  */
-   
 /*sRGB 709 2.2 2.4 P3*/
-static const int32_t regamma_numerator01[] = { 31308,   18, 0,  0,  0};
-static const int32_t regamma_numerator02[] = { 12920,   4500,   0,  0,  0};
-static const int32_t regamma_numerator03[] = { 55,  99, 0,  0,  0};
-static const int32_t regamma_numerator04[] = { 55,  99, 0,  0,  0};
-static const int32_t regamma_numerator05[] = { 2400,2200,   2200, 2400, 
2600};
-
-static const int32_t degamma_numerator01[] = { 404500,  18, 0,  0,  0};
-static const int32_t degamma_numerator02[] = { 12920,   4500,   0,  0,  0};
-static const int32_t degamma_numerator03[] = { 55,  99, 0,  0,  0};
-static const int32_t degamma_numerator04[] = { 55,  99, 0,  0,  0};
-static const int32_t degamma_numerator05[] = { 2400,2200,   2200, 2400, 
2600};
+
+/*
+ * Regamma coefficients are used for both regamma and degamma. Degamma
+ * coefficients are calculated in our formula using the regamma coefficients.
+ */
+/*sRGB 
709 2.2 2.4 P3*/
+static const int32_t numerator01[] = { 31308,   18, 0,  0,  0};
+static const int32_t numerator02[] = { 12920,   4500,   0,  0,  0};
+static const int32_t numerator03[] = { 55,  99, 0,  0,  0};
+static const int32_t numerator04[] = { 55,  99, 0,  0,  0};
+static const int32_t numerator05[] = { 2400,2200,   2200, 2400, 2600};
 
 /* one-time setup of X points */
 void setup_x_points_distribution(void)
@@ -295,7 +294,7 @@ struct dividers {
 
 
 static bool build_coefficients(struct gamma_coefficients *coefficients,
-   enum dc_transfer_func_predefined type, bool isRegamma)
+   enum dc_transfer_func_predefined type)
 {
 
uint32_t i = 0;
@@ -318,29 +317,16 @@ static bool build_coefficients(struct gamma_coefficients 
*coefficients,
}
 
do {
-   if (isRegamma) {
-   coefficients->a0[i] = dc_fixpt_from_fraction(
-   regamma_numerator01[index], 1000);
-   coefficients->a1[i] = dc_fixpt_from_fraction(
-   regamma_numerator02[index], 1000);
-   coefficients->a2[i] = dc_fixpt_from_fraction(
-   regamma_numerator03[index], 1000);
-   coefficients->a3[i] = dc_fixpt_from_fraction(
-   regamma_numerator04[index], 1000);
-   coefficients->user_gamma[i] = dc_fixpt_from_fraction(
-   regamma_numerator05[index], 1000);
-   } else {
-   coefficients->a0[i] = dc_fixpt_from_fraction(
-   degamma_numerator01[index], 1000);
-   coefficients->a1[i] = dc_fixpt_from_fraction(
-   degamma_numerator02[index], 1000);
-   coefficients->a2[i] = dc_fixpt_from_fraction(
-   degamma_numerator03[index], 1000);
-   coefficients->a3[i] = dc_fixpt_from_fraction(
-   degamma_numerator04[index], 1000);
-   coefficients->user_gamma[i] = dc_fixpt_from_fraction(
-   degamma_numerator05[index], 1000);
-   }
+   coefficients->a0[i] = dc_fixpt_from_fraction(
+   numerator01[index], 1000);
+   coefficients->a1[i] = dc_fixpt_from_fraction(
+   numerator02[index], 1000);
+   coefficients->a2[i] = dc_fixpt_from_fraction(
+   numerator03[index], 1000);
+   coefficients->a3[i] = dc_fixpt_from_fraction(
+   numerator04[index], 1000);
+   coefficients->user_gamma[i] = dc_fixpt_from_fraction(
+  

[PATCH 25/33] drm/amd/display: [FW Promotion] Release 0.0.82

2021-09-08 Thread Mikita Lipski
From: Anthony Koo 

* PSR SMU optimizations
* MST dock fixes

Reviewed-by: Anthony Koo 
Acked-by: Mikita Lipski 
Signed-off-by: Anthony Koo 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 8cf86f7cda41..2c4ec3cac70e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x8ebc06e16
+#define DMUB_FW_VERSION_GIT_HASH 0x3f002dea8
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 81
+#define DMUB_FW_VERSION_REVISION 82
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
-- 
2.25.1



[PATCH 28/33] drm/amd/display: Fix unstable HDCP compliance on Chrome Barcelo

2021-09-08 Thread Mikita Lipski
From: Qingqing Zhuo 

[Why]
Intermittently, there are two occurrences of 0-stream commits in a
single HPD event. The current HDCP sequence does not consider such
a scenario, and will thus disable HDCP.

[How]
Add a condition check to include the stream remove and re-enable
case for HDCP enable.

Reviewed-by: Bhawanpreet Lakha 
Acked-by: Mikita Lipski 
Signed-off-by: Qingqing Zhuo 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 22 +--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 87499ef5282c..0753bbb3bfc3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8363,8 +8363,26 @@ static bool is_content_protection_different(struct 
drm_connector_state *state,
state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
-   /* Check if something is connected/enabled, otherwise we start hdcp but 
nothing is connected/enabled
-* hot-plug, headless s3, dpms
+   /* Stream removed and re-enabled
+*
+* Can sometimes overlap with the HPD case,
+* thus set update_hdcp to false to avoid
+* setting HDCP multiple times.
+*
+* Handles: DESIRED -> DESIRED (Special case)
+*/
+   if (!(old_state->crtc && old_state->crtc->enabled) &&
+   state->crtc && state->crtc->enabled &&
+   connector->state->content_protection == 
DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+   dm_con_state->update_hdcp = false;
+   return true;
+   }
+
+   /* Hot-plug, headless s3, dpms
+*
+* Only start HDCP if the display is connected/enabled.
+* update_hdcp flag will be set to false until the next
+* HPD comes in.
 *
 * Handles: DESIRED -> DESIRED (Special case)
 */
-- 
2.25.1



[PATCH 29/33] drm/amd/display: Link training retry fix for abort case

2021-09-08 Thread Mikita Lipski
From: Meenakshikumar Somasundaram 

[Why]
If link training is aborted, it shall be retried if sink is present.

[How]
Check the HPD status to find out whether a sink is present. If a sink is
present, then link training shall be tried again with the same settings.
Otherwise, link training shall be aborted.

Reviewed-by: Jimmy Kizito 
Acked-by: Mikita Lipski 
Signed-off-by: Meenakshikumar Somasundaram 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6fc0e12a715a..ac4896ff912c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2434,9 +2434,13 @@ bool perform_link_training_with_retries(
dp_disable_link_phy(link, signal);
 
/* Abort link training if failure due to sink being unplugged. 
*/
-   if (status == LINK_TRAINING_ABORT)
-   break;
-   else if (do_fallback) {
+   if (status == LINK_TRAINING_ABORT) {
+   enum dc_connection_type type = dc_connection_none;
+
+   dc_link_detect_sink(link, &type);
+   if (type == dc_connection_none)
+   break;
+   } else if (do_fallback) {
decide_fallback_link_setting(*link_setting, 
&current_setting, status);
/* Fail link training if reduced link bandwidth no 
longer meets
 * stream requirements.
-- 
2.25.1



[PATCH 27/33] drm/amd/display: 3.2.152

2021-09-08 Thread Mikita Lipski
From: Aric Cyr 

* Correct degamma coefficients
* Optimize bandwidth on following fast update
* Fix multiple memory leaks reported by coverity
* Get backlight from PWM if DMCU is not initialized

Reviewed-by: Aric Cyr 
Acked-by: Mikita Lipski 
Signed-off-by: Aric Cyr 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 134faa7a1937..442605354430 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.151"
+#define DC_VER "3.2.152"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.25.1



[PATCH 22/33] drm/amd/display: dsc mst 2 4K displays go dark with 2 lane HBR3

2021-09-08 Thread Mikita Lipski
From: Hersen Wu 

[Why]
The call stack of the amdgpu DSC MST pbn / slot number calculation is as below:
-compute_bpp_x16_from_target_bandwidth
-decide_dsc_target_bpp_x16
-setup_dsc_config
-dc_dsc_compute_bandwidth_range
-compute_mst_dsc_configs_for_link
-compute_mst_dsc_configs_for_state

from pbn -> dsc target bpp_x16

bpp_x16 is calculated by compute_bpp_x16_from_target_bandwidth.
Besides pixel clock and bpp, num_slices_h and bpp_increment_div
also affect bpp_x16.

from dsc target bpp_x16 -> pbn

within dm_update_mst_vcpi_slots_for_dsc,
pbn = drm_dp_calc_pbn_mode(clock, bpp_x16, true);

drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
{
  return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
8 * 54 * 1000 * 1000);
}

bpp / 16 truncates the digits after the decimal point, which introduces
a calculation delta. drm_dp_calc_pbn_mode does not have the other
information, such as num_slices_h and bpp_increment_div, so it cannot
properly do the reverse calculation from bpp_x16 to pbn.

The pbn from drm_dp_calc_pbn_mode is less than the pbn from
compute_mst_dsc_configs_for_state. As a result, not enough MST slots
are allocated to the display, and the display does not light up.

[How]
Pass the pbn calculated by compute_mst_dsc_configs_for_state to
dm_update_mst_vcpi_slots_for_dsc.
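
For illustration only (not part of the patch): the standalone sketch below
mirrors the integer math quoted above and shows how the bpp / 16 truncation
shrinks the pbn compared to keeping the full bpp_x16 fixed-point value. The
pixel clock and bpp_x16 numbers are made up for the example.

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
        return (n + d - 1) / d;
}

/* same shape as drm_dp_calc_pbn_mode(clock, bpp, true): bpp / 16 truncates */
static uint64_t pbn_truncated(uint64_t clock_khz, uint64_t bpp_x16)
{
        return div_round_up(clock_khz * (bpp_x16 / 16) * 64 * 1006,
                            8ULL * 54 * 1000 * 1000);
}

/* same formula, but keeping the x16 fixed-point precision */
static uint64_t pbn_full(uint64_t clock_khz, uint64_t bpp_x16)
{
        return div_round_up(clock_khz * bpp_x16 * 64 * 1006,
                            16ULL * 8 * 54 * 1000 * 1000);
}

int main(void)
{
        uint64_t clock_khz = 533250; /* hypothetical 4k@60-class pixel clock */
        uint64_t bpp_x16 = 191;      /* hypothetical DSC target, 11.9375 bpp */

        printf("pbn with truncated bpp: %llu\n",
               (unsigned long long)pbn_truncated(clock_khz, bpp_x16));
        printf("pbn with full bpp_x16:  %llu\n",
               (unsigned long long)pbn_full(clock_khz, bpp_x16));
        return 0;
}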

Cc: sta...@vger.kernel.org

Reviewed-by: Scott Foster 
Acked-by: Mikita Lipski 
Signed-off-by: Hersen Wu 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 22 ++-
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 18 +++
 .../display/amdgpu_dm/amdgpu_dm_mst_types.h   | 11 +-
 3 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a6c8c30f8c2d..87499ef5282c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7090,14 +7090,15 @@ const struct drm_encoder_helper_funcs 
amdgpu_dm_encoder_helper_funcs = {
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
-   struct dc_state *dc_state)
+   struct dc_state *dc_state,
+   struct dsc_mst_fairness_vars *vars)
 {
struct dc_stream_state *stream = NULL;
struct drm_connector *connector;
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
-   int i, j, clock, bpp;
+   int i, j, clock;
int vcpi, pbn_div, pbn = 0;
 
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -7136,9 +7137,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct 
drm_atomic_state *state,
}
 
pbn_div = dm_mst_get_pbn_divider(stream->link);
-   bpp = stream->timing.dsc_cfg.bits_per_pixel;
clock = stream->timing.pix_clk_100hz / 10;
-   pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+   /* pbn is calculated by compute_mst_dsc_configs_for_state*/
+   for (j = 0; j < dc_state->stream_count; j++) {
+   if (vars[j].aconnector == aconnector) {
+   pbn = vars[j].pbn;
+   break;
+   }
+   }
+
vcpi = drm_dp_mst_atomic_enable_dsc(state,
aconnector->port,
pbn, pbn_div,
@@ -10542,6 +10549,9 @@ static int amdgpu_dm_atomic_check(struct drm_device 
*dev,
int ret, i;
bool lock_and_validation_needed = false;
struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
 
trace_amdgpu_dm_atomic_check_begin(state);
 
@@ -10772,10 +10782,10 @@ static int amdgpu_dm_atomic_check(struct drm_device 
*dev,
goto fail;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-   if (!compute_mst_dsc_configs_for_state(state, 
dm_state->context))
+   if (!compute_mst_dsc_configs_for_state(state, 
dm_state->context, vars))
goto fail;
 
-   ret = dm_update_mst_vcpi_slots_for_dsc(state, 
dm_state->context);
+   ret = dm_update_mst_vcpi_slots_for_dsc(state, 
dm_state->context, vars);
if (ret)
goto fail;
 #endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 705f2e67edb5..1a99fcc27078 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -518,12 +518,7 @@ struct dsc_mst_fairness_pa

[PATCH 26/33] drm/amd/display: Correct degamma coefficients

2021-09-08 Thread Mikita Lipski
From: Jaehyun Chung 

[Why]
Some incorrect coefficients were being used

Reviewed-by: Michael Strauss 
Acked-by: Mikita Lipski 
Signed-off-by: Jaehyun Chung 
---
 drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 275f11f8bea3..2465c88e2879 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -61,7 +61,7 @@ static const int32_t regamma_numerator03[] = { 55,  99,   
  0,  0,  0};
 static const int32_t regamma_numerator04[] = { 55,  99, 0,  0,  0};
 static const int32_t regamma_numerator05[] = { 2400,2200,   2200, 2400, 
2600};
 
-static const int32_t degamma_numerator01[] = { 40450,   81, 0,  0,  0};
+static const int32_t degamma_numerator01[] = { 404500,  18, 0,  0,  0};
 static const int32_t degamma_numerator02[] = { 12920,   4500,   0,  0,  0};
 static const int32_t degamma_numerator03[] = { 55,  99, 0,  0,  0};
 static const int32_t degamma_numerator04[] = { 55,  99, 0,  0,  0};
-- 
2.25.1



[PATCH 24/33] drm/amd/display: Add helper for blanking all dp displays

2021-09-08 Thread Mikita Lipski
From: "Leo (Hanghong) Ma" 

[Why & How]
The code to blank all DP displays has been duplicated in many places,
so add a helper in dc_link to make it more concise.

Reviewed-by: Aric Cyr 
Acked-by: Mikita Lipski 
Signed-off-by: Leo (Hanghong) Ma 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 45 +++
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  1 +
 .../display/dc/dce110/dce110_hw_sequencer.c   | 24 ++
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 41 ++---
 .../drm/amd/display/dc/dcn30/dcn30_hwseq.c| 39 ++--
 .../drm/amd/display/dc/dcn31/dcn31_hwseq.c| 38 ++--
 7 files changed, 59 insertions(+), 131 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7a1f910d711e..8bebfb0ca206 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1,4 +1,4 @@
-/*
+ /*
  * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 46933a43ef2e..a87a71b815ad 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1904,6 +1904,51 @@ static enum dc_status enable_link_dp_mst(
return enable_link_dp(state, pipe_ctx);
 }
 
+void blank_all_dp_displays(struct dc *dc, bool hw_init)
+{
+   unsigned int i, j, fe;
+   uint8_t dpcd_power_state = '\0';
+   enum dc_status status = DC_ERROR_UNEXPECTED;
+
+   for (i = 0; i < dc->link_count; i++) {
+   enum signal_type signal = dc->links[i]->connector_signal;
+
+   if ((signal == SIGNAL_TYPE_EDP) ||
+   (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+   if (hw_init && signal != SIGNAL_TYPE_EDP) {
+   /* DP 2.0 spec requires that we read LTTPR caps 
first */
+   dp_retrieve_lttpr_cap(dc->links[i]);
+   /* if any of the displays are lit up turn them 
off */
+   status = core_link_read_dpcd(dc->links[i], 
DP_SET_POWER,
+   &dpcd_power_state, 
sizeof(dpcd_power_state));
+   }
+
+   if ((signal != SIGNAL_TYPE_EDP && status == DC_OK && 
dpcd_power_state == DP_POWER_STATE_D0) ||
+   (!hw_init && 
dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc))) {
+   if (dc->links[i]->ep_type == 
DISPLAY_ENDPOINT_PHY &&
+   
dc->links[i]->link_enc->funcs->get_dig_frontend) {
+   fe = 
dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
+   if (fe == ENGINE_ID_UNKNOWN)
+   continue;
+
+   for (j = 0; j < 
dc->res_pool->stream_enc_count; j++) {
+   if (fe == 
dc->res_pool->stream_enc[j]->id) {
+   
dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+   
dc->res_pool->stream_enc[j]);
+   break;
+   }
+   }
+   }
+
+   if 
(!dc->links[i]->wa_flags.dp_keep_receiver_powered ||
+   (hw_init && signal != SIGNAL_TYPE_EDP))
+   dp_receiver_power_ctrl(dc->links[i], 
false);
+   }
+   }
+   }
+
+}
+
 static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
enum engine_id eng_id,
struct ext_hdmi_settings *settings)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h 
b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 56340a176554..899e20725d87 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -277,6 +277,7 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
struct psr_context *psr_context);
 
 void dc_link_get_psr_residency(const struct dc_link *link, uint32_t 
*residency);
+void blank_all_dp_displays(struct dc *dc, bool hw_init);
 
 /* Request DC to detect if there is a Panel connected.
  * boot - If this call is during initial boot.
diff --git a/dr

[PATCH 23/33] drm/amd/display: Add periodic detection when zstate is enabled

2021-09-08 Thread Mikita Lipski
From: Eric Yang 

[Why]
When the system is in Z10, HPD interrupts cannot fire, so we may miss
display configuration changes.

[How]
When Zstate is enabled, if DMUB indicates DCN has lost power, do a
complete detection periodically.

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Mikita Lipski 
Signed-off-by: Eric Yang 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  |  6 ++
 .../drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c   |  2 ++
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  2 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c  |  4 
 drivers/gpu/drm/amd/display/dc/dc.h|  2 +-
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c |  2 +-
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h |  2 +-
 drivers/gpu/drm/amd/display/dc/dm_helpers.h|  4 
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  |  2 +-
 drivers/gpu/drm/amd/display/dmub/dmub_srv.h|  4 
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c  |  8 
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h  |  2 ++
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c| 10 +-
 13 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 8c1792637836..1aa69dd8e02f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -793,4 +793,10 @@ void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
// FPGA programming for this clock in diags framework that
// needs to go through dm layer, therefore leave dummy interace here
 }
+
+
+void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
+{
+   /* TODO: add peridic detection implementation */
+}
 #endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 1414da4b95d7..d7bf9283dc90 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -142,6 +142,7 @@ static void dcn31_update_clocks(struct clk_mgr 
*clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
new_clocks->zstate_support != 
clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, true);
+   dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, 
true);
clk_mgr_base->clks.zstate_support = 
new_clocks->zstate_support;
}
 
@@ -166,6 +167,7 @@ static void dcn31_update_clocks(struct clk_mgr 
*clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
new_clocks->zstate_support != 
clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, false);
+   dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, 
false);
clk_mgr_base->clks.zstate_support = 
new_clocks->zstate_support;
}
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 822239b59a78..7a1f910d711e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1562,7 +1562,7 @@ static uint8_t get_stream_mask(struct dc *dc, struct 
dc_state *context)
 }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc)
+void dc_z10_restore(const struct dc *dc)
 {
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 61e49671fed5..46933a43ef2e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1250,6 +1250,10 @@ bool dc_link_detect(struct dc_link *link, enum 
dc_detect_reason reason)
}
}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   dc_z10_restore(dc);
+#endif
+
/* get out of low power state */
if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT)
clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index ece44796a74f..134faa7a1937 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1363,7 +1363,7 @@ void dc_hardware_release(struct dc *dc);
 
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc);
+void dc_z10_restore(const struct dc *dc);
 void dc_z10_save_init(

[PATCH 21/33] drm/amd/display: Refine condition of cursor visibility for pipe-split

2021-09-08 Thread Mikita Lipski
From: Dale Zhao 

[Why]
In some scenarios, such as a fullscreen game, the major plane is scaled.
If an upper layer then owns the cursor, the cursor is invisible over the
majority of the screen.

[How]
Instead of assuming the upper plane handles the cursor, sum up the upper
split planes on the same layer. If the whole upper plane covers the
current half/whole pipe plane, disable the cursor.
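
A minimal standalone sketch of that containment test (illustrative only, not
the driver code; the plane sizes below are made up): merge the two halves of a
pipe-split plane horizontally, then check whether the merged rect fully covers
the plane on the pipe whose cursor may be disabled.

#include <stdbool.h>
#include <stdio.h>

struct rect {
        int x, y, width, height;
};

/* merge two same-height halves produced by a horizontal pipe split */
static struct rect merge_split_halves(struct rect a, struct rect b)
{
        struct rect m = a;

        m.x = (b.x < a.x) ? b.x : a.x;
        m.width = a.width + b.width;
        return m;
}

static bool covers(struct rect outer, struct rect inner)
{
        return inner.x >= outer.x && inner.y >= outer.y &&
               inner.x + inner.width <= outer.x + outer.width &&
               inner.y + inner.height <= outer.y + outer.height;
}

int main(void)
{
        struct rect cur_pipe_plane = { 0, 0, 3840, 2160 };  /* pipe whose cursor may be disabled */
        struct rect left_half      = { 0, 0, 1920, 2160 };  /* upper plane, split pipe 1 */
        struct rect right_half     = { 1920, 0, 1920, 2160 }; /* upper plane, split pipe 2 */
        struct rect merged = merge_split_halves(left_half, right_half);

        printf("disable cursor: %s\n", covers(merged, cur_pipe_plane) ? "yes" : "no");
        return 0;
}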

Reviewed-by: Krunoslav Kovac 
Acked-by: Mikita Lipski 
Signed-off-by: Dale Zhao 
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 43 +++
 1 file changed, 24 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index b4cf2e92694c..e1edbfa761f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3241,13 +3241,11 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct 
dchub_init_data *dh_data)
 
 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
 {
-   struct pipe_ctx *test_pipe;
+   struct pipe_ctx *test_pipe, *split_pipe;
const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
-   const struct rect *r1 = &scl_data->recout, *r2;
-   int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+   struct rect r1 = scl_data->recout, r2, r2_half;
+   int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
int cur_layer = pipe_ctx->plane_state->layer_index;
-   bool upper_pipe_exists = false;
-   struct fixed31_32 one = dc_fixpt_from_int(1);
 
/**
 * Disable the cursor if there's another pipe above this with a
@@ -3256,26 +3254,33 @@ static bool dcn10_can_pipe_disable_cursor(struct 
pipe_ctx *pipe_ctx)
 */
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
 test_pipe = test_pipe->top_pipe) {
-   if (!test_pipe->plane_state->visible)
+   // Skip invisible layer and pipe-split plane on same layer
+   if (!test_pipe->plane_state->visible || 
test_pipe->plane_state->layer_index == cur_layer)
continue;
 
-   r2 = &test_pipe->plane_res.scl_data.recout;
-   r2_r = r2->x + r2->width;
-   r2_b = r2->y + r2->height;
+   r2 = test_pipe->plane_res.scl_data.recout;
+   r2_r = r2.x + r2.width;
+   r2_b = r2.y + r2.height;
+   split_pipe = test_pipe;
 
-   if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= 
r2_b)
-   return true;
+   /**
+* There is another half plane on same layer because of
+* pipe-split, merge together per same height.
+*/
+   for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+split_pipe = split_pipe->top_pipe)
+   if (split_pipe->plane_state->layer_index == 
test_pipe->plane_state->layer_index) {
+   r2_half = split_pipe->plane_res.scl_data.recout;
+   r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
+   r2.width = r2.width + r2_half.width;
+   r2_r = r2.x + r2.width;
+   break;
+   }
 
-   if (test_pipe->plane_state->layer_index < cur_layer)
-   upper_pipe_exists = true;
+   if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= 
r2_b)
+   return true;
}
 
-   // if plane scaled, assume an upper plane can handle cursor if it 
exists.
-   if (upper_pipe_exists &&
-   (scl_data->ratios.horz.value != one.value ||
-   scl_data->ratios.vert.value != one.value))
-   return true;
-
return false;
 }
 
-- 
2.25.1



[PATCH 19/33] drm/amd/display: Apply w/a for hard hang on HPD

2021-09-08 Thread Mikita Lipski
From: Qingqing Zhuo 

[Why]
HPD disable and enable sequences are not mutually exclusive
on Linux. For HPDs that span under 1s (i.e. HPD low = 1s),
part of the disable sequence (specifically, a request to SMU
to lower refclk) could come right before the call to PHY
enablement, causing DMUB to access an unresponsive PHY
and thus a hard hang on the system.

[How]
Do not allow the 48 MHz refclk to be turned off when there is any HPD
status in the connected state.

Reviewed-by: Hersen Wu 
Acked-by: Mikita Lipski 
Signed-off-by: Qingqing Zhuo 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 12 -
 .../display/dc/irq/dcn21/irq_service_dcn21.c  | 25 +++
 .../display/dc/irq/dcn21/irq_service_dcn21.h  |  2 ++
 .../gpu/drm/amd/display/dc/irq/irq_service.c  |  2 +-
 .../gpu/drm/amd/display/dc/irq/irq_service.h  |  4 +++
 5 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index c6f494f0dcea..3fabf32a0558 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -42,6 +42,7 @@
 #include "clk/clk_10_0_2_sh_mask.h"
 #include "renoir_ip_offset.h"
 
+#include "irq/dcn21/irq_service_dcn21.h"
 
 /* Constants */
 
@@ -129,9 +130,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
+   int irq_src;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
+   uint32_t hpd_state;
 
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
 
@@ -147,8 +150,15 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
 
display_count = rn_get_active_display_cnt_wa(dc, 
context);
+
+   for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= 
DC_IRQ_SOURCE_HPD5; irq_src++) {
+   hpd_state = 
dal_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
+   if (hpd_state)
+   break;
+   }
+
/* if we can go lower, go lower */
-   if (display_count == 0) {
+   if (display_count == 0 && !hpd_state) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_LOW_POWER);
/* update power state */
clk_mgr_base->clks.pwr_state = 
DCN_PWR_STATE_LOW_POWER;
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 
b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index ed54e1c819be..685528734575 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -135,6 +135,31 @@ enum dc_irq_source to_dal_irq_source_dcn21(
return DC_IRQ_SOURCE_INVALID;
 }
 
+uint32_t dal_get_hpd_state_dcn21(struct irq_service *irq_service, enum 
dc_irq_source source)
+{
+   const struct irq_source_info *info;
+   uint32_t addr;
+   uint32_t value;
+   uint32_t current_status;
+
+   info = find_irq_source_info(irq_service, source);
+   if (!info)
+   return 0;
+
+   addr = info->status_reg;
+   if (!addr)
+   return 0;
+
+   value = dm_read_reg(irq_service->ctx, addr);
+   current_status =
+   get_reg_field_value(
+   value,
+   HPD0_DC_HPD_INT_STATUS,
+   DC_HPD_SENSE);
+
+   return current_status;
+}
+
 static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h 
b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
index da2bd0e93d7a..3df2ceeb2b70 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
@@ -31,4 +31,6 @@
 struct irq_service *dal_irq_service_dcn21_create(
struct irq_service_init_data *init_data);
 
+uint32_t dal_get_hpd_state_dcn21(struct irq_service *irq_service, enum 
dc_irq_source source);
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c 
b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index a2a4fbeb83f8..4db1133e4466 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
 }
 
-static const struct irq_source

[PATCH 20/33] drm/amd/display: Optimize bandwidth on following fast update

2021-09-08 Thread Mikita Lipski
From: Nicholas Kazlauskas 

[Why]
The current call to optimize_bandwidth never occurs because a flip is
always pending from the FULL and FAST updates.

[How]
Optimize on the following flip when it's a FAST update and we know we
aren't going to be modifying the clocks again.

Reviewed-by: Bhawanpreet Lakha 
Acked-by: Mikita Lipski 
Signed-off-by: Nicholas Kazlauskas 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 17 -
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f58d3956f3e2..822239b59a78 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1801,6 +1801,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 
post_surface_trace(dc);
 
+   if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+   TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+   else
+   TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+
if (is_flip_pending_in_pipes(dc, context))
return;
 
@@ -2986,6 +2991,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != 
old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
+   } else if (update_type == UPDATE_TYPE_FAST) {
+   /* Previous frame finished and HW is ready for optimization. */
+   dc_post_update_surfaces_to_stream(dc);
}
 
 
@@ -3042,15 +3050,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
pipe_ctx->plane_state->force_full_update = 
false;
}
}
-   /*let's use current_state to update watermark etc*/
-   if (update_type >= UPDATE_TYPE_FULL) {
-   dc_post_update_surfaces_to_stream(dc);
-
-   if (dc_ctx->dce_version >= DCE_VERSION_MAX)
-   TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
-   else
-   TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
-   }
 
return;
 
-- 
2.25.1



[PATCH 17/33] drm/amd/display: Revert "Directly retrain link from debugfs"

2021-09-08 Thread Mikita Lipski
From: Anson Jacob 

This reverts commit  f5b6a20c7ef40599095c796b0500d842ffdbc639.

The reverted patch prevented new settings from taking effect; a hotplug
is required for new settings to take effect.

Reviewed-by: Mikita Lipski 
Acked-by: Mikita Lipski 
Signed-off-by: Anson Jacob 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 87daa78a32b8..f3ada9b6be5a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -247,6 +247,7 @@ static ssize_t dp_link_settings_write(struct file *f, const 
char __user *buf,
 {
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
+   struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
@@ -313,7 +314,7 @@ static ssize_t dp_link_settings_write(struct file *f, const 
char __user *buf,
prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1];
 
-   dp_retrain_link_dp_test(link, &prefer_link_settings, false);
+   dc_link_set_preferred_training_settings(dc, &prefer_link_settings, 
NULL, link, true);
 
kfree(wr_buf);
return size;
-- 
2.25.1



[PATCH 15/33] drm/amd/display: Fix multiple memory leaks reported by coverity

2021-09-08 Thread Mikita Lipski
From: Anson Jacob 

coccinelle patch used:

@@ expression enc1,vpg,afmt; @@
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }

Addresses-Coverity-ID: 1466017: ("Resource leaks")

Reviewed-by: Aurabindo Jayamohanan Pillai 
Acked-by: Mikita Lipski 
Signed-off-by: Anson Jacob 
---
 .../gpu/drm/amd/display/dc/dcn30/dcn30_resource.c|  6 +-
 .../gpu/drm/amd/display/dc/dcn301/dcn301_resource.c  |  6 +-
 .../gpu/drm/amd/display/dc/dcn302/dcn302_resource.c  |  6 +-
 .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.c| 12 ++--
 4 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 2feffe75ca62..3a8a3214f770 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1164,8 +1164,12 @@ struct stream_encoder *dcn30_stream_encoder_create(
vpg = dcn30_vpg_create(ctx, vpg_inst);
afmt = dcn30_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 912285fdce18..73b8fcb3c5c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1195,8 +1195,12 @@ struct stream_encoder *dcn301_stream_encoder_create(
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 5cd55e8573f7..fcf96cf08c76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -542,8 +542,12 @@ static struct stream_encoder 
*dcn302_stream_encoder_create(enum engine_id eng_id
vpg = dcn302_vpg_create(ctx, vpg_inst);
afmt = dcn302_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, 
vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 91cbc0922ad4..e0b93665bf55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1366,8 +1366,12 @@ static struct stream_encoder 
*dcn31_stream_encoder_create(
vpg = dcn31_vpg_create(ctx, vpg_inst);
afmt = dcn31_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
@@ -1412,8 +1416,12 @@ static struct hpo_dp_stream_encoder 
*dcn31_hpo_dp_stream_encoder_create(
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
 
-   if (!hpo_dp_enc31 || !vpg || !apg)
+   if (!hpo_dp_enc31 || !vpg || !apg) {
+   kfree(hpo_dp_enc31);
+   kfree(vpg);
+   kfree(apg);
return NULL;
+   }
 
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
-- 
2.25.1



[PATCH 18/33] drm/amd/display: Add regamma/degamma coefficients and set sRGB when TF is BT709

2021-09-08 Thread Mikita Lipski
From: Jaehyun Chung 

[Why]
In the YUV case, we need to set the input TF to sRGB instead of BT709,
even though the input TF type is distributed. sRGB was not
being used because the pixel format was not being set in the
surface update sequence.
Also, we were using the same coefficients for the degamma and
regamma formulas, causing the cutoff point of the linear
section of the curve to be incorrect.

[How]
Set pixel format in the surface update sequence. Add separate
coefficient arrays for regamma and degamma.

Reviewed-by: Krunoslav Kovac 
Acked-by: Mikita Lipski 
Signed-off-by: Jaehyun Chung 
---
 .../amd/display/modules/color/color_gamma.c   | 60 ---
 1 file changed, 40 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index ef742d95ef05..275f11f8bea3 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -54,12 +54,18 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
  * just multiply with 2^gamma which can be computed once, and save the result 
so we
  * recursively compute all the values.
  */
-   
/*sRGB   709 2.2 2.4 P3*/
-static const int32_t gamma_numerator01[] = { 31308,18, 0,  0,  
0};
-static const int32_t gamma_numerator02[] = { 12920,4500,   0,  0,  
0};
-static const int32_t gamma_numerator03[] = { 55,   99, 0,  
0,  0};
-static const int32_t gamma_numerator04[] = { 55,   99, 0,  
0,  0};
-static const int32_t gamma_numerator05[] = { 2400, 2200,   2200, 2400, 
2600};
+   
 /*sRGB 709 2.2 2.4 P3*/
+static const int32_t regamma_numerator01[] = { 31308,   18, 0,  0,  0};
+static const int32_t regamma_numerator02[] = { 12920,   4500,   0,  0,  0};
+static const int32_t regamma_numerator03[] = { 55,  99, 0,  0,  0};
+static const int32_t regamma_numerator04[] = { 55,  99, 0,  0,  0};
+static const int32_t regamma_numerator05[] = { 2400,2200,   2200, 2400, 
2600};
+
+static const int32_t degamma_numerator01[] = { 40450,   81, 0,  0,  0};
+static const int32_t degamma_numerator02[] = { 12920,   4500,   0,  0,  0};
+static const int32_t degamma_numerator03[] = { 55,  99, 0,  0,  0};
+static const int32_t degamma_numerator04[] = { 55,  99, 0,  0,  0};
+static const int32_t degamma_numerator05[] = { 2400,2200,   2200, 2400, 
2600};
 
 /* one-time setup of X points */
 void setup_x_points_distribution(void)
@@ -288,7 +294,8 @@ struct dividers {
 };
 
 
-static bool build_coefficients(struct gamma_coefficients *coefficients, enum 
dc_transfer_func_predefined type)
+static bool build_coefficients(struct gamma_coefficients *coefficients,
+   enum dc_transfer_func_predefined type, bool isRegamma)
 {
 
uint32_t i = 0;
@@ -311,16 +318,29 @@ static bool build_coefficients(struct gamma_coefficients 
*coefficients, enum dc_
}
 
do {
-   coefficients->a0[i] = dc_fixpt_from_fraction(
-   gamma_numerator01[index], 1000);
-   coefficients->a1[i] = dc_fixpt_from_fraction(
-   gamma_numerator02[index], 1000);
-   coefficients->a2[i] = dc_fixpt_from_fraction(
-   gamma_numerator03[index], 1000);
-   coefficients->a3[i] = dc_fixpt_from_fraction(
-   gamma_numerator04[index], 1000);
-   coefficients->user_gamma[i] = dc_fixpt_from_fraction(
-   gamma_numerator05[index], 1000);
+   if (isRegamma) {
+   coefficients->a0[i] = dc_fixpt_from_fraction(
+   regamma_numerator01[index], 1000);
+   coefficients->a1[i] = dc_fixpt_from_fraction(
+   regamma_numerator02[index], 1000);
+   coefficients->a2[i] = dc_fixpt_from_fraction(
+   regamma_numerator03[index], 1000);
+   coefficients->a3[i] = dc_fixpt_from_fraction(
+   regamma_numerator04[index], 1000);
+   coefficients->user_gamma[i] = dc_fixpt_from_fraction(
+   regamma_numerator05[index], 1000);
+   } else {
+   coefficients->a0[i] = dc_fixpt_from_fraction(
+   degamma_numerator01[index], 1000);
+   coefficients->a1[i] = dc_fixpt_from_fraction(
+   degamma_numerator02[index], 1000);
+   coefficients->a2[i] = dc_fixpt_from_fraction(
+

[PATCH 16/33] drm/amd/display: Get backlight from PWM if DMCU is not initialized

2021-09-08 Thread Mikita Lipski
From: Harry Wentland 

On Carrizo/Stoney systems we set backlight through panel_cntl, i.e.
directly via the PWM registers, if DMCU is not initialized. We
always read it back through ABM registers which leads to a
mismatch and forces atomic_commit to program the backlight
each time.

Instead make sure we use the same logic for backlight readback,
i.e. read it from panel_cntl if DMCU is not initialized.

We also need to remove some extraneous and incorrect calculations
at the end of dce_get_16_bit_backlight_from_pwm.

Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1666
Cc: sta...@vger.kernel.org

Reviewed-by: Josip Pavic 
Acked-by: Mikita Lipski 
Signed-off-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c| 16 
 .../gpu/drm/amd/display/dc/dce/dce_panel_cntl.c  | 10 --
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3c8eb3e659af..61e49671fed5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2752,13 +2752,21 @@ static struct abm *get_abm_from_stream_res(const struct 
dc_link *link)
 
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-
struct abm *abm = get_abm_from_stream_res(link);
+   struct panel_cntl *panel_cntl = link->panel_cntl;
+   struct dc  *dc = link->ctx->dc;
+   struct dmcu *dmcu = dc->res_pool->dmcu;
+   bool fw_set_brightness = true;
 
-   if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-   return DC_ERROR_UNEXPECTED;
+   if (dmcu)
+   fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-   return (int) abm->funcs->get_current_backlight(abm);
+   if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+   return panel_cntl->funcs->get_current_backlight(panel_cntl);
+   else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+   return (int) abm->funcs->get_current_backlight(abm);
+   else
+   return DC_ERROR_UNEXPECTED;
 }
 
 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e92339235863..e8570060d007 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -49,7 +49,6 @@
 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl 
*panel_cntl)
 {
uint64_t current_backlight;
-   uint32_t round_result;
uint32_t bl_period, bl_int_count;
uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct 
panel_cntl *panel_c
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
 
-   current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-   round_result = (uint32_t)(current_backlight & 0x);
-
-   round_result = (round_result >> (bl_int_count-1)) & 1;
-
-   current_backlight >>= bl_int_count;
-   current_backlight += round_result;
-
return (uint32_t)(current_backlight);
 }
 
-- 
2.25.1



[PATCH 11/33] drm/amd/display: Fix for null pointer access for ddc pin and aux engine.

2021-09-08 Thread Mikita Lipski
From: Meenakshikumar Somasundaram 

[Why]
A NULL pointer check is needed for the ddc pin and aux engine accesses.

[How]
Add checks before accessing the ddc pin and aux engine.

Reviewed-by: Jimmy Kizito 
Acked-by: Mikita Lipski 
Signed-off-by: Meenakshikumar Somasundaram 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c   |  2 --
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 12 +---
 drivers/gpu/drm/amd/display/include/dal_asic_id.h|  2 +-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c5f61be1f6b5..5bfdc66b5867 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -449,9 +449,7 @@ static void update_config(void *handle, struct 
cp_psp_stream_config *config)
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct drm_connector_state *conn_state;
struct dc_sink *sink = NULL;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
bool link_is_hdcp14 = false;
-#endif
 
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index e14f99b4b0c3..4a3b94fa3e40 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -689,8 +689,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
enum aux_return_code_type operation_result;
bool retry_on_defer = false;
struct ddc *ddc_pin = ddc->ddc_pin;
-   struct dce_aux *aux_engine = 
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
-   struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+   struct dce_aux *aux_engine = NULL;
+   struct aux_engine_dce110 *aux110 = NULL;
uint32_t defer_time_in_ms = 0;
 
int aux_ack_retries = 0,
@@ -699,6 +699,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
aux_timeout_retries = 0,
aux_invalid_reply_retries = 0;
 
+   if (ddc_pin) {
+   aux_engine = 
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+   aux110 = FROM_AUX_ENGINE(aux_engine);
+   }
+
if (!payload->reply) {
payload_reply = false;
payload->reply = &reply;
@@ -765,7 +770,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,

"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: 
AUX_TRANSACTION_REPLY_AUX_DEFER");
 
/* polling_timeout_period is in us */
-   defer_time_in_ms += 
aux110->polling_timeout_period / 1000;
+   if (aux110)
+   defer_time_in_ms += 
aux110->polling_timeout_period / 1000;
++aux_defer_retries;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h 
b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 381c17caace1..3d2f0817e40a 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -227,7 +227,7 @@ enum {
 #define FAMILY_YELLOW_CARP 146
 
 #define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02// TODO: DCN31 - update with correct B0 
ID
+#define YELLOW_CARP_B0 0x20
 #define YELLOW_CARP_UNKNOWN 0xFF
 
 #ifndef ASICREV_IS_YELLOW_CARP
-- 
2.25.1



[PATCH 13/33] drm/amd/display: Revert "dc: w/a for hard hang on HPD on native DP"

2021-09-08 Thread Mikita Lipski
From: Qingqing Zhuo 

This reverts commit "drm/amd/display: w/a for hard hang on HPD on native DP".

[How & Why]
Revert change as it does not fix the hard hang
in all cases. An alternative w/a will be submitted
separately.

Acked-by: Mikita Lipski 
Signed-off-by: Qingqing Zhuo 
Reviewed-by: Bhawanpreet Lakha 
---
 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 6185f9475fa2..c6f494f0dcea 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -66,11 +66,9 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
 
-   /* Extend the WA to DP for Linux*/
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
-   stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
-   stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+   stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
}
 
-- 
2.25.1



[PATCH 12/33] drm/amd/display: [FW Promotion] Release 0.0.81

2021-09-08 Thread Mikita Lipski
From: Anthony Koo 

* PSR optimizations
* add support for ABM when ODM is enabled
* Z10 with PSR fixes
* Increase trace buffer

Reviewed-by: Anthony Koo 
Acked-by: Mikita Lipski 
Signed-off-by: Anthony Koo 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 3388188701d3..8cf86f7cda41 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x591aacca1
+#define DMUB_FW_VERSION_GIT_HASH 0x8ebc06e16
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 80
+#define DMUB_FW_VERSION_REVISION 81
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
-- 
2.25.1



[PATCH 14/33] drm/amd/display: 3.2.151

2021-09-08 Thread Mikita Lipski
From: Aric Cyr 

* coverity kernel memory leak fixes
* NULL pointer dereference fixes
* Add periodic detection when zstate is enabled
* Fork thread to offload work of hpd_rx_irq

Reviewed-by: Aric Cyr 
Acked-by: Mikita Lipski 
Signed-off-by: Aric Cyr 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index bcae2250a574..ece44796a74f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.150"
+#define DC_VER "3.2.151"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.25.1



[PATCH 10/33] drm/amd/display: Fix false BAD_FREE warning from Coverity

2021-09-08 Thread Mikita Lipski
From: Anson Jacob 

This is an attempt to fix a false warning raised by Coverity
via multiple CIDs.
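
For reference, the shape of the fix (condensed from the diff below): the
early return now compares the two buffer pointers directly instead of
comparing computed end addresses, which is what led Coverity to flag the
later kfree() of an address-of expression.

	/* If the address was never extended, both pointers refer to the same
	 * buffer and nothing was allocated, so there is nothing to free. */
	if (extended_data == reduced_data)
		return;

	memcpy(&extended_data[offset], reduced_data, reduced_size);
	kfree(extended_data);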

Addresses-Coverity-ID: 1487412 ("Free of address-of expression")
Cc: Wesley Chalmers 

Reviewed-by: Wesley Chalmers 
Acked-by: Mikita Lipski 
Signed-off-by: Anson Jacob 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index 72970e49800a..7f25c11f4248 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -176,12 +176,15 @@ static void dpcd_reduce_address_range(
uint8_t * const reduced_data,
const uint32_t reduced_size)
 {
-   const uint32_t reduced_end_address = END_ADDRESS(reduced_address, 
reduced_size);
-   const uint32_t extended_end_address = END_ADDRESS(extended_address, 
extended_size);
const uint32_t offset = reduced_address - extended_address;
 
-   if (extended_end_address == reduced_end_address && extended_address == 
reduced_address)
-   return; /* extended and reduced address ranges point to the 
same data */
+   /*
+* If the address is same, address was not extended.
+* So we do not need to free any memory.
+* The data is in original buffer(reduced_data).
+*/
+   if (extended_data == reduced_data)
+   return;
 
memcpy(&extended_data[offset], reduced_data, reduced_size);
kfree(extended_data);
-- 
2.25.1



[PATCH 09/33] drm/amd/display: Fix dynamic link encoder access.

2021-09-08 Thread Mikita Lipski
From: Jimmy Kizito 

[Why]
Assuming DIG link encoders are statically mapped to links can cause
system instability due to null pointer accesses.

[How]
- Add checks for non-null link encoder pointers before trying to access
them.
- When a hardware platform uses dynamic DIG assignment (i.e. resource
function 'link_encs_assign' defined) and a link supports flexible
mapping to DIGs, use the link_enc_cfg API to access the DIG assigned to
a link or stream (sketched below).
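
For illustration, the lookup pattern in condensed form (simplified from
the update_psp_stream_config() hunk below; link, stream and config stand
for the surrounding function's locals):

	struct link_encoder *link_enc = NULL;

	if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
		/* Statically mapped PHY endpoint: the link owns its DIG. */
		link_enc = link->link_enc;
	} else if (link->dc->res_pool->funcs->link_encs_assign) {
		/* Flexible mapping: query the DIG currently assigned to the
		 * stream in the committed DC state. */
		link_enc = link_enc_cfg_get_link_enc_used_by_stream(
				link->dc->current_state, stream);
	}

	ASSERT(link_enc);
	if (link_enc)
		config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;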

Reviewed-by: Meenakshikumar Somasundaram 
Acked-by: Mikita Lipski 
Signed-off-by: Jimmy Kizito 
---
 .../display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 42 +++
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 36 ++--
 .../drm/amd/display/dc/core/dc_link_enc_cfg.c | 25 ++-
 .../drm/amd/display/dc/core/dc_link_hwss.c|  7 ++--
 .../gpu/drm/amd/display/dc/core/dc_resource.c |  3 +-
 .../display/dc/dce110/dce110_hw_sequencer.c   | 22 +++---
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |  2 +-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 13 --
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 20 -
 .../drm/amd/display/dc/dcn31/dcn31_hwseq.c|  9 +++-
 .../gpu/drm/amd/display/dc/inc/link_enc_cfg.h |  5 +++
 12 files changed, 147 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 15491e3ca11a..1414da4b95d7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -87,7 +87,7 @@ int dcn31_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i];
 
/* abusing the fact that the dig and phy are coupled to see if 
the phy is enabled */
-   if (link->link_enc->funcs->is_dig_enabled &&
+   if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&

link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 5d9460e0dbab..3c8eb3e659af 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3457,6 +3457,10 @@ static enum dc_status deallocate_mst_payload(struct 
pipe_ctx *pipe_ctx)
 static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 {
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   struct link_encoder *link_enc = NULL;
+#endif
+
if (cp_psp && cp_psp->funcs.update_stream_config) {
struct cp_psp_stream_config config = {0};
enum dp_panel_mode panel_mode =
@@ -3468,8 +3472,21 @@ static void update_psp_stream_config(struct pipe_ctx 
*pipe_ctx, bool dpms_off)
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - 
ENGINE_ID_DIGA;
-   config.link_enc_idx = 
pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-   config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter 
- TRANSMITTER_UNIPHY_A;
+   if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) {
+   link_enc = pipe_ctx->stream->link->link_enc;
+   config.phy_idx = link_enc->transmitter - 
TRANSMITTER_UNIPHY_A;
+   } else if 
(pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
+   /* Use link encoder assignment from current DC state - 
which may differ from the DC state to be
+* committed - when updating PSP config.
+*/
+   link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+   
pipe_ctx->stream->link->dc->current_state,
+   pipe_ctx->stream);
+   config.phy_idx = 0; /* Clear phy_idx for non-physical 
display endpoints. */
+   }
+   ASSERT(link_enc);
+   if (link_enc)
+   config.link_enc_idx = link_enc->transmitter - 
TRANSMITTER_UNIPHY_A;
if (is_dp_128b_132b_signal(pipe_ctx)) {
config.stream_enc_idx = 
pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
config.link_enc_idx = 
pipe_ctx->stream->link->hpo_dp_link_enc->inst;
@@ -3576,6 +3593,7 @@ void core_link_enable_stream(
struct dc_stream_state *st

[PATCH 08/33] drm/amd/display: Add flag to detect dpms force off during HPD

2021-09-08 Thread Mikita Lipski
From: Aurabindo Pillai 

[Why] When a connector is unplugged, dpms is forced off so that some
connector allocations are cleared. This is done outside the commit
sequence coming from userspace and leaves the HUBP blanked. Because of
the blanked HUBP, a non-blocking commit which queues a flip will hit a
timeout waiting for flip_done, since the HUBP was already in blank
before the surface flip address was written.

[How] Add a marker to DM's crtc state and use this field to indicate
whether dpms was forced off during an HPD. Check for this marker before
queuing the flip.
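
A minimal sketch of how the marker flows (condensed from the diff below):

	/* HPD path: dm_set_dpms_off() marks the CRTC state before committing
	 * the dpms-off stream update. */
	acrtc_state->force_dpms_off = true;

	/* Flip path in amdgpu_dm_commit_planes(): do not arm the pageflip
	 * event while the CRTC is known to be force-blanked. */
	if (acrtc_attach->base.state->event &&
	    acrtc_state->active_planes > 0 &&
	    !acrtc_state->force_dpms_off) {
		drm_crtc_vblank_get(pcrtc);
		/* ...queue the flip event... */
	}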

Reviewed-by: Anson Jacob 
Acked-by: Mikita Lipski 
Signed-off-by: Aurabindo Pillai 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 19 +--
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  2 ++
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c| 18 ++
 3 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c8ef72702e60..a6c8c30f8c2d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2409,7 +2409,7 @@ static void dm_gpureset_commit_state(struct dc_state 
*dc_state,
return;
 }
 
-static void dm_set_dpms_off(struct dc_link *link)
+static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state 
*acrtc_state)
 {
struct dc_stream_state *stream_state;
struct amdgpu_dm_connector *aconnector = link->priv;
@@ -2430,6 +2430,7 @@ static void dm_set_dpms_off(struct dc_link *link)
}
 
stream_update.stream = stream_state;
+   acrtc_state->force_dpms_off = true;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
stream_state, &stream_update,
 stream_state->ctx->dc->current_state);
@@ -2873,13 +2874,16 @@ static void handle_hpd_irq_helper(struct 
amdgpu_dm_connector *aconnector)
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = 
to_dm_connector_state(connector->state);
-#endif
+   struct dm_crtc_state *dm_crtc_state = NULL;
 
if (adev->dm.disable_hpd_irq)
return;
 
+   if (dm_con_state->base.state && dm_con_state->base.crtc)
+   dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
+   dm_con_state->base.state,
+   dm_con_state->base.crtc));
/*
 * In case of failure or MST no need to update connector status or 
notify the OS
 * since (for MST case) MST does this in its own context.
@@ -2911,8 +2915,9 @@ static void handle_hpd_irq_helper(struct 
amdgpu_dm_connector *aconnector)
 
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none &&
-   aconnector->dc_link->type == dc_connection_none)
-   dm_set_dpms_off(aconnector->dc_link);
+   aconnector->dc_link->type == dc_connection_none &&
+   dm_crtc_state)
+   dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
 
amdgpu_dm_update_connector_after_detect(aconnector);
 
@@ -6253,6 +6258,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+   state->force_dpms_off = cur->force_dpms_off;
/* TODO Duplicate dc_stream after objects are stream object is 
flattened */
 
return &state->base;
@@ -8914,7 +8920,8 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 * and rely on sending it from software.
 */
if (acrtc_attach->base.state->event &&
-   acrtc_state->active_planes > 0) {
+   acrtc_state->active_planes > 0 &&
+   !acrtc_state->force_dpms_off) {
drm_crtc_vblank_get(pcrtc);
 
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index a038c70037b5..a85b09986aab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -629,6 +629,8 @@ struct dm_crtc_state {
 
bool dsc_force_changed;
bool vrr_supported;
+
+   bool force_dpms_off;
struct mod_freesync_confi

[PATCH 06/33] drm/amd/display: Fork thread to offload work of hpd_rx_irq

2021-09-08 Thread Mikita Lipski
From: Wayne Lin 

[Why]
Currently, we try to grab dm.dc_lock in handle_hpd_rx_irq() when a
link loss happens, which is risky and can cause a deadlock.
E.g. if we are in the middle of enabling MST streams and the monitor
happens to toggle a short HPD to signal link loss, then
handle_hpd_rx_irq() will get blocked because the stream enabling flow
holds dc_lock. However, under MST, enabling streams involves
communication with remote sinks, which needs handle_hpd_rx_irq() to
handle the sideband messages. Thus, we end up in a deadlock.

[How]
The target is to have handle_hpd_rx_irq() finish as soon as possible
so we can react to the interrupt quickly. Besides, we should avoid
grabbing dm.dc_lock within handle_hpd_rx_irq() to avoid the deadlock.

Firstly, revert patches which introduced to use dm.dc_lock in
handle_hpd_rx_irq():

* commit ("drm/amd/display: NULL pointer error during ")

* commit ("drm/amd/display: Only one display lights up while using MST")

* commit ("drm/amd/display: take dc_lock in short pulse handler only")

Instead, create work items to handle the irq events that need dm.dc_lock
(a sketch of the offload handler follows the list below). Besides:

* Create struct hpd_rx_irq_offload_work_queue for each link to handle
  its short hpd events

* Avoid handling link lost / automated test if the link is disconnected

* Defer dc_lock-protected works from dc_link_handle_hpd_rx_irq(). This
  function should just handle simple stuff for us (e.g. DPCD R/W).
  However, the deferred works should still be handled in the order that
  dc_link_handle_hpd_rx_irq() used to handle them.

* Change function name dm_handle_hpd_rx_irq() to
  dm_handle_mst_sideband_msg() to be more specific
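
A condensed sketch of the offload handler added below (heavily
simplified; the eDP check, error paths and the is_handling_link_loss
bookkeeping are omitted):

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work =
		container_of(work, struct hpd_rx_irq_offload_work, work);
	struct dc_link *dc_link = offload_work->offload_wq->aconnector->dc_link;
	struct amdgpu_device *adev =
		drm_to_adev(offload_work->offload_wq->aconnector->base.dev);

	/* dm.dc_lock is taken here, in process context, instead of in the
	 * short-pulse interrupt handler. */
	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if (hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
		 dc_link_dp_allow_hpd_rx_irq(dc_link))
		dc_link_dp_handle_link_loss(dc_link);
	mutex_unlock(&adev->dm.dc_lock);

	kfree(offload_work);
}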

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Mikita Lipski 
Signed-off-by: Wayne Lin 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 203 ++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  49 -
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |   9 +-
 drivers/gpu/drm/amd/display/dc/dc_link.h  |   6 +-
 4 files changed, 219 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 53363728dbbd..c8ef72702e60 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1208,6 +1208,83 @@ static void vblank_control_worker(struct work_struct 
*work)
 }
 
 #endif
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+   struct hpd_rx_irq_offload_work *offload_work;
+   struct amdgpu_dm_connector *aconnector;
+   struct dc_link *dc_link;
+   struct amdgpu_device *adev;
+   enum dc_connection_type new_connection_type = dc_connection_none;
+   unsigned long flags;
+
+   offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+   aconnector = offload_work->offload_wq->aconnector;
+
+   if (!aconnector) {
+   DRM_ERROR("Can't retrieve aconnector in 
hpd_rx_irq_offload_work");
+   goto skip;
+   }
+
+   adev = drm_to_adev(aconnector->base.dev);
+   dc_link = aconnector->dc_link;
+
+   mutex_lock(&aconnector->hpd_lock);
+   if (!dc_link_detect_sink(dc_link, &new_connection_type))
+   DRM_ERROR("KMS: Failed to detect connector\n");
+   mutex_unlock(&aconnector->hpd_lock);
+
+   if (new_connection_type == dc_connection_none)
+   goto skip;
+
+   if (amdgpu_in_reset(adev))
+   goto skip;
+
+   mutex_lock(&adev->dm.dc_lock);
+   if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+   dc_link_dp_handle_automated_test(dc_link);
+   else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+   hpd_rx_irq_check_link_loss_status(dc_link, 
&offload_work->data) &&
+   dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+   dc_link_dp_handle_link_loss(dc_link);
+   spin_lock_irqsave(&offload_work->offload_wq->offload_lock, 
flags);
+   offload_work->offload_wq->is_handling_link_loss = false;
+   spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, 
flags);
+   }
+   mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+   kfree(offload_work);
+
+}
+
+static struct hpd_rx_irq_offload_work_queue 
*hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+   int max_caps = dc->caps.max_links;
+   int i = 0;
+   struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+   hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), 
GFP_KERNEL);
+
+   if (!hpd_rx_offload_wq)
+   return NULL;
+
+
+   for (i = 0; i < max_caps; i++) {
+   hpd_rx_offload_wq[i].wq =
+   
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+   if (hpd_rx_offload_wq[i].wq == NULL) {
+   DRM_ERROR

[PATCH 07/33] drm/amd/display: unblock abm when odm is enabled only on configs that support it

2021-09-08 Thread Mikita Lipski
From: Josip Pavic 

[Why]
When ODM is enabled, ABM is blocked on dcn31 but unblocked on dcn30.

Since the dcn31 firmware is now able to handle ABM interop with ODM, it
is no longer necessary to block ABM when ODM is enabled.

Since the dcn30 firmware does not handle ABM interop with ODM, leaving
that combination unblocked can lead to one side of the screen appearing
brighter than the other.

[How]
When ODM is enabled, unblock abm on dcn31 and block it on dcn30

Reviewed-by: Anthony Koo 
Acked-by: Mikita Lipski 
Signed-off-by: Josip Pavic 
---
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c |  1 +
 .../gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c| 15 ---
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c |  1 -
 3 files changed, 1 insertion(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3a5b53dd2f6d..93f32a312fee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
+   .is_abm_supported = dcn21_is_abm_supported
 };
 
 static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 7feba8a0d847..2434232fb3f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -595,18 +595,3 @@ void dcn31_reset_hw_ctx_wrap(
}
}
 }
-
-bool dcn31_is_abm_supported(struct dc *dc,
-   struct dc_state *context, struct dc_stream_state *stream)
-{
-   int i;
-
-   for (i = 0; i < dc->res_pool->pipe_count; i++) {
-   struct pipe_ctx *pipe_ctx = >res_ctx.pipe_ctx[i];
-
-   if (pipe_ctx->stream == stream &&
-   (pipe_ctx->prev_odm_pipe == NULL && 
pipe_ctx->next_odm_pipe == NULL))
-   return true;
-   }
-   return false;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 40011cd3c8ef..10c83f4083b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -98,7 +98,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
-   .is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
-- 
2.25.1



[PATCH 05/33] drm/amd/display: Add option to defer works of hpd_rx_irq

2021-09-08 Thread Mikita Lipski
From: Wayne Lin 

[Why & How]
Due to some code flow constraints, we need to defer work that needs dc_lock
out of dc_link_handle_hpd_rx_irq(). Thus, make the following changes:

* Change allow_hpd_rx_irq() from static to public
* Change handle_automated_test() from static to public
* Extract link lost handling flow out from dc_link_handle_hpd_rx_irq()
  and put those into a new function dc_link_dp_handle_link_loss()
* Add one option parameter to decide whether to defer work within
  dc_link_handle_hpd_rx_irq() (a usage sketch follows below)
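
A hedged sketch of how a DM-side caller can use the deferred mode,
assuming the exported dc_link_handle_hpd_rx_irq() gains the same
defer_handling/has_left_work parameters as the static helper in the diff
below (queue_hpd_rx_offload_work() is a hypothetical placeholder; the
real DM plumbing lands in the follow-up offload patch):

	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;

	/* Only simple handling (e.g. DPCD reads/writes) happens here. */
	dc_link_handle_hpd_rx_irq(link, &hpd_irq_data, &link_loss,
				  true /* defer_handling */, &has_left_work);

	/* Anything that needs dm.dc_lock is pushed to process context. */
	if (has_left_work)
		queue_hpd_rx_offload_work(link, &hpd_irq_data);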

Acked-by: Mikita Lipski 
Signed-off-by: Wayne Lin 
Reviewed-by: Nicholas Kazlauskas 
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 92 ---
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  3 +
 2 files changed, 63 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 53c3c9c1a79d..22455113ba9d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3432,7 +3432,7 @@ void decide_link_settings(struct dc_stream_state *stream,
 }
 
 /*Short Pulse IRQ***/
-static bool allow_hpd_rx_irq(const struct dc_link *link)
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
 {
/*
 * Don't handle RX IRQ unless one of following is met:
@@ -3941,7 +3941,7 @@ static void dp_test_get_audio_test_data(struct dc_link 
*link, bool disable_video
}
 }
 
-static void handle_automated_test(struct dc_link *link)
+void dc_link_dp_handle_automated_test(struct dc_link *link)
 {
union test_request test_request;
union test_response test_response;
@@ -3990,17 +3990,50 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
 }
 
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data 
*out_hpd_irq_dpcd_data, bool *out_link_loss)
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+   int i;
+   struct pipe_ctx *pipe_ctx;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+   if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == 
link)
+   break;
+   }
+
+   if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+   return;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+   if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off 
&&
+   pipe_ctx->stream->link == link && 
!pipe_ctx->prev_odm_pipe) {
+   core_link_disable_stream(pipe_ctx);
+   }
+   }
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+   if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off 
&&
+   pipe_ctx->stream->link == link && 
!pipe_ctx->prev_odm_pipe) {
+   core_link_enable_stream(link->dc->current_state, 
pipe_ctx);
+   }
+   }
+}
+
+static bool handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data 
*out_hpd_irq_dpcd_data, bool *out_link_loss,
+   bool defer_handling, 
bool *has_left_work)
 {
union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
enum dc_status result;
bool status = false;
-   struct pipe_ctx *pipe_ctx;
-   int i;
 
if (out_link_loss)
*out_link_loss = false;
+
+   if (has_left_work)
+   *has_left_work = false;
/* For use cases related to down stream connection status change,
 * PSR and device auto test, refer to function handle_sst_hpd_irq
 * in DAL2.1*/
@@ -4032,11 +4065,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, 
union hpd_irq_data *out_hpd
&device_service_clear.raw,
sizeof(device_service_clear.raw));
device_service_clear.raw = 0;
-   handle_automated_test(link);
+   if (defer_handling && has_left_work)
+   *has_left_work = true;
+   else
+   dc_link_dp_handle_automated_test(link);
return false;
}
 
-   if (!allow_hpd_rx_irq(link)) {
+   if (!dc_link_dp_allow_hpd_rx_irq(link)) {
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
@@ -4050,12 +4086,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link

[PATCH 04/33] drm/amd/display: update conditions to do dfp cap ext validation

2021-09-08 Thread Mikita Lipski
From: Wenjing Liu 

[why]
According to the DP specs, dfp cap ext validation only applies to a
branch device without 128b/132b channel coding support whose
downstream devices do not use compression.
Therefore we add conditions to only do dfp cap
extension validation for a branch device that supports 8b/10b
channel coding only and has no DSC passthrough capability.

Acked-by: Mikita Lipski 
Signed-off-by: Wenjing Liu 
Reviewed-by: George Shen 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f1c9ee53ac67..5d9460e0dbab 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2606,7 +2606,9 @@ static bool dp_active_dongle_validate_timing(
 #if defined(CONFIG_DRM_AMD_DC_DCN)
}
 
-   if (dongle_caps->dfp_cap_ext.supported) {
+   if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
+   
dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT 
== 0 &&
+   dongle_caps->dfp_cap_ext.supported) {
 
if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < 
(timing->pix_clk_100hz / 1))
return false;
-- 
2.25.1



[PATCH 03/33] drm/amd/display: move bpp range decision in decide dsc bw range function

2021-09-08 Thread Mikita Lipski
From: Wenjing Liu 

[why]
Previously, get dsc bw range was used to compute the DSC bandwidth range
based on the given fixed bpp min/max input.
This change merges any spec-, signal- and timing-specific
bpp range decisions into this function, so the function makes its
decision with all aspects considered.
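
A simplified sketch of the fixed-bpp part of the merged decision
(condensed from decide_dsc_bandwidth_range() in the diff below, which is
truncated here; it assumes both ends of the range collapse to the
preferred value, and the full function also caps the range against the
native, uncompressed bandwidth):

	uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;

	memset(range, 0, sizeof(*range));

	if (preferred_bpp_x16) {
		/* A timing-mandated fixed bpp must fall inside the requested
		 * [min, max] window, otherwise DSC is not possible. */
		if (preferred_bpp_x16 >= min_bpp_x16 &&
		    preferred_bpp_x16 <= max_bpp_x16) {
			range->min_target_bpp_x16 = preferred_bpp_x16;
			range->max_target_bpp_x16 = preferred_bpp_x16;
		}
	}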

Acked-by: Mikita Lipski 
Signed-off-by: Wenjing Liu 
Reviewed-by: George Shen 
---
 drivers/gpu/drm/amd/display/dc/dc_dsc.h |   6 +-
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 126 ++--
 2 files changed, 69 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h 
b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index c8cc6a448c36..684713b2cff7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -51,7 +51,6 @@ struct dc_dsc_policy {
int min_slice_height; // Must not be less than 8
uint32_t max_target_bpp;
uint32_t min_target_bpp;
-   uint32_t preferred_bpp_x16;
bool enable_dsc_when_not_needed;
 };
 
@@ -86,6 +85,11 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const int num_slices_h,
const bool is_dp);
 
+/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to 
DM,
+ * and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
+ * Hardware/specs limitation should not be writable by DM.
+ * It should be decoupled from DM specific policy and named differently.
+ */
 void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 1e30a742ae01..0321b4446e05 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -40,8 +40,15 @@ static bool dsc_policy_enable_dsc_when_not_needed;
 
 static bool dsc_policy_disable_dsc_stream_overhead;
 
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
 /* Forward Declerations */
-static void get_dsc_bandwidth_range(
+static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
@@ -356,7 +363,7 @@ bool dc_dsc_compute_bandwidth_range(
dsc_min_slice_height_override, max_bpp_x16, 
&config);
 
if (is_dsc_possible)
-   get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
+   is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, 
max_bpp_x16,
config.num_slices_h, &dsc_common_caps, timing, 
range);
 
return is_dsc_possible;
@@ -481,10 +488,12 @@ static uint32_t compute_bpp_x16_from_target_bandwidth(
return dc_fixpt_floor(bpp_x16);
 }
 
-/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, 
and timing's pixel clock
- * and uncompressed bandwidth.
+/* Decide DSC bandwidth range based on signal, timing, specs specific and 
input min and max
+ * requirements.
+ * The range output includes decided min/max target bpp, the respective 
bandwidth requirements
+ * and native timing bandwidth requirement when DSC is not used.
  */
-static void get_dsc_bandwidth_range(
+static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
@@ -492,39 +501,45 @@ static void get_dsc_bandwidth_range(
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range)
 {
-   /* native stream bandwidth */
-   range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
-
-   /* max dsc target bpp */
-   range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
-   max_bpp_x16, num_slices_h, dsc_caps->is_dp);
-   range->max_target_bpp_x16 = max_bpp_x16;
-   if (range->max_kbps > range->stream_kbps) {
-   /* max dsc target bpp is capped to native bandwidth */
-   range->max_kbps = range->stream_kbps;
-   range->max_target_bpp_x16 = 
compute_bpp_x16_from_target_bandwidth(
-   range->max_kbps, timing, num_slices_h,
-   dsc_caps->bpp_increment_div,
-   dsc_caps->is_dp);
+   uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
+
+   memset(range, 0, sizeof(*range));
+
+   /* apply signal, timing, specs and explicitly specified DSC range 
requirements */
+   if (preferred_bpp_x16) {
+   if (preferred_bpp_x16 <= max_bpp_x16 &&
+   preferred_bpp_x16 >= min_bpp_x16) {
+   range->max_

[PATCH 02/33] drm/amd/display: Fix system hang at boot

2021-09-08 Thread Mikita Lipski
From: "Leo (Hanghong) Ma" 

[Why]
During DQE's promotion test, a system hang issue was found on Linux
systems;

[How]
1. Add a NULL pointer check for the link in the sequence trace
   function;
2. Get the right link for the stream encoder before blanking the DP
   stream (sketched below);
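
A condensed view of the new blanking logic (simplified from the
power_down_encoders() hunk below, writing link for dc->links[i]):

	if (link->link_enc->funcs->get_dig_frontend &&
	    link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
		unsigned int fe =
			link->link_enc->funcs->get_dig_frontend(link->link_enc);

		/* Blank only the stream encoder whose DIG front end is
		 * actually driven by this link, instead of blanking all. */
		for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
			if (fe == dc->res_pool->stream_enc[j]->id) {
				dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
						dc->res_pool->stream_enc[j]);
				break;
			}
		}
	}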

Acked-by: Mikita Lipski 
Signed-off-by: Leo (Hanghong) Ma 
Reviewed-by: Aric Cyr 
---
 .../drm/amd/display/dc/core/dc_link_hwss.c|  2 +-
 .../display/dc/dce110/dce110_hw_sequencer.c   | 27 ---
 2 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 58abfa5a7bac..b9570b7c557b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -64,7 +64,7 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
 
 void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
 {
-   if (link->dc->debug.enable_driver_sequence_debug)
+   if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
&dp_test_mode, sizeof(dp_test_mode));
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index cf8fee721f30..2ce668a23fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1638,23 +1638,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 
 static void power_down_encoders(struct dc *dc)
 {
-   int i;
-
-   /* do not know BIOS back-front mapping, simply blank all. It will not
-* hurt for non-DP
-*/
-   for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
-   dc->res_pool->stream_enc[i]->funcs->dp_blank(dc->links[i],
-   dc->res_pool->stream_enc[i]);
-   }
+   int i, j;
 
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
 
if ((signal == SIGNAL_TYPE_EDP) ||
-   (signal == SIGNAL_TYPE_DISPLAY_PORT))
+   (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+   if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
+   
dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
+   unsigned int fe = 
dc->links[i]->link_enc->funcs->get_dig_frontend(
+   
dc->links[i]->link_enc);
+
+   for (j = 0; j < dc->res_pool->stream_enc_count; 
j++) {
+   if (fe == 
dc->res_pool->stream_enc[j]->id) {
+   
dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+   
dc->res_pool->stream_enc[j]);
+   break;
+   }
+   }
+   }
+
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
+   }
 
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
-- 
2.25.1



[PATCH 00/33] DC Patches September 08, 2021

2021-09-08 Thread Mikita Lipski
This DC patchset brings improvements in multiple areas. In summary, we 
highlight:

* bandwidth optimizations on following fast updates
* fixes and code improvements of DP connector blanking
* add thread to offload work of MST HPD IRQ function
* fix gamma coefficients
* provide backlight support for APUs without DMUB support
* coverity memory leak and warning fixes
* DSC MST bandwidth calculation fixes
* DMUB enhancements

Anson Jacob (3):
  drm/amd/display: Fix false BAD_FREE warning from Coverity
  drm/amd/display: Fix multiple memory leaks reported by coverity
  drm/amd/display: Revert "Directly retrain link from debugfs"

Anthony Koo (2):
  drm/amd/display: [FW Promotion] Release 0.0.81
  drm/amd/display: [FW Promotion] Release 0.0.82

Aric Cyr (2):
  drm/amd/display: 3.2.151
  drm/amd/display: 3.2.152

Aurabindo Pillai (1):
  drm/amd/display: Add flag to detect dpms force off during HPD

Dale Zhao (1):
  drm/amd/display: Refine condition of cursor visibility for pipe-split

Eric Yang (1):
  drm/amd/display: Add periodic detection when zstate is enabled

Harry Wentland (1):
  drm/amd/display: Get backlight from PWM if DMCU is not initialized

Hersen Wu (1):
  drm/amd/display: dsc mst 2 4K displays go dark with 2 lane HBR3

Ian Chen (1):
  drm/amd/display: remove force_enable_edp_fec param.

Jaehyun Chung (3):
  drm/amd/display: Add regamma/degamma coefficients and set sRGB when TF
is BT709
  drm/amd/display: Correct degamma coefficients
  drm/amd/display: Revert adding degamma coefficients

Jimmy Kizito (1):
  drm/amd/display: Fix dynamic link encoder access.

Josip Pavic (1):
  drm/amd/display: unblock abm when odm is enabled only on configs that
support it

Leo (Hanghong) Ma (3):
  drm/amd/display: Add DPCD writes at key points
  drm/amd/display: Fix system hang at boot
  drm/amd/display: Add helper for blanking all dp displays

Meenakshikumar Somasundaram (2):
  drm/amd/display: Fix for null pointer access for ddc pin and aux
engine.
  drm/amd/display: Link training retry fix for abort case

Michael Strauss (2):
  drm/amd/display: Add VPG and AFMT low power support for DCN3.1
  drm/amd/display: Enable mem low power control for DCN3.1 sub-IP blocks

Nicholas Kazlauskas (1):
  drm/amd/display: Optimize bandwidth on following fast update

Qingqing Zhuo (3):
  drm/amd/display: Revert "dc: w/a for hard hang on HPD on native DP"
  drm/amd/display: Apply w/a for hard hang on HPD
  drm/amd/display: Fix unstable HPCP compliance on Chrome Barcelo

Wayne Lin (2):
  drm/amd/display: Add option to defer works of hpd_rx_irq
  drm/amd/display: Fork thread to offload work of hpd_rx_irq

Wenjing Liu (2):
  drm/amd/display: move bpp range decision in decide dsc bw range
function
  drm/amd/display: update conditions to do dfp cap ext validation

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 266 ++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  51 +++-
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |   3 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c|  16 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c |   6 +
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  18 +-
 .../display/amdgpu_dm/amdgpu_dm_mst_types.h   |  11 +-
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c |  16 +-
 .../display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c  |   4 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  31 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 139 -
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 138 ++---
 .../drm/amd/display/dc/core/dc_link_dpcd.c|  11 +-
 .../drm/amd/display/dc/core/dc_link_enc_cfg.c |  25 +-
 .../drm/amd/display/dc/core/dc_link_hwss.c|  20 +-
 .../gpu/drm/amd/display/dc/core/dc_resource.c |   3 +-
 drivers/gpu/drm/amd/display/dc/dc.h   |   9 +-
 drivers/gpu/drm/amd/display/dc/dc_dsc.h   |   6 +-
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  10 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  |  12 +-
 .../drm/amd/display/dc/dce/dce_panel_cntl.c   |  10 -
 .../amd/display/dc/dce/dce_stream_encoder.c   |   2 +
 .../display/dc/dce110/dce110_hw_sequencer.c   |  55 ++--
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  86 ++
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |   2 +-
 .../display/dc/dcn10/dcn10_stream_encoder.c   |  20 ++
 .../display/dc/dcn10/dcn10_stream_encoder.h   |   2 +
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|  23 +-
 .../drm/amd/display/dc/dcn20/dcn20_resource.c |  20 +-
 .../display/dc/dcn20/dcn20_stream_encoder.c   |   5 +
 .../display/dc/dcn20/dcn20_stream_encoder.h   |   1 +
 .../gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c |  24 +-
 .../gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h |  24 ++
 .../dc/dcn30/dcn30_dio_stream_encoder.c   |   2 +
 .../drm/amd/display/dc/dcn30/dcn30_hwseq.c|  39 +--
 .../gpu/drm/amd/display/dc/dcn30/dcn30_init.c |   1 +
 .../drm/amd/display/dc/dcn30/dcn30_resource.c |   6 +-
 .../gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c  |   2 +-
 

[PATCH 01/33] drm/amd/display: Add DPCD writes at key points

2021-09-08 Thread Mikita Lipski
From: "Leo (Hanghong) Ma" 

This reverts commit "Revert "Add DPCD writes at key points" ".
The following patch will fix the system hang issue.

Signed-off-by: Leo (Hanghong) Ma 
Acked-by: Mikita Lipski 
Reviewed-by: Aric Cyr 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  7 ++
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |  3 ++-
 .../drm/amd/display/dc/core/dc_link_hwss.c| 13 ++-
 drivers/gpu/drm/amd/display/dc/dc.h   |  1 +
 .../amd/display/dc/dce/dce_stream_encoder.c   |  2 ++
 .../display/dc/dce110/dce110_hw_sequencer.c   | 22 ++-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  4 ++--
 .../display/dc/dcn10/dcn10_stream_encoder.c   | 10 +
 .../display/dc/dcn10/dcn10_stream_encoder.h   |  2 ++
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 10 -
 .../display/dc/dcn20/dcn20_stream_encoder.c   |  5 +
 .../display/dc/dcn20/dcn20_stream_encoder.h   |  1 +
 .../drm/amd/display/dc/dcn30/dcn30_hwseq.c|  2 +-
 .../drm/amd/display/dc/dcn31/dcn31_hwseq.c|  2 +-
 .../amd/display/dc/inc/hw/stream_encoder.h|  2 ++
 .../gpu/drm/amd/display/dc/inc/link_hwss.h|  1 +
 .../dc/virtual/virtual_stream_encoder.c   |  2 ++
 .../gpu/drm/amd/display/include/dpcd_defs.h   |  1 +
 .../amd/display/include/link_service_types.h  | 16 ++
 19 files changed, 94 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 54ff4fb1ea03..f1c9ee53ac67 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3572,6 +3572,7 @@ void core_link_enable_stream(
 {
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
+   struct dc_link *link = stream->sink->link;
enum dc_status status;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
@@ -3624,6 +3625,9 @@ void core_link_enable_stream(

stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
 #endif
 
+   if (dc_is_dp_signal(pipe_ctx->stream->signal))
+   dp_source_sequence_trace(link, 
DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))

pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
@@ -3659,6 +3663,9 @@ void core_link_enable_stream(
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
 
+   if (dc_is_dp_signal(pipe_ctx->stream->signal))
+   dp_source_sequence_trace(link, 
DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index e412a096a4b8..53c3c9c1a79d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2375,6 +2375,7 @@ bool perform_link_training_with_retries(
 #endif
link_enc->funcs->connect_dig_be_to_fe(link_enc,

pipe_ctx->stream_res.stream_enc->id, true);
+   dp_source_sequence_trace(link, 
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
 
for (j = 0; j < attempts; ++j) {
 
@@ -5267,7 +5268,7 @@ bool dc_link_dp_set_test_pattern(
 * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
 */
/* Blank stream */
-   
pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+   pipes->stream_res.stream_enc->funcs->dp_blank(link, 
pipe_ctx->stream_res.stream_enc);
}
 
dp_set_hw_test_pattern(link, test_pattern,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 29b9c128c87c..58abfa5a7bac 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -62,6 +62,13 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
sizeof(state));
 }
 
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+{
+   if (link->dc->debug.enable_driver_sequence_debug)
+   core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
+   &dp_test_mode, sizeof(dp_test_mode));
+}
+
 void dp_enable_link_phy(
stru

[PATCH 06/19] drm/amd/display: expose dsc overhead bw in dc dsc header

2021-08-27 Thread Mikita Lipski
From: Wenjing Liu 

[why]
DM needs to know how much overhead is added to DSC as a result
of the AMD-internal DSC limitation.
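
For example, a DM-side caller could budget link bandwidth roughly like
this (a hypothetical usage sketch; stream, bpp_x16 and num_slices_h
stand for the caller's own values):

	uint32_t overhead_kbps, total_kbps;

	/* Overhead added because of the AMD-internal DSC limitation. */
	overhead_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
			&stream->timing, num_slices_h, true /* is_dp */);

	/* The total DSC stream bandwidth already includes that overhead. */
	total_kbps = dc_dsc_stream_bandwidth_in_kbps(
			&stream->timing, bpp_x16, num_slices_h, true /* is_dp */);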

Reviewed-by: George Shen 
Acked-by: Mikita Lipski 
Signed-off-by: Wenjing Liu 
---
 drivers/gpu/drm/amd/display/dc/dc_dsc.h |  5 ++
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 64 ++---
 2 files changed, 37 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h 
b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 16cc76ce3739..c8cc6a448c36 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -81,6 +81,11 @@ bool dc_dsc_compute_config(
 uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp);
 
+uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+   const struct dc_crtc_timing *timing,
+   const int num_slices_h,
+   const bool is_dp);
+
 void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index f403d8e84a8c..8c04f9f42a3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -261,32 +261,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t 
value)
return (value + 9) / 10;
 }
 
-static struct fixed31_32 compute_dsc_max_bandwidth_overhead(
-   const struct dc_crtc_timing *timing,
-   const int num_slices_h,
-   const bool is_dp)
-{
-   struct fixed31_32 max_dsc_overhead;
-   struct fixed31_32 refresh_rate;
-
-   if (dsc_policy_disable_dsc_stream_overhead || !is_dp)
-   return dc_fixpt_from_int(0);
-
-   /* use target bpp that can take entire target bandwidth */
-   refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz);
-   refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total);
-   refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total);
-   refresh_rate = dc_fixpt_mul_int(refresh_rate, 100);
-
-   max_dsc_overhead = dc_fixpt_from_int(num_slices_h);
-   max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total);
-   max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256);
-   max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000);
-   max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate);
-
-   return max_dsc_overhead;
-}
-
 static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -294,14 +268,14 @@ static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bpp_increment_div,
const bool is_dp)
 {
-   struct fixed31_32 overhead_in_kbps;
+   uint32_t overhead_in_kbps;
struct fixed31_32 effective_bandwidth_in_kbps;
struct fixed31_32 bpp_x16;
 
-   overhead_in_kbps = compute_dsc_max_bandwidth_overhead(
+   overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
effective_bandwidth_in_kbps = dc_fixpt_from_int(bandwidth_in_kbps);
-   effective_bandwidth_in_kbps = dc_fixpt_sub(effective_bandwidth_in_kbps,
+   effective_bandwidth_in_kbps = 
dc_fixpt_sub_int(effective_bandwidth_in_kbps,
overhead_in_kbps);
bpp_x16 = dc_fixpt_mul_int(effective_bandwidth_in_kbps, 10);
bpp_x16 = dc_fixpt_div_int(bpp_x16, timing->pix_clk_100hz);
@@ -933,19 +907,45 @@ bool dc_dsc_compute_config(
 uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp)
 {
-   struct fixed31_32 overhead_in_kbps;
+   uint32_t overhead_in_kbps;
struct fixed31_32 bpp;
struct fixed31_32 actual_bandwidth_in_kbps;
 
-   overhead_in_kbps = compute_dsc_max_bandwidth_overhead(
+   overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
bpp = dc_fixpt_from_fraction(bpp_x16, 16);
actual_bandwidth_in_kbps = 
dc_fixpt_from_fraction(timing->pix_clk_100hz, 10);
actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp);
-   actual_bandwidth_in_kbps = dc_fixpt_add(actual_bandwidth_in_kbps, 
overhead_in_kbps);
+   actual_bandwidth_in_kbps = dc_fixpt_add_int(actual_bandwidth_in_kbps, 
overhead_in_kbps);
return dc_fixpt_ceil(actual_bandwidth_in_kbps);
 }
 
+uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+   const struct dc_crtc_timing *timing,
+   const int num_slices_h,
+   const bool is_dp)
+{
+  

[PATCH 15/19] drm/amd/display: 3.2.151

2021-08-27 Thread Mikita Lipski
From: Aric Cyr 

- memory leak fixes
- fix uninitialized lt_settings pointers
- add DP trace functions
- add null pointer checks for DP sequence trace functions
- expose dsc overhead bw parameter to DM
- block abm when odm is enabled

Reviewed-by: Aric Cyr 
Acked-by: Mikita Lipski 
Signed-off-by: Aric Cyr 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index c6c0fb17462e..e0517eaa8fa4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.145"
+#define DC_VER "3.2.151"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.25.1



[PATCH 18/19] drm/amd/display: Revert "Directly retrain link from debugfs"

2021-08-27 Thread Mikita Lipski
From: Anson Jacob 

This reverts commit "drm/amd/display: Directly retrain link from debugfs".

The reverted patch prevented new settings from taking effect; a
hotplug is required for new settings to take effect.

Reviewed-by: Mikita Lipski 
Acked-by: Mikita Lipski 
Signed-off-by: Anson Jacob 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 87daa78a32b8..f3ada9b6be5a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -247,6 +247,7 @@ static ssize_t dp_link_settings_write(struct file *f, const 
char __user *buf,
 {
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
+   struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
@@ -313,7 +314,7 @@ static ssize_t dp_link_settings_write(struct file *f, const 
char __user *buf,
prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1];
 
-   dp_retrain_link_dp_test(link, &prefer_link_settings, false);
+   dc_link_set_preferred_training_settings(dc, &prefer_link_settings, 
NULL, link, true);
 
kfree(wr_buf);
return size;
-- 
2.25.1



[PATCH 04/19] drm/amd/display: Fix system hang at boot

2021-08-27 Thread Mikita Lipski
From: "Leo (Hanghong) Ma" 

[Why]
During DQE's promotion test, a system hang issue was found on Linux
systems;

[How]
1. Add a NULL pointer check for the link in the sequence trace
   function;
2. Get the right link for the stream encoder before blanking the DP
   stream;

Reviewed-by: Aric Cyr 
Acked-by: Mikita Lipski 
Signed-off-by: Leo (Hanghong) Ma 
---
 .../drm/amd/display/dc/core/dc_link_hwss.c|  2 +-
 .../display/dc/dce110/dce110_hw_sequencer.c   | 27 ---
 2 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 2a064f7b669e..b1dd791c6f87 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -63,7 +63,7 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
 
 void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
 {
-   if (link->dc->debug.enable_driver_sequence_debug)
+   if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
_test_mode, sizeof(dp_test_mode));
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 98549e397db9..3870f3d482a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1552,23 +1552,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 
 static void power_down_encoders(struct dc *dc)
 {
-   int i;
-
-   /* do not know BIOS back-front mapping, simply blank all. It will not
-* hurt for non-DP
-*/
-   for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
-   dc->res_pool->stream_enc[i]->funcs->dp_blank(dc->links[i],
-   dc->res_pool->stream_enc[i]);
-   }
+   int i, j;
 
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
 
if ((signal == SIGNAL_TYPE_EDP) ||
-   (signal == SIGNAL_TYPE_DISPLAY_PORT))
+   (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+   if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
+   
dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
+   unsigned int fe = 
dc->links[i]->link_enc->funcs->get_dig_frontend(
+   
dc->links[i]->link_enc);
+
+   for (j = 0; j < dc->res_pool->stream_enc_count; 
j++) {
+   if (fe == 
dc->res_pool->stream_enc[j]->id) {
+   
dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+   
dc->res_pool->stream_enc[j]);
+   break;
+   }
+   }
+   }
+
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
+   }
 
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
-- 
2.25.1



[PATCH 05/19] drm/amd/display: Drop unused privacy_mask setters and getters

2021-08-27 Thread Mikita Lipski
From: Oliver Logush 

[Why and How]
dwbc_funcs.set/get_privacy_mask isn't being used anymore, drop it

Reviewed-by: Charlene Liu 
Acked-by: Mikita Lipski 
Signed-off-by: Oliver Logush 
---
 drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 6 --
 1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index ec28cb9c3a8e..587bf4aef4bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -220,12 +220,6 @@ struct dwbc_funcs {
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
 
-   void (*get_privacy_mask)(
-   struct dwbc *dwbc, uint32_t *mask_id);
-
-   void (*set_privacy_mask)(
-   struct dwbc *dwbc, uint32_t mask_id);
-
//TODO: merge with output_transfer_func?
bool (*dwb_ogam_set_input_transfer_func)(
struct dwbc *dwbc,
-- 
2.25.1



[PATCH 16/19] drm/amd/display: Fix multiple memory leaks reported by coverity

2021-08-27 Thread Mikita Lipski
From: Anson Jacob 

coccinelle patch used:

@@ expression enc1,vpg,afmt; @@
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }

Addresses-Coverity-ID: 1466017: ("Resource leaks")

Reviewed-by: Aurabindo Jayamohanan Pillai 
Acked-by: Mikita Lipski 
Signed-off-by: Anson Jacob 
---
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c   | 6 +-
 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 6 +-
 drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 6 +-
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c   | 6 +-
 4 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 596c97dce67e..338f7a8f7c2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1164,8 +1164,12 @@ struct stream_encoder *dcn30_stream_encoder_create(
vpg = dcn30_vpg_create(ctx, vpg_inst);
afmt = dcn30_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 9776d1737818..5350c93d7772 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1195,8 +1195,12 @@ struct stream_encoder *dcn301_stream_encoder_create(
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 7d3ff5d44402..3c196414b1e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -542,8 +542,12 @@ static struct stream_encoder 
*dcn302_stream_encoder_create(enum engine_id eng_id
vpg = dcn302_vpg_create(ctx, vpg_inst);
afmt = dcn302_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, 
vpg, afmt, _enc_regs[eng_id],
_shift, _mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 38c010afade1..d83b9c47aef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1281,8 +1281,12 @@ static struct stream_encoder 
*dcn31_stream_encoder_create(
vpg = dcn31_vpg_create(ctx, vpg_inst);
afmt = dcn31_afmt_create(ctx, afmt_inst);
 
-   if (!enc1 || !vpg || !afmt)
+   if (!enc1 || !vpg || !afmt) {
+   kfree(enc1);
+   kfree(vpg);
+   kfree(afmt);
return NULL;
+   }
 
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
-- 
2.25.1



[PATCH 14/19] drm/amd/display: [FW Promotion] Release 0.0.81

2021-08-27 Thread Mikita Lipski
From: Anthony Koo 

- Fix PSR1 residency to be captured per panel
- Merge VBIOS DP AUX change
- Pass panel instance in dirty rect message
- Z10 bug fixes
- Add detection required bit to boot status
- Match abm_state to interrupt event specified otg_inst

Reviewed-by: Anthony Koo 
Acked-by: Mikita Lipski 
Signed-off-by: Anthony Koo 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 8b0b4d86986c..df469a73a1d5 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x2d2f6f51e
+#define DMUB_FW_VERSION_GIT_HASH 0x8ebc06e16
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 75
+#define DMUB_FW_VERSION_REVISION 81
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
-- 
2.25.1



[PATCH 08/19] drm/amd/display: Add option to defer works of hpd_rx_irq

2021-08-27 Thread Mikita Lipski
From: Wayne Lin 

[Why & How]
Due to some code flow constraints, we need to defer work that requires dc_lock
out of dc_link_handle_hpd_rx_irq(). Thus, make the following changes:

* Change allow_hpd_rx_irq() from static to public
* Change handle_automated_test() from static to public
* Extract the link lost handling flow out of dc_link_handle_hpd_rx_irq()
  and put it into a new function, dc_link_dp_handle_link_loss()
* Add an option parameter that decides whether to defer work within
  dc_link_handle_hpd_rx_irq()
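
For illustration, a rough sketch of how a caller might consume the new deferral option (parameter names follow the description above; the exact upstream prototype may differ, and schedule_deferred_hpd_rx_work() is a hypothetical helper):

	bool link_loss = false;
	bool has_left_work = false;

	/* Only do the lightweight handling (e.g. DPCD R/W) now and report
	 * whether anything that needs dm.dc_lock was left over.
	 */
	dc_link_handle_hpd_rx_irq(link, &hpd_irq_data, &link_loss,
				  true /* defer_handling */, &has_left_work);

	if (has_left_work || link_loss)
		schedule_deferred_hpd_rx_work(link);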

Reviewed-by: Mikita Lipski 
Acked-by: Mikita Lipski 
Signed-off-by: Wayne Lin 
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 92 ---
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  3 +
 2 files changed, 63 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 44d7826e7654..f6589dc7a370 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2746,7 +2746,7 @@ void decide_link_settings(struct dc_stream_state *stream,
 }
 
 /*Short Pulse IRQ***/
-static bool allow_hpd_rx_irq(const struct dc_link *link)
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
 {
/*
 * Don't handle RX IRQ unless one of following is met:
@@ -3180,7 +3180,7 @@ static void dp_test_get_audio_test_data(struct dc_link 
*link, bool disable_video
}
 }
 
-static void handle_automated_test(struct dc_link *link)
+void dc_link_dp_handle_automated_test(struct dc_link *link)
 {
union test_request test_request;
union test_response test_response;
@@ -3229,17 +3229,50 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
 }
 
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data 
*out_hpd_irq_dpcd_data, bool *out_link_loss)
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+   int i;
+   struct pipe_ctx *pipe_ctx;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   pipe_ctx = >dc->current_state->res_ctx.pipe_ctx[i];
+   if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == 
link)
+   break;
+   }
+
+   if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+   return;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   pipe_ctx = >dc->current_state->res_ctx.pipe_ctx[i];
+   if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off 
&&
+   pipe_ctx->stream->link == link && 
!pipe_ctx->prev_odm_pipe) {
+   core_link_disable_stream(pipe_ctx);
+   }
+   }
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   pipe_ctx = >dc->current_state->res_ctx.pipe_ctx[i];
+   if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off 
&&
+   pipe_ctx->stream->link == link && 
!pipe_ctx->prev_odm_pipe) {
+   core_link_enable_stream(link->dc->current_state, 
pipe_ctx);
+   }
+   }
+}
+
+static bool handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data 
*out_hpd_irq_dpcd_data, bool *out_link_loss,
+   bool defer_handling, 
bool *has_left_work)
 {
union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
enum dc_status result;
bool status = false;
-   struct pipe_ctx *pipe_ctx;
-   int i;
 
if (out_link_loss)
*out_link_loss = false;
+
+   if (has_left_work)
+   *has_left_work = false;
/* For use cases related to down stream connection status change,
 * PSR and device auto test, refer to function handle_sst_hpd_irq
 * in DAL2.1*/
@@ -3271,11 +3304,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, 
union hpd_irq_data *out_hpd
_service_clear.raw,
sizeof(device_service_clear.raw));
device_service_clear.raw = 0;
-   handle_automated_test(link);
+   if (defer_handling && has_left_work)
+   *has_left_work = true;
+   else
+   dc_link_dp_handle_automated_test(link);
return false;
}
 
-   if (!allow_hpd_rx_irq(link)) {
+   if (!dc_link_dp_allow_hpd_rx_irq(link)) {
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
@@ -3289,12 +3325,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link

[PATCH 02/19] drm/amd/display: Initialize lt_settings on instantiation

2021-08-27 Thread Mikita Lipski
From: Michael Strauss 

[WHY]
lt_settings' pointers remain uninitialized but non-zero if the display fails
to light up with no DPCD/EDID info populated, leading to a hang when they are accessed
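
As a small generic illustration of the fix (not driver code): an automatic struct left uninitialized has indeterminate contents, so embedded pointers can hold non-NULL garbage, while "= {0}" zero-initializes every member.

	struct example { int *ptr; int n; };

	void demo(void)
	{
		struct example bad;           /* bad.ptr is indeterminate garbage */
		struct example good = {0};    /* good.ptr == NULL, good.n == 0 */

		(void)bad;                    /* reading bad.ptr would be undefined */
		if (good.ptr)                 /* reliably false */
			*good.ptr = 1;
	}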

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Mikita Lipski 
Signed-off-by: Michael Strauss 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 8b35cd9d4c01..20bbde8524b8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1561,7 +1561,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct dc_link_settings *link_setting)
 {
-   struct link_training_settings lt_settings;
+   struct link_training_settings lt_settings = {0};
 
dp_decide_training_settings(
link,
@@ -1707,7 +1707,7 @@ enum link_training_result 
dc_link_dp_perform_link_training(
bool skip_video_pattern)
 {
enum link_training_result status = LINK_TRAINING_SUCCESS;
-   struct link_training_settings lt_settings;
+   struct link_training_settings lt_settings = {0};
enum dp_link_encoding encoding =
dp_get_link_encoding_format(link_settings);
 
@@ -1944,7 +1944,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 struct dc_link_settings *link_settings,
 struct dc_link_training_overrides *lt_overrides)
 {
-   struct link_training_settings lt_settings;
+   struct link_training_settings lt_settings = {0};
enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
-- 
2.25.1



[PATCH 13/19] drm/amd/display: Fix for null pointer access for ddc pin and aux engine.

2021-08-27 Thread Mikita Lipski
From: Meenakshikumar Somasundaram 

[Why]
The ddc pin and aux engine pointers can be NULL, so they need to be checked
before they are dereferenced.

[How]
Add NULL checks around the ddc pin and aux engine accesses.

Reviewed-by: Jimmy Kizito 
Acked-by: Mikita Lipski 
Signed-off-by: Meenakshikumar Somasundaram 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  | 12 +---
 drivers/gpu/drm/amd/display/include/dal_asic_id.h |  2 +-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 2fb88e54a4bf..a75487ed1bb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -630,8 +630,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
enum aux_return_code_type operation_result;
bool retry_on_defer = false;
struct ddc *ddc_pin = ddc->ddc_pin;
-   struct dce_aux *aux_engine = 
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
-   struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+   struct dce_aux *aux_engine = NULL;
+   struct aux_engine_dce110 *aux110 = NULL;
uint32_t defer_time_in_ms = 0;
 
int aux_ack_retries = 0,
@@ -640,6 +640,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
aux_timeout_retries = 0,
aux_invalid_reply_retries = 0;
 
+   if (ddc_pin) {
+   aux_engine = 
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+   aux110 = FROM_AUX_ENGINE(aux_engine);
+   }
+
if (!payload->reply) {
payload_reply = false;
payload->reply = 
@@ -666,7 +671,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 
case AUX_TRANSACTION_REPLY_AUX_DEFER:
/* polling_timeout_period is in us */
-   defer_time_in_ms += 
aux110->polling_timeout_period / 1000;
+   if (aux110)
+   defer_time_in_ms += 
aux110->polling_timeout_period / 1000;
++aux_defer_retries;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h 
b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 381c17caace1..3d2f0817e40a 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -227,7 +227,7 @@ enum {
 #define FAMILY_YELLOW_CARP 146
 
 #define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02// TODO: DCN31 - update with correct B0 
ID
+#define YELLOW_CARP_B0 0x20
 #define YELLOW_CARP_UNKNOWN 0xFF
 
 #ifndef ASICREV_IS_YELLOW_CARP
-- 
2.25.1



[PATCH 11/19] drm/amd/display: Add flag to detect dpms force off during HPD

2021-08-27 Thread Mikita Lipski
From: Aurabindo Pillai 

[Why] When a connector is unplugged, DPMS is forced off so that some
connector allocations can be cleared. This happens outside the commit
sequence requested by userspace and leaves HUBP blanked. Because HUBP is
blanked when the surface flip address is written, a non-blocking commit
that queues a flip will time out waiting for flip_done.

[How] Add a marker to DM's crtc state and use this field to indicate
whether DPMS was forced off during HPD handling. Check for this marker
before queuing the flip.
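
Conceptually, the commit path then gates the pageflip completion event on the new marker, roughly (condensed from the amdgpu_dm_commit_planes() hunk below):

	if (acrtc_attach->base.state->event &&
	    acrtc_state->active_planes > 0 &&
	    !acrtc_state->force_dpms_off) {
		/* only then arm vblank and queue the pageflip event */
	}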

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Mikita Lipski 
Signed-off-by: Aurabindo Pillai 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 20 +--
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  2 ++
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c| 16 +++
 3 files changed, 28 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 54745ed5898c..0ed0468d0533 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2267,7 +2267,7 @@ static void dm_gpureset_commit_state(struct dc_state 
*dc_state,
return;
 }
 
-static void dm_set_dpms_off(struct dc_link *link)
+static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state 
*acrtc_state)
 {
struct dc_stream_state *stream_state;
struct amdgpu_dm_connector *aconnector = link->priv;
@@ -2288,6 +2288,7 @@ static void dm_set_dpms_off(struct dc_link *link)
}
 
stream_update.stream = stream_state;
+   acrtc_state->force_dpms_off = true;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
 stream_state, _update,
 stream_state->ctx->dc->current_state);
@@ -2725,13 +2726,17 @@ static void handle_hpd_irq(void *param)
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = 
to_dm_connector_state(connector->state);
-#endif
+   struct dm_crtc_state *dm_crtc_state = NULL;
 
if (adev->dm.disable_hpd_irq)
return;
 
+   if (dm_con_state->base.state && dm_con_state->base.crtc)
+   dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
+   dm_con_state->base.state,
+   dm_con_state->base.crtc));
+
/*
 * In case of failure or MST no need to update connector status or 
notify the OS
 * since (for MST case) MST does this in its own context.
@@ -2763,8 +2768,9 @@ static void handle_hpd_irq(void *param)
 
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none &&
-   aconnector->dc_link->type == dc_connection_none)
-   dm_set_dpms_off(aconnector->dc_link);
+   aconnector->dc_link->type == dc_connection_none &&
+   dm_crtc_state)
+   dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
 
amdgpu_dm_update_connector_after_detect(aconnector);
 
@@ -6088,6 +6094,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+   state->force_dpms_off = cur->force_dpms_off;
/* TODO Duplicate dc_stream after objects are stream object is 
flattened */
 
return >base;
@@ -8740,7 +8747,8 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 * and rely on sending it from software.
 */
if (acrtc_attach->base.state->event &&
-   acrtc_state->active_planes > 0) {
+   acrtc_state->active_planes > 0 &&
+   !acrtc_state->force_dpms_off) {
drm_crtc_vblank_get(pcrtc);
 
spin_lock_irqsave(>dev->event_lock, flags);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 9ac932677c55..97b13017ce18 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -586,6 +586,8 @@ struct dm_crtc_state {
 
bool dsc_force_changed;
bool vrr_supported;
+
+   bool force_dpms_off;
struct mod_freesync_config freesync_config;
struct dc_info_packet vrr_i

[PATCH 09/19] drm/amd/display: Fork thread to offload work of hpd_rx_irq

2021-08-27 Thread Mikita Lipski
From: Wayne Lin 

[Why]
Currently, we try to take dm.dc_lock in handle_hpd_rx_irq() when link loss
happens, which is risky and can cause a deadlock. For example, if we are in
the middle of enabling MST streams and the monitor toggles a short HPD to
signal link loss, handle_hpd_rx_irq() blocks because the stream enabling
flow already holds dc_lock. However, under MST, enabling streams involves
communication with remote sinks, which requires handle_hpd_rx_irq() to
handle sideband messages. Thus, we deadlock.

[How]
The target is to have handle_hpd_rx_irq() finish as soon as possible, so we
can react to the interrupt quickly. Besides, we should avoid grabbing
dm.dc_lock within handle_hpd_rx_irq() to avoid the deadlock situation.

Firstly, revert patches which introduced to use dm.dc_lock in
handle_hpd_rx_irq():

* ("drm/amd/display: NULL pointer error during ")

* ("drm/amd/display: Only one display lights up while using MST")

* ("drm/amd/display: take dc_lock in short pulse handler only")

Instead, create work to handle irq events which needs dm.dc_lock.
Besides:

* Create struct hpd_rx_irq_offload_work_queue for each link to handle
  its short hpd events

* Avoid handling link loss / automated test if the link is disconnected

* Defer work that needs dc_lock out of dc_link_handle_hpd_rx_irq(). This
  function should only handle simple tasks for us (e.g. DPCD R/W).
  However, the deferred work should still be handled in the order that
  dc_link_handle_hpd_rx_irq() used to handle it.

* Change function name dm_handle_hpd_rx_irq() to
  dm_handle_mst_sideband_msg() to be more specific
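
A rough sketch of the offload pattern described above (helper name and exact field layout are illustrative; the real definitions live in amdgpu_dm.h):

	static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
						 union hpd_irq_data hpd_irq_data)
	{
		struct hpd_rx_irq_offload_work *offload_work;

		offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
		if (!offload_work)
			return;

		INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
		offload_work->data = hpd_irq_data;
		offload_work->offload_wq = offload_wq;

		/* dm_handle_hpd_rx_offload_work() then runs in process context,
		 * where taking dm.dc_lock is safe.
		 */
		queue_work(offload_wq->wq, &offload_work->work);
	}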

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Mikita Lipski 
Signed-off-by: Wayne Lin 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 202 +++---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  49 -
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |   9 +-
 drivers/gpu/drm/amd/display/dc/dc_link.h  |   6 +-
 4 files changed, 219 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0a1dd25e567d..54745ed5898c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1083,6 +1083,83 @@ static struct vblank_workqueue 
*vblank_create_workqueue(struct amdgpu_device *ad
return vblank_work;
 }
 #endif
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+   struct hpd_rx_irq_offload_work *offload_work;
+   struct amdgpu_dm_connector *aconnector;
+   struct dc_link *dc_link;
+   struct amdgpu_device *adev;
+   enum dc_connection_type new_connection_type = dc_connection_none;
+   unsigned long flags;
+
+   offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+   aconnector = offload_work->offload_wq->aconnector;
+
+   if (!aconnector) {
+   DRM_ERROR("Can't retrieve aconnector in 
hpd_rx_irq_offload_work");
+   goto skip;
+   }
+
+   adev = drm_to_adev(aconnector->base.dev);
+   dc_link = aconnector->dc_link;
+
+   mutex_lock(>hpd_lock);
+   if (!dc_link_detect_sink(dc_link, _connection_type))
+   DRM_ERROR("KMS: Failed to detect connector\n");
+   mutex_unlock(>hpd_lock);
+
+   if (new_connection_type == dc_connection_none)
+   goto skip;
+
+   if (amdgpu_in_reset(adev))
+   goto skip;
+
+   mutex_lock(>dm.dc_lock);
+   if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+   dc_link_dp_handle_automated_test(dc_link);
+   else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+   hpd_rx_irq_check_link_loss_status(dc_link, 
_work->data) &&
+   dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+   dc_link_dp_handle_link_loss(dc_link);
+   spin_lock_irqsave(_work->offload_wq->offload_lock, 
flags);
+   offload_work->offload_wq->is_handling_link_loss = false;
+   spin_unlock_irqrestore(_work->offload_wq->offload_lock, 
flags);
+   }
+   mutex_unlock(>dm.dc_lock);
+
+skip:
+   kfree(offload_work);
+
+}
+
+static struct hpd_rx_irq_offload_work_queue 
*hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+   int max_caps = dc->caps.max_links;
+   int i = 0;
+   struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+   hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), 
GFP_KERNEL);
+
+   if (!hpd_rx_offload_wq)
+   return NULL;
+
+
+   for (i = 0; i < max_caps; i++) {
+   hpd_rx_offload_wq[i].wq =
+   
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+   if (hpd_rx_offload_w

[PATCH 00/19] DC Patches August 23, 2021

2021-08-27 Thread Mikita Lipski
This DC patchset brings improvements in multiple areas. In summary, we 
highlight:

* Memory leak fixes and removal of false positive warnings caught by Coverity
* Backlight fix for Carrizo/Stoney laptops
* Unblocking ABM when ODM is enabled on DCN31 and up
* Fork thread to offload work of hpd_rx_irq to avoid deadlocks
* Expose DSC overhead bw parameter to DM


Angus Wang (1):
  drm/amd/display: cleanup idents after a revert

Anson Jacob (3):
  drm/amd/display: Fix false BAD_FREE warning from Coverity
  drm/amd/display: Fix multiple memory leaks reported by coverity
  drm/amd/display: Revert "Directly retrain link from debugfs"

Anthony Koo (1):
  drm/amd/display: [FW Promotion] Release 0.0.81

Aric Cyr (1):
  drm/amd/display: 3.2.151

Aurabindo Pillai (1):
  drm/amd/display: Add flag to detect dpms force off during HPD

Harry Wentland (1):
  drm/amd/display: Get backlight from PWM if DMCU is not initialized

Jaehyun Chung (1):
  drm/amd/display: Add regamma/degamma coefficients and set sRGB when TF
is BT709

Josip Pavic (1):
  drm/amd/display: unblock abm when odm is enabled only on configs that
support it

Leo (Hanghong) Ma (2):
  drm/amd/display: Add DPCD writes at key points
  drm/amd/display: Fix system hang at boot

Meenakshikumar Somasundaram (1):
  drm/amd/display: Fix for null pointer access for ddc pin and aux
engine.

Michael Strauss (1):
  drm/amd/display: Initialize lt_settings on instantiation

Oliver Logush (1):
  drm/amd/display: Drop unused privacy_mask setters and getters

Wayne Lin (2):
  drm/amd/display: Add option to defer works of hpd_rx_irq
  drm/amd/display: Fork thread to offload work of hpd_rx_irq

Wenjing Liu (2):
  drm/amd/display: expose dsc overhead bw in dc dsc header
  drm/amd/display: move bpp range decision in decide dsc bw range
function

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 222 ++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  51 +++-
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |   3 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c|  16 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  23 +-
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |  98 +---
 .../drm/amd/display/dc/core/dc_link_dpcd.c|  11 +-
 .../drm/amd/display/dc/core/dc_link_hwss.c|  13 +-
 drivers/gpu/drm/amd/display/dc/dc.h   |   3 +-
 drivers/gpu/drm/amd/display/dc/dc_dsc.h   |  11 +-
 drivers/gpu/drm/amd/display/dc/dc_link.h  |   9 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  |  12 +-
 .../drm/amd/display/dc/dce/dce_panel_cntl.c   |  10 -
 .../amd/display/dc/dce/dce_stream_encoder.c   |   2 +
 .../display/dc/dce110/dce110_hw_sequencer.c   |  46 +++-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |   4 +-
 .../display/dc/dcn10/dcn10_stream_encoder.c   |  10 +
 .../display/dc/dcn10/dcn10_stream_encoder.h   |   2 +
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|  10 +-
 .../display/dc/dcn20/dcn20_stream_encoder.c   |   5 +
 .../display/dc/dcn20/dcn20_stream_encoder.h   |   1 +
 .../drm/amd/display/dc/dcn30/dcn30_hwseq.c|   2 +-
 .../gpu/drm/amd/display/dc/dcn30/dcn30_init.c |   1 +
 .../drm/amd/display/dc/dcn30/dcn30_resource.c |   6 +-
 .../amd/display/dc/dcn301/dcn301_resource.c   |   6 +-
 .../amd/display/dc/dcn302/dcn302_resource.c   |   6 +-
 .../drm/amd/display/dc/dcn31/dcn31_hwseq.c|  17 +-
 .../gpu/drm/amd/display/dc/dcn31/dcn31_init.c |   1 -
 .../drm/amd/display/dc/dcn31/dcn31_resource.c |   6 +-
 .../dc/dml/dcn20/display_mode_vba_20v2.c  |   2 +-
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c   | 196 
 drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h   |   6 -
 .../amd/display/dc/inc/hw/stream_encoder.h|   2 +
 .../gpu/drm/amd/display/dc/inc/link_hwss.h|   1 +
 .../dc/virtual/virtual_stream_encoder.c   |   2 +
 .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   |   4 +-
 .../gpu/drm/amd/display/include/dal_asic_id.h |   2 +-
 .../gpu/drm/amd/display/include/dpcd_defs.h   |   1 +
 .../amd/display/include/link_service_types.h  |  16 ++
 .../amd/display/modules/color/color_gamma.c   |  60 +++--
 40 files changed, 627 insertions(+), 272 deletions(-)

-- 
2.25.1



[PATCH 03/19] drm/amd/display: Add DPCD writes at key points

2021-08-27 Thread Mikita Lipski
From: "Leo (Hanghong) Ma" 

This reverts "drm/amd/display: Revert "Add DPCD writes at key points"."
The following patch will fix the system hang issue.

Reviewed-by: Aric Cyr 
Acked-by: Mikita Lipski 
Signed-off-by: Leo (Hanghong) Ma 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  7 +++
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |  3 ++-
 .../drm/amd/display/dc/core/dc_link_hwss.c| 13 +++-
 drivers/gpu/drm/amd/display/dc/dc.h   |  1 +
 .../amd/display/dc/dce/dce_stream_encoder.c   |  2 ++
 .../display/dc/dce110/dce110_hw_sequencer.c   | 21 ---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  4 ++--
 .../display/dc/dcn10/dcn10_stream_encoder.c   | 10 +
 .../display/dc/dcn10/dcn10_stream_encoder.h   |  2 ++
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 10 -
 .../display/dc/dcn20/dcn20_stream_encoder.c   |  5 +
 .../display/dc/dcn20/dcn20_stream_encoder.h   |  1 +
 .../drm/amd/display/dc/dcn30/dcn30_hwseq.c|  2 +-
 .../drm/amd/display/dc/dcn31/dcn31_hwseq.c|  2 +-
 .../amd/display/dc/inc/hw/stream_encoder.h|  2 ++
 .../gpu/drm/amd/display/dc/inc/link_hwss.h|  1 +
 .../dc/virtual/virtual_stream_encoder.c   |  2 ++
 .../gpu/drm/amd/display/include/dpcd_defs.h   |  1 +
 .../amd/display/include/link_service_types.h  | 16 ++
 19 files changed, 95 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f68a0d9543f4..3c3637fcc2b8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3220,6 +3220,7 @@ void core_link_enable_stream(
 {
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
+   struct dc_link *link = stream->sink->link;
enum dc_status status;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
@@ -3248,6 +3249,9 @@ void core_link_enable_stream(
stream->use_vsc_sdp_for_colorimetry,

stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
 
+   if (dc_is_dp_signal(pipe_ctx->stream->signal))
+   dp_source_sequence_trace(link, 
DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))

pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
@@ -3283,6 +3287,9 @@ void core_link_enable_stream(
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
 
+   if (dc_is_dp_signal(pipe_ctx->stream->signal))
+   dp_source_sequence_trace(link, 
DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 20bbde8524b8..44d7826e7654 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1814,6 +1814,7 @@ bool perform_link_training_with_retries(
 */
link_enc->funcs->connect_dig_be_to_fe(link_enc,

pipe_ctx->stream_res.stream_enc->id, true);
+   dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
 
for (j = 0; j < attempts; ++j) {
 
@@ -4387,7 +4388,7 @@ bool dc_link_dp_set_test_pattern(
 * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
 */
/* Blank stream */
-   
pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+   pipes->stream_res.stream_enc->funcs->dp_blank(link, 
pipe_ctx->stream_res.stream_enc);
}
 
dp_set_hw_test_pattern(link, test_pattern,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 9c51cd09dcf1..2a064f7b669e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -61,6 +61,13 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
sizeof(state));
 }
 
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+{
+   if (link->dc->debug.enable_driver_sequence_debug)
+   core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
+   _test_mode, sizeof(dp_

[PATCH 10/19] drm/amd/display: unblock abm when odm is enabled only on configs that support it

2021-08-27 Thread Mikita Lipski
From: Josip Pavic 

[Why]
When ODM is enabled, ABM is blocked on dcn31 but unblocked on dcn30.

Since the dcn31 firmware is now able to handle ABM interop with ODM, it
is no longer necessary to block ABM when ODM is enabled.

Since the dcn30 firmware does not handle ABM interop with ODM, leaving
that combination unblocked can lead to one side of the screen appearing
brighter than the other.

[How]
When ODM is enabled, unblock abm on dcn31 and block it on dcn30

Reviewed-by: Anthony Koo 
Acked-by: Mikita Lipski 
Signed-off-by: Josip Pavic 
---
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c |  1 +
 .../gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c| 15 ---
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c |  1 -
 3 files changed, 1 insertion(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3a5b53dd2f6d..93f32a312fee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
+   .is_abm_supported = dcn21_is_abm_supported
 };
 
 static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 07a9e0feda99..e7994c4f7f02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -584,21 +584,6 @@ void dcn31_reset_hw_ctx_wrap(
}
 }
 
-bool dcn31_is_abm_supported(struct dc *dc,
-   struct dc_state *context, struct dc_stream_state *stream)
-{
-   int i;
-
-   for (i = 0; i < dc->res_pool->pipe_count; i++) {
-   struct pipe_ctx *pipe_ctx = >res_ctx.pipe_ctx[i];
-
-   if (pipe_ctx->stream == stream &&
-   (pipe_ctx->prev_odm_pipe == NULL && 
pipe_ctx->next_odm_pipe == NULL))
-   return true;
-   }
-   return false;
-}
-
 static void apply_riommu_invalidation_wa(struct dc *dc)
 {
struct dce_hwseq *hws = dc->hwseq;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index aaf2dbd095fe..9b0f7c22e7e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -97,7 +97,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
-   .is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
-- 
2.25.1



[PATCH 12/19] drm/amd/display: Fix false BAD_FREE warning from Coverity

2021-08-27 Thread Mikita Lipski
From: Anson Jacob 

This is an attempt to fix a false warning raised by Coverity
via multiple CIDs.

Addresses-Coverity-ID: 1487412 ("Free of address-of expression")
Cc: Wesley Chalmers 

Reviewed-by: Wesley Chalmers 
Acked-by: Mikita Lipski 
Signed-off-by: Anson Jacob 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index 72970e49800a..7f25c11f4248 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -176,12 +176,15 @@ static void dpcd_reduce_address_range(
uint8_t * const reduced_data,
const uint32_t reduced_size)
 {
-   const uint32_t reduced_end_address = END_ADDRESS(reduced_address, 
reduced_size);
-   const uint32_t extended_end_address = END_ADDRESS(extended_address, 
extended_size);
const uint32_t offset = reduced_address - extended_address;
 
-   if (extended_end_address == reduced_end_address && extended_address == 
reduced_address)
-   return; /* extended and reduced address ranges point to the 
same data */
+   /*
+* If the address is same, address was not extended.
+* So we do not need to free any memory.
+* The data is in original buffer(reduced_data).
+*/
+   if (extended_data == reduced_data)
+   return;
 
memcpy(_data[offset], reduced_data, reduced_size);
kfree(extended_data);
-- 
2.25.1



[PATCH 19/19] drm/amd/display: Add regamma/degamma coefficients and set sRGB when TF is BT709

2021-08-27 Thread Mikita Lipski
From: Jaehyun Chung 

[Why]
In the YUV case, we need to set the input TF to sRGB instead of BT709,
even though the input TF type is distributed. sRGB was not being used
because the pixel format was not being set in the surface update
sequence.
Also, we were using the same coefficients for the degamma and regamma
formulas, causing the cutoff point of the linear section of the curve
to be incorrect.

[How]
Set pixel format in the surface update sequence. Add separate
coefficient arrays for regamma and degamma.
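
For reference, the sRGB entries in the two coefficient tables correspond to the standard sRGB piecewise transfer functions (the fixed-point scaling is handled by dc_fixpt_from_fraction() in the hunk below):

	regamma (linear -> encoded):  V = 12.92 * L                    for L <= 0.0031308
	                              V = 1.055 * L^(1/2.4) - 0.055    otherwise

	degamma (encoded -> linear):  L = V / 12.92                    for V <= 0.04045
	                              L = ((V + 0.055) / 1.055)^2.4    otherwise

The linear-section cutoff differs between the two directions (0.0031308 vs 0.04045), which is why sharing a single coefficient array put the cutoff in the wrong place for one of them.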

Reviewed-by: Krunoslav Kovac 
Acked-by: Mikita Lipski 
Signed-off-by: Jaehyun Chung 
---
 .../amd/display/modules/color/color_gamma.c   | 60 ---
 1 file changed, 40 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index ef742d95ef05..275f11f8bea3 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -54,12 +54,18 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
  * just multiply with 2^gamma which can be computed once, and save the result 
so we
  * recursively compute all the values.
  */
-   
/*sRGB   709 2.2 2.4 P3*/
-static const int32_t gamma_numerator01[] = { 31308,18, 0,  0,  
0};
-static const int32_t gamma_numerator02[] = { 12920,4500,   0,  0,  
0};
-static const int32_t gamma_numerator03[] = { 55,   99, 0,  
0,  0};
-static const int32_t gamma_numerator04[] = { 55,   99, 0,  
0,  0};
-static const int32_t gamma_numerator05[] = { 2400, 2200,   2200, 2400, 
2600};
+   
 /*sRGB 709 2.2 2.4 P3*/
+static const int32_t regamma_numerator01[] = { 31308,   18, 0,  0,  0};
+static const int32_t regamma_numerator02[] = { 12920,   4500,   0,  0,  0};
+static const int32_t regamma_numerator03[] = { 55,  99, 0,  0,  0};
+static const int32_t regamma_numerator04[] = { 55,  99, 0,  0,  0};
+static const int32_t regamma_numerator05[] = { 2400,2200,   2200, 2400, 
2600};
+
+static const int32_t degamma_numerator01[] = { 40450,   81, 0,  0,  0};
+static const int32_t degamma_numerator02[] = { 12920,   4500,   0,  0,  0};
+static const int32_t degamma_numerator03[] = { 55,  99, 0,  0,  0};
+static const int32_t degamma_numerator04[] = { 55,  99, 0,  0,  0};
+static const int32_t degamma_numerator05[] = { 2400,2200,   2200, 2400, 
2600};
 
 /* one-time setup of X points */
 void setup_x_points_distribution(void)
@@ -288,7 +294,8 @@ struct dividers {
 };
 
 
-static bool build_coefficients(struct gamma_coefficients *coefficients, enum 
dc_transfer_func_predefined type)
+static bool build_coefficients(struct gamma_coefficients *coefficients,
+   enum dc_transfer_func_predefined type, bool isRegamma)
 {
 
uint32_t i = 0;
@@ -311,16 +318,29 @@ static bool build_coefficients(struct gamma_coefficients 
*coefficients, enum dc_
}
 
do {
-   coefficients->a0[i] = dc_fixpt_from_fraction(
-   gamma_numerator01[index], 1000);
-   coefficients->a1[i] = dc_fixpt_from_fraction(
-   gamma_numerator02[index], 1000);
-   coefficients->a2[i] = dc_fixpt_from_fraction(
-   gamma_numerator03[index], 1000);
-   coefficients->a3[i] = dc_fixpt_from_fraction(
-   gamma_numerator04[index], 1000);
-   coefficients->user_gamma[i] = dc_fixpt_from_fraction(
-   gamma_numerator05[index], 1000);
+   if (isRegamma) {
+   coefficients->a0[i] = dc_fixpt_from_fraction(
+   regamma_numerator01[index], 1000);
+   coefficients->a1[i] = dc_fixpt_from_fraction(
+   regamma_numerator02[index], 1000);
+   coefficients->a2[i] = dc_fixpt_from_fraction(
+   regamma_numerator03[index], 1000);
+   coefficients->a3[i] = dc_fixpt_from_fraction(
+   regamma_numerator04[index], 1000);
+   coefficients->user_gamma[i] = dc_fixpt_from_fraction(
+   regamma_numerator05[index], 1000);
+   } else {
+   coefficients->a0[i] = dc_fixpt_from_fraction(
+   degamma_numerator01[index], 1000);
+   coefficients->a1[i] = dc_fixpt_from_fraction(
+   degamma_numerator02[index], 1000);
+   coefficients->a2[i] = dc_fixpt_from_fraction(
+

[PATCH 17/19] drm/amd/display: Get backlight from PWM if DMCU is not initialized

2021-08-27 Thread Mikita Lipski
From: Harry Wentland 

On Carrizo/Stoney systems we set the backlight through panel_cntl, i.e.
directly via the PWM registers, if DMCU is not initialized. However, we
always read it back through the ABM registers, which leads to a
mismatch and forces atomic_commit to program the backlight
each time.

Instead make sure we use the same logic for backlight readback,
i.e. read it from panel_cntl if DMCU is not initialized.

We also need to remove some extraneous and incorrect calculations
at the end of dce_get_16_bit_backlight_from_pwm.

Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1666
Cc: sta...@vger.kernel.org

Reviewed-by: Josip Pavic 
Acked-by: Mikita Lipski 
Signed-off-by: Harry Wentland 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c| 16 
 .../gpu/drm/amd/display/dc/dce/dce_panel_cntl.c  | 10 --
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3c3637fcc2b8..7928852185b8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct 
dc_link *link)
 
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-
struct abm *abm = get_abm_from_stream_res(link);
+   struct panel_cntl *panel_cntl = link->panel_cntl;
+   struct dc  *dc = link->ctx->dc;
+   struct dmcu *dmcu = dc->res_pool->dmcu;
+   bool fw_set_brightness = true;
 
-   if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-   return DC_ERROR_UNEXPECTED;
+   if (dmcu)
+   fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-   return (int) abm->funcs->get_current_backlight(abm);
+   if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+   return panel_cntl->funcs->get_current_backlight(panel_cntl);
+   else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+   return (int) abm->funcs->get_current_backlight(abm);
+   else
+   return DC_ERROR_UNEXPECTED;
 }
 
 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e92339235863..e8570060d007 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -49,7 +49,6 @@
 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl 
*panel_cntl)
 {
uint64_t current_backlight;
-   uint32_t round_result;
uint32_t bl_period, bl_int_count;
uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct 
panel_cntl *panel_c
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
 
-   current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-   round_result = (uint32_t)(current_backlight & 0x);
-
-   round_result = (round_result >> (bl_int_count-1)) & 1;
-
-   current_backlight >>= bl_int_count;
-   current_backlight += round_result;
-
return (uint32_t)(current_backlight);
 }
 
-- 
2.25.1



[PATCH 07/19] drm/amd/display: move bpp range decision in decide dsc bw range function

2021-08-27 Thread Mikita Lipski
From: Wenjing Liu 

[why]
Previously, get dsc bw range was only used to compute the DSC bandwidth
range based on the given fixed bpp min/max input.
This change merges any specs-, signal- and timing-specific bpp range
decisions into this function, so the function makes its decision with
all aspects considered.
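
As a small worked example of the merged decision (based on the hunk below): if the caller passes a minimum of 8 bpp and a maximum of 16 bpp (in the x16 fixed-point format the function uses) and the timing carries a fixed dsc_fixed_bits_per_pixel_x16 of 12 bpp, the returned range collapses to [12, 12]; if the fixed bpp falls outside [8, 16], the range stays zeroed from the memset, i.e. no valid target bpp is reported.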

Reviewed-by: George Shen 
Acked-by: Mikita Lipski 
Signed-off-by: Wenjing Liu 
---
 drivers/gpu/drm/amd/display/dc/dc_dsc.h |   6 +-
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 132 +++-
 2 files changed, 76 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h 
b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index c8cc6a448c36..684713b2cff7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -51,7 +51,6 @@ struct dc_dsc_policy {
int min_slice_height; // Must not be less than 8
uint32_t max_target_bpp;
uint32_t min_target_bpp;
-   uint32_t preferred_bpp_x16;
bool enable_dsc_when_not_needed;
 };
 
@@ -86,6 +85,11 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const int num_slices_h,
const bool is_dp);
 
+/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to 
DM,
+ * and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
+ * Hardware/specs limitation should not be writable by DM.
+ * It should be decoupled from DM specific policy and named differently.
+ */
 void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 8c04f9f42a3b..b510080990d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -39,6 +39,21 @@ static bool dsc_policy_enable_dsc_when_not_needed;
 
 static bool dsc_policy_disable_dsc_stream_overhead;
 
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+static bool decide_dsc_bandwidth_range(
+   const uint32_t min_bpp_x16,
+   const uint32_t max_bpp_x16,
+   const uint32_t num_slices_h,
+   const struct dsc_enc_caps *dsc_caps,
+   const struct dc_crtc_timing *timing,
+   struct dc_dsc_bw_range *range);
+
 static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int 
*buff_block_size)
 {
 
@@ -285,10 +300,12 @@ static uint32_t compute_bpp_x16_from_target_bandwidth(
return dc_fixpt_floor(bpp_x16);
 }
 
-/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, 
and timing's pixel clock
- * and uncompressed bandwidth.
+/* Decide DSC bandwidth range based on signal, timing, specs specific and 
input min and max
+ * requirements.
+ * The range output includes decided min/max target bpp, the respective 
bandwidth requirements
+ * and native timing bandwidth requirement when DSC is not used.
  */
-static void get_dsc_bandwidth_range(
+static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
@@ -296,39 +313,45 @@ static void get_dsc_bandwidth_range(
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range)
 {
-   /* native stream bandwidth */
-   range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
-
-   /* max dsc target bpp */
-   range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
-   max_bpp_x16, num_slices_h, dsc_caps->is_dp);
-   range->max_target_bpp_x16 = max_bpp_x16;
-   if (range->max_kbps > range->stream_kbps) {
-   /* max dsc target bpp is capped to native bandwidth */
-   range->max_kbps = range->stream_kbps;
-   range->max_target_bpp_x16 = 
compute_bpp_x16_from_target_bandwidth(
-   range->max_kbps, timing, num_slices_h,
-   dsc_caps->bpp_increment_div,
-   dsc_caps->is_dp);
+   uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
+
+   memset(range, 0, sizeof(*range));
+
+   /* apply signal, timing, specs and explicitly specified DSC range 
requirements */
+   if (preferred_bpp_x16) {
+   if (preferred_bpp_x16 <= max_bpp_x16 &&
+   preferred_bpp_x16 >= min_bpp_x16) {
+   range->max_target_bpp_x16 = preferred_bpp_x16;
+   range->min_target_bpp_x16 = preferred_bpp_x16;
+   }
}
+   else {
+   range->max_target_bpp_x16 = max_bpp_x16;
+   range->min_targ

[PATCH 01/19] drm/amd/display: cleanup idents after a revert

2021-08-27 Thread Mikita Lipski
From: Angus Wang 

[WHY]
The change has caused high idle memory clock speed and power
consumption at some resolutions and frame rates for Navi10

[HOW]
Reverted change "drm/amd/display: Fixed Intermittent blue
screen on OLED panel"

Reviewed-by: Aric Cyr  
Acked-by: Mikita Lipski 
Signed-off-by: Angus Wang 
---
 .../gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index fbed5304692d..63bbdf8b8678 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2641,7 +2641,7 @@ static void 
dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if 
(mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb]
 == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
-   
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, 
mode_lib->vba.UrgentWatermark))
+   
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, 
mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
-- 
2.25.1






Re: [PATCH v3 01/20] drm/amdgpu: Add error handling to amdgpu_dm_initialize_dp_connector()

2021-04-21 Thread Mikita Lipski

Thanks for the change!

Reviewed-by: Mikita Lipski 

On 2021-04-19 6:55 p.m., Lyude Paul wrote:

While working on moving i2c device registration into drm_dp_aux_init() - I
realized that in order to do so we need to make sure that drivers calling
drm_dp_aux_init() handle any errors it could possibly return. In the
process of doing that, I noticed that the majority of AMD's code for DP
connector creation doesn't attempt to do any real error handling.

So, let's fix this and also cleanup amdgpu_dm_initialize_dp_connector()
while we're at it. This way we can handle the error codes from
drm_dp_aux_init().

Signed-off-by: Lyude Paul 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 29 +++-
  .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 44 +++
  .../display/amdgpu_dm/amdgpu_dm_mst_types.h   |  6 +--
  3 files changed, 45 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a0c8c41e4e57..fc5d315bbb05 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7608,10 +7608,9 @@ static int amdgpu_dm_connector_init(struct 
amdgpu_display_manager *dm,
  
  	aconnector->i2c = i2c;

res = i2c_add_adapter(>base);
-
if (res) {
DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
-   goto out_free;
+   goto fail_free;
}
  
  	connector_type = to_drm_connector_type(link->connector_signal);

@@ -7625,8 +7624,7 @@ static int amdgpu_dm_connector_init(struct 
amdgpu_display_manager *dm,
  
  	if (res) {

DRM_ERROR("connector_init failed\n");
-   aconnector->connector_id = -1;
-   goto out_free;
+   goto fail_id;
}
  
  	drm_connector_helper_add(

@@ -7643,15 +7641,22 @@ static int amdgpu_dm_connector_init(struct 
amdgpu_display_manager *dm,
drm_connector_attach_encoder(
>base, >base);
  
-	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort

-   || connector_type == DRM_MODE_CONNECTOR_eDP)
-   amdgpu_dm_initialize_dp_connector(dm, aconnector, 
link->link_index);
-
-out_free:
-   if (res) {
-   kfree(i2c);
-   aconnector->i2c = NULL;
+   if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+   connector_type == DRM_MODE_CONNECTOR_eDP) {
+   res = amdgpu_dm_initialize_dp_connector(dm, aconnector, 
link->link_index);
+   if (res)
+   goto fail_cleanup;
}
+
+   return 0;
+fail_cleanup:
+   drm_connector_cleanup(>base);
+fail_id:
+   aconnector->connector_id = -1;
+fail_free:
+   kfree(i2c);
+   aconnector->i2c = NULL;
+
return res;
  }
  
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c

index 73cdb9fe981a..3dee9cce9c9e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -425,33 +425,39 @@ static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
  };
  
-void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,

-  struct amdgpu_dm_connector *aconnector,
-  int link_index)
+int amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int link_index)
  {
-   aconnector->dm_dp_aux.aux.name =
-   kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
- link_index);
-   aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
-   aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
+   struct amdgpu_dm_dp_aux *dm_aux = >dm_dp_aux;
+   int ret;
  
-	drm_dp_aux_init(>dm_dp_aux.aux);

-   drm_dp_cec_register_connector(>dm_dp_aux.aux,
- >base);
+   dm_aux->aux.name = kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", 
link_index);
+   if (!dm_aux->aux.name)
+   return -ENOMEM;
+
+   dm_aux->aux.transfer = dm_dp_aux_transfer;
+   dm_aux->ddc_service = aconnector->dc_link->ddc;
+
+   drm_dp_aux_init(_aux->aux);
+   drm_dp_cec_register_connector(_aux->aux, >base);
  
  	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)

-   return;
+   return 0;
  
  	aconnector->mst_mgr.cbs = _mst_cbs;

-   drm_dp_mst_topology_mgr_init(
-   >mst_mgr,
-   adev_to_drm(dm->adev),
-  

Re: [PATCH] drm/amdgpu/display: fix warning when compiling without debugfs

2020-04-08 Thread Mikita Lipski

Reviewed-by: Mikita Lipski 

Thanks,
Mikita

On 2020-04-08 9:31 a.m., Alex Deucher wrote:

Fixes an unused variable warning.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1155fb686e36..417925a0d739 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4738,10 +4738,10 @@ amdgpu_dm_connector_atomic_duplicate_state(struct 
drm_connector *connector)
  static int
  amdgpu_dm_connector_late_register(struct drm_connector *connector)
  {
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
  
-#if defined(CONFIG_DEBUG_FS)

connector_debugfs_init(amdgpu_dm_connector);
  #endif
  




Re: [PATCH 1/2] drm/amd/display: query hdcp capability during link detect

2020-04-06 Thread Mikita Lipski

Both patches look good to me.

The series is Reviewed-by: Mikita Lipski 

Thanks,
Mikita

On 2020-04-01 5:00 p.m., Bhawanpreet Lakha wrote:

[Why]
Query the HDCP caps of a link; they are useful and can be reported to the user

[How]
Create a query function and call it during link detect

Signed-off-by: Bhawanpreet Lakha 
---
  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 56 
  drivers/gpu/drm/amd/display/dc/dc.h   | 41 +
  drivers/gpu/drm/amd/display/dc/dc_link.h  |  3 +
  .../gpu/drm/amd/display/dc/hdcp/hdcp_msg.c| 89 +++
  .../gpu/drm/amd/display/include/hdcp_types.h  |  7 ++
  5 files changed, 196 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a93997ff0419..49c63e27dfe9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -514,6 +514,50 @@ static void link_disconnect_remap(struct dc_sink 
*prev_sink, struct dc_link *lin
link->local_sink = prev_sink;
  }
  
+#if defined(CONFIG_DRM_AMD_DC_HDCP)

+static void query_hdcp_capability(enum signal_type signal, struct dc_link 
*link)
+{
+   struct hdcp_protection_message msg22;
+   struct hdcp_protection_message msg14;
+
+   memset(, 0, sizeof(struct hdcp_protection_message));
+   memset(, 0, sizeof(struct hdcp_protection_message));
+   memset(link->hdcp_caps.rx_caps.raw, 0,
+   sizeof(link->hdcp_caps.rx_caps.raw));
+
+   if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+   link->ddc->transaction_type ==
+   DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+   link->connector_signal == SIGNAL_TYPE_EDP) {
+   msg22.data = link->hdcp_caps.rx_caps.raw;
+   msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+   msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+   } else {
+   msg22.data = >hdcp_caps.rx_caps.fields.version;
+   msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+   msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+   }
+   msg22.version = HDCP_VERSION_22;
+   msg22.link = HDCP_LINK_PRIMARY;
+   msg22.max_retries = 5;
+   dc_process_hdcp_msg(signal, link, );
+
+   if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == 
SIGNAL_TYPE_DISPLAY_PORT_MST) {
+   enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+   msg14.data = >hdcp_caps.bcaps.raw;
+   msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+   msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+   msg14.version = HDCP_VERSION_14;
+   msg14.link = HDCP_LINK_PRIMARY;
+   msg14.max_retries = 5;
+
+   status = dc_process_hdcp_msg(signal, link, );
+   }
+
+}
+#endif
+
  static void read_current_link_settings_on_detect(struct dc_link *link)
  {
union lane_count_set lane_count_set = { {0} };
@@ -606,6 +650,12 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
 
sink_caps->transaction_type);
  
+#if defined(CONFIG_DRM_AMD_DC_HDCP)

+   /* In case of fallback to SST when topology discovery 
below fails
+* HDCP caps will be querried again later by the upper 
layer (caller
+* of this function). */
+   query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, 
link);
+#endif
/*
 * This call will initiate MST topology discovery. Which
 * will detect MST ports and add new DRM connector DRM
@@ -975,6 +1025,9 @@ static bool dc_link_detect_helper(struct dc_link *link,
 * TODO debug why Dell 2413 doesn't like
 *  two link trainings
 */
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+   query_hdcp_capability(sink->sink_signal, link);
+#endif
  
  			// verify link cap for SST non-seamless boot

if (!perform_dp_seamless_boot)
@@ -988,6 +1041,9 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+   query_hdcp_capability(sink->sink_signal, link);
+#endif
}
  
  		/* HDMI-DVI Dongle */

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 92123b0d1196..9235d04c32dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,6 +29,9 @@
  #include "dc_types.h"
  #include "grph_obje

Re: [PATCH] drm/amd/display: Move drm_dp_mst_atomic_check() to the front of dc_validate_global_state()

2020-01-28 Thread Mikita Lipski

Reviewed-by: Mikita Lipski 

Thanks!
Mikita

On 1/28/20 4:44 PM, Zhan Liu wrote:

[Why]
Need to do atomic check first, then validate global state.
If not, when connecting both MST and HDMI displays and
setting a bad mode via xrandr, the system will hang.

[How]
Move drm_dp_mst_atomic_check() to the front of
dc_validate_global_state().

Signed-off-by: Zhan Liu 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 14 ++
  1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index eed3ed7180fd..805d8d84ebb8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8256,6 +8256,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
  #endif
  
+		/*

+* Perform validation of MST topology in the state:
+* We need to perform MST atomic check before calling
+* dc_validate_global_state(), or there is a chance
+* to get stuck in an infinite loop and hang eventually.
+*/
+   ret = drm_dp_mst_atomic_check(state);
+   if (ret)
+   goto fail;
+
if (dc_validate_global_state(dc, dm_state->context, false) != 
DC_OK) {
ret = -EINVAL;
goto fail;
@@ -8284,10 +8294,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
dc_retain_state(old_dm_state->context);
}
}
-   /* Perform validation of MST topology in the state*/
-   ret = drm_dp_mst_atomic_check(state);
-   if (ret)
-   goto fail;
  
  	/* Store the overall update type for use later in atomic check. */

for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {



--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com


Re: [PATCH v2] drm/amd/dm/mst: Ignore payload update failures

2020-01-27 Thread Mikita Lipski




On 1/24/20 5:01 PM, Lyude Paul wrote:

On Fri, 2020-01-24 at 16:46 -0500, Lyude Paul wrote:

On Fri, 2020-01-24 at 14:20 -0500, Mikita Lipski wrote:

On 1/24/20 2:10 PM, Lyude Paul wrote:

Disabling a display on MST can potentially happen after the entire MST
topology has been removed, which means that we can't communicate with
the topology at all in this scenario. Likewise, this also means that we
can't properly update payloads on the topology and as such, it's a good
idea to ignore payload update failures when disabling displays.
Currently, amdgpu makes the mistake of halting the payload update
process when any payload update failures occur, resulting in leaving
DC's local copies of the payload tables out of date.

This ends up causing problems with hotplugging MST topologies, and
causes modesets on the second hotplug to fail like so:

[drm] Failed to updateMST allocation table forpipe idx:1
[ cut here ]
WARNING: CPU: 5 PID: 1511 at
drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2677
update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
Modules linked in: cdc_ether usbnet fuse xt_conntrack nf_conntrack
nf_defrag_ipv6 libcrc32c nf_defrag_ipv4 ipt_REJECT nf_reject_ipv4
nft_counter nft_compat nf_tables nfnetlink tun bridge stp llc sunrpc
vfat fat wmi_bmof uvcvideo snd_hda_codec_realtek snd_hda_codec_generic
snd_hda_codec_hdmi videobuf2_vmalloc snd_hda_intel videobuf2_memops
videobuf2_v4l2 snd_intel_dspcfg videobuf2_common crct10dif_pclmul
snd_hda_codec videodev crc32_pclmul snd_hwdep snd_hda_core
ghash_clmulni_intel snd_seq mc joydev pcspkr snd_seq_device snd_pcm
sp5100_tco k10temp i2c_piix4 snd_timer thinkpad_acpi ledtrig_audio snd
wmi soundcore video i2c_scmi acpi_cpufreq ip_tables amdgpu(O)
rtsx_pci_sdmmc amd_iommu_v2 gpu_sched mmc_core i2c_algo_bit ttm
drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec drm
crc32c_intel serio_raw hid_multitouch r8152 mii nvme r8169 nvme_core
rtsx_pci pinctrl_amd
CPU: 5 PID: 1511 Comm: gnome-shell Tainted: G   O  5.5.0-
rc7Lyude-Test+ #4
Hardware name: LENOVO FA495SIT26/FA495SIT26, BIOS R12ET22W(0.22 )
01/31/2019
RIP: 0010:update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
Code: 28 00 00 00 75 2b 48 8d 65 e0 5b 41 5c 41 5d 41 5e 5d c3 0f b6 06
49 89 1c 24 41 88 44 24 08 0f b6 46 01 41 88 44 24 09 eb 93 <0f> 0b e9
2f ff ff ff e8 a6 82 a3 c2 66 0f 1f 44 00 00 0f 1f 44 00
RSP: 0018:ac428127f5b0 EFLAGS: 00010202
RAX: 0002 RBX: 8d1e166eee80 RCX: 
RDX: ac428127f668 RSI: 8d1e166eee80 RDI: ac428127f610
RBP: ac428127f640 R08: c03d94a8 R09: 
R10: 8d1e24b02000 R11: ac428127f5b0 R12: 8d1e1b83d000
R13: 8d1e1bea0b08 R14: 0002 R15: 0002
FS:  7fab23ffcd80() GS:8d1e28b4()
knlGS:
CS:  0010 DS:  ES:  CR0: 80050033
CR2: 7f151f1711e8 CR3: 0005997c CR4: 003406e0
Call Trace:
   ? mutex_lock+0xe/0x30
   dc_link_allocate_mst_payload+0x9a/0x210 [amdgpu]
   ? dm_read_reg_func+0x39/0xb0 [amdgpu]
   ? core_link_enable_stream+0x656/0x730 [amdgpu]
   core_link_enable_stream+0x656/0x730 [amdgpu]
   dce110_apply_ctx_to_hw+0x58e/0x5d0 [amdgpu]
   ? dcn10_verify_allow_pstate_change_high+0x1d/0x280 [amdgpu]
   ? dcn10_wait_for_mpcc_disconnect+0x3c/0x130 [amdgpu]
   dc_commit_state+0x292/0x770 [amdgpu]
   ? add_timer+0x101/0x1f0
   ? ttm_bo_put+0x1a1/0x2f0 [ttm]
   amdgpu_dm_atomic_commit_tail+0xb59/0x1ff0 [amdgpu]
   ? amdgpu_move_blit.constprop.0+0xb8/0x1f0 [amdgpu]
   ? amdgpu_bo_move+0x16d/0x2b0 [amdgpu]
   ? ttm_bo_handle_move_mem+0x118/0x570 [ttm]
   ? ttm_bo_validate+0x134/0x150 [ttm]
   ? dm_plane_helper_prepare_fb+0x1b9/0x2a0 [amdgpu]
   ? _cond_resched+0x15/0x30
   ? wait_for_completion_timeout+0x38/0x160
   ? _cond_resched+0x15/0x30
   ? wait_for_completion_interruptible+0x33/0x190
   commit_tail+0x94/0x130 [drm_kms_helper]
   drm_atomic_helper_commit+0x113/0x140 [drm_kms_helper]
   drm_atomic_helper_set_config+0x70/0xb0 [drm_kms_helper]
   drm_mode_setcrtc+0x194/0x6a0 [drm]
   ? _cond_resched+0x15/0x30
   ? mutex_lock+0xe/0x30
   ? drm_mode_getcrtc+0x180/0x180 [drm]
   drm_ioctl_kernel+0xaa/0xf0 [drm]
   drm_ioctl+0x208/0x390 [drm]
   ? drm_mode_getcrtc+0x180/0x180 [drm]
   amdgpu_drm_ioctl+0x49/0x80 [amdgpu]
   do_vfs_ioctl+0x458/0x6d0
   ksys_ioctl+0x5e/0x90
   __x64_sys_ioctl+0x16/0x20
   do_syscall_64+0x55/0x1b0
   entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7fab2121f87b
Code: 0f 1e fa 48 8b 05 0d 96 2c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff
ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01
f0 ff ff 73 01 c3 48 8b 0d dd 95 2c 00 f7 d8 64 89 01 48
RSP: 002b:7ffd045f9068 EFLAGS: 0246 ORIG_RAX: 0010
RAX: ffda RBX: 7ffd045f90a0 RCX: 7fab2121f87b
RDX: 7ffd045f90a0 RSI: c06864a2 RDI: 000b
RBP: 7ffd045f90a0 R08:  R09: 55db

Re: [PATCH v2] drm/amd/dm/mst: Ignore payload update failures

2020-01-24 Thread Mikita Lipski
 I have only been able to reproduce this on setups with 2
MST displays.

Changes since v1:
* Don't return false when part 1 or part 2 of updating the payloads
   fails, we don't want to abort at any step of the process even if
   things fail

Signed-off-by: Lyude Paul 
Acked-by: Harry Wentland 
Cc: sta...@vger.kernel.org
---
  .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c   | 13 -
  1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 069b7a6f5597..318b474ff20e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -216,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
}
  
-	ret = drm_dp_update_payload_part1(mst_mgr);

+   /* It's OK for this to fail */
+   drm_dp_update_payload_part1(mst_mgr);
  
  	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or

 * AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -225,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
  
  	get_payload_table(aconnector, proposed_table);
  
-	if (ret)

-   return false;
-


Sorry for being picky, but I think this might cause a compilation error on 
some systems due to the now-unused variable (int ret). It's better to just 
strip out both ret declarations.
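
Something like this is what I mean (just a sketch against the first hunk
above, assuming ret has no other users left in
dm_helpers_dp_mst_write_payload_allocation_table once the part1 status is
ignored):

@@ bool dm_helpers_dp_mst_write_payload_allocation_table(
-	int ret;
 	...
 	/* It's OK for this to fail */
 	drm_dp_update_payload_part1(mst_mgr);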


Otherwise the patch is good. Thanks again!

Reviewed-by: Mikita Lipski 

Mikita


return true;
  }
  
@@ -285,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(

struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
-   int ret;
  
  	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
  
@@ -299,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(

if (!mst_mgr->mst_state)
return false;
  
-	ret = drm_dp_update_payload_part2(mst_mgr);

-
-   if (ret)
-   return false;
+   /* It's OK for this to fail */
+   drm_dp_update_payload_part2(mst_mgr);
  
  	if (!enable)

drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);




Re: [PATCH] drm/amd/dm/mst: Ignore payload update failures on disable

2020-01-24 Thread Mikita Lipski




On 1/24/20 9:55 AM, Harry Wentland wrote:

On 2020-01-23 7:06 p.m., Lyude Paul wrote:

Disabling a display on MST can potentially happen after the entire MST
topology has been removed, which means that we can't communicate with
the topology at all in this scenario. Likewise, this also means that we
can't properly update payloads on the topology and as such, it's a good
idea to ignore payload update failures when disabling displays.
Currently, amdgpu makes the mistake of halting the payload update
process when any payload update failures occur, resulting in leaving
DC's local copies of the payload tables out of date.

This ends up causing problems with hotplugging MST topologies, and
causes modesets on the second hotplug to fail like so:

[drm] Failed to updateMST allocation table forpipe idx:1
[ cut here ]
WARNING: CPU: 5 PID: 1511 at
drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2677
update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
Modules linked in: cdc_ether usbnet fuse xt_conntrack nf_conntrack
nf_defrag_ipv6 libcrc32c nf_defrag_ipv4 ipt_REJECT nf_reject_ipv4
nft_counter nft_compat nf_tables nfnetlink tun bridge stp llc sunrpc
vfat fat wmi_bmof uvcvideo snd_hda_codec_realtek snd_hda_codec_generic
snd_hda_codec_hdmi videobuf2_vmalloc snd_hda_intel videobuf2_memops
videobuf2_v4l2 snd_intel_dspcfg videobuf2_common crct10dif_pclmul
snd_hda_codec videodev crc32_pclmul snd_hwdep snd_hda_core
ghash_clmulni_intel snd_seq mc joydev pcspkr snd_seq_device snd_pcm
sp5100_tco k10temp i2c_piix4 snd_timer thinkpad_acpi ledtrig_audio snd
wmi soundcore video i2c_scmi acpi_cpufreq ip_tables amdgpu(O)
rtsx_pci_sdmmc amd_iommu_v2 gpu_sched mmc_core i2c_algo_bit ttm
drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec drm
crc32c_intel serio_raw hid_multitouch r8152 mii nvme r8169 nvme_core
rtsx_pci pinctrl_amd
CPU: 5 PID: 1511 Comm: gnome-shell Tainted: G   O  
5.5.0-rc7Lyude-Test+ #4
Hardware name: LENOVO FA495SIT26/FA495SIT26, BIOS R12ET22W(0.22 ) 01/31/2019
RIP: 0010:update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
Code: 28 00 00 00 75 2b 48 8d 65 e0 5b 41 5c 41 5d 41 5e 5d c3 0f b6 06
49 89 1c 24 41 88 44 24 08 0f b6 46 01 41 88 44 24 09 eb 93 <0f> 0b e9
2f ff ff ff e8 a6 82 a3 c2 66 0f 1f 44 00 00 0f 1f 44 00
RSP: 0018:ac428127f5b0 EFLAGS: 00010202
RAX: 0002 RBX: 8d1e166eee80 RCX: 
RDX: ac428127f668 RSI: 8d1e166eee80 RDI: ac428127f610
RBP: ac428127f640 R08: c03d94a8 R09: 
R10: 8d1e24b02000 R11: ac428127f5b0 R12: 8d1e1b83d000
R13: 8d1e1bea0b08 R14: 0002 R15: 0002
FS:  7fab23ffcd80() GS:8d1e28b4() knlGS:
CS:  0010 DS:  ES:  CR0: 80050033
CR2: 7f151f1711e8 CR3: 0005997c CR4: 003406e0
Call Trace:
  ? mutex_lock+0xe/0x30
  dc_link_allocate_mst_payload+0x9a/0x210 [amdgpu]
  ? dm_read_reg_func+0x39/0xb0 [amdgpu]
  ? core_link_enable_stream+0x656/0x730 [amdgpu]
  core_link_enable_stream+0x656/0x730 [amdgpu]
  dce110_apply_ctx_to_hw+0x58e/0x5d0 [amdgpu]
  ? dcn10_verify_allow_pstate_change_high+0x1d/0x280 [amdgpu]
  ? dcn10_wait_for_mpcc_disconnect+0x3c/0x130 [amdgpu]
  dc_commit_state+0x292/0x770 [amdgpu]
  ? add_timer+0x101/0x1f0
  ? ttm_bo_put+0x1a1/0x2f0 [ttm]
  amdgpu_dm_atomic_commit_tail+0xb59/0x1ff0 [amdgpu]
  ? amdgpu_move_blit.constprop.0+0xb8/0x1f0 [amdgpu]
  ? amdgpu_bo_move+0x16d/0x2b0 [amdgpu]
  ? ttm_bo_handle_move_mem+0x118/0x570 [ttm]
  ? ttm_bo_validate+0x134/0x150 [ttm]
  ? dm_plane_helper_prepare_fb+0x1b9/0x2a0 [amdgpu]
  ? _cond_resched+0x15/0x30
  ? wait_for_completion_timeout+0x38/0x160
  ? _cond_resched+0x15/0x30
  ? wait_for_completion_interruptible+0x33/0x190
  commit_tail+0x94/0x130 [drm_kms_helper]
  drm_atomic_helper_commit+0x113/0x140 [drm_kms_helper]
  drm_atomic_helper_set_config+0x70/0xb0 [drm_kms_helper]
  drm_mode_setcrtc+0x194/0x6a0 [drm]
  ? _cond_resched+0x15/0x30
  ? mutex_lock+0xe/0x30
  ? drm_mode_getcrtc+0x180/0x180 [drm]
  drm_ioctl_kernel+0xaa/0xf0 [drm]
  drm_ioctl+0x208/0x390 [drm]
  ? drm_mode_getcrtc+0x180/0x180 [drm]
  amdgpu_drm_ioctl+0x49/0x80 [amdgpu]
  do_vfs_ioctl+0x458/0x6d0
  ksys_ioctl+0x5e/0x90
  __x64_sys_ioctl+0x16/0x20
  do_syscall_64+0x55/0x1b0
  entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7fab2121f87b
Code: 0f 1e fa 48 8b 05 0d 96 2c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff
ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01
f0 ff ff 73 01 c3 48 8b 0d dd 95 2c 00 f7 d8 64 89 01 48
RSP: 002b:7ffd045f9068 EFLAGS: 0246 ORIG_RAX: 0010
RAX: ffda RBX: 7ffd045f90a0 RCX: 7fab2121f87b
RDX: 7ffd045f90a0 RSI: c06864a2 RDI: 000b
RBP: 7ffd045f90a0 R08:  R09: 55dbd2985d10
R10: 55dbd2196280 R11: 0246 R12: c06864a2
R13: 000b R14:  R15: 55dbd2196280
---[ end trace 

Re: [PATCH v9 12/18] drm/dp_mst: Add branch bandwidth validation to MST atomic check

2020-01-17 Thread Mikita Lipski




On 1/17/20 10:09 AM, Sean Paul wrote:

On Fri, Dec 13, 2019 at 3:09 PM  wrote:


From: Mikita Lipski 



Hi Mikita,
Unfortunately this patch causes a crash on my i915 device when I
unplug my MST hub. The panic is below.


Hi Sean,

I thought this issue was fixed by Wayne Lin in 
https://patchwork.freedesktop.org/patch/346736/?series=71388=1
but now that I checked, it seems it never got pushed. I will resend Wayne's 
patch once again.


Thanks
Mikita


[   38.514014] BUG: kernel NULL pointer dereference, address: 0030
[   38.521801] #PF: supervisor read access in kernel mode
[   38.527556] #PF: error_code(0x) - not-present page
[   38.533299] PGD 0 P4D 0
[   38.536127] Oops:  [#1] PREEMPT SMP PTI
[   38.540798] CPU: 1 PID: 1324 Comm: DrmThread Not tainted
5.5.0-rc6-02273-g9bb4096398e7 #36
[   38.550040] Hardware name: Google Fizz/Fizz, BIOS
Google_Fizz.10139.39.0 01/04/2018
[   38.558606] RIP: 0010:drm_dp_mst_atomic_check_bw_limit+0x11/0x102
[   38.565418] Code: 05 ff cb bf 19 48 f7 f6 c3 0f 1f 44 00 00 55 b8
0b 80 ff 0f 48 89 e5 5d c3 55 48 89 e5 41 57 41 56 41 55 41 54 4c 8d
77 30 53 <48> 8b 47 30 49 89 fd 49 89 f7 45 31 e4 48 8d 58 e8 48 8d 53
18 4c
[   38.586422] RSP: 0018:c9000139f9d8 EFLAGS: 00010282
[   38.592264] RAX:  RBX: 888272aeac88 RCX: 888236f529e0
[   38.600242] RDX: 888272aeac88 RSI: 888236f529e0 RDI: 
[   38.608220] RBP: c9000139fa00 R08: 0031 R09: 000e
[   38.616198] R10: 888236f529e8 R11: 8882621f3440 R12: 
[   38.624176] R13: 888236f529d0 R14: 0030 R15: 888236f529e0
[   38.632153] FS:  7cd9229ce700() GS:888276c8()
knlGS:
[   38.641193] CS:  0010 DS:  ES:  CR0: 80050033
[   38.647616] CR2: 0030 CR3: 0002618e8004 CR4: 003606e0
[   38.655593] Call Trace:
[   38.658329]  drm_dp_mst_atomic_check+0x152/0x16d
[   38.663484]  intel_atomic_check+0xcfe/0x1e6f
[   38.668259]  ? trace_hardirqs_on+0x28/0x3d
[   38.672835]  ? intel_pipe_config_compare+0x1b38/0x1b38
[   38.678580]  drm_atomic_check_only+0x5ab/0x70f
[   38.683547]  ? drm_atomic_set_crtc_for_connector+0xf5/0x102
[   38.689778]  ? drm_atomic_helper_shutdown+0xb6/0xb6
[   38.695221]  drm_atomic_commit+0x18/0x53
[   38.699604]  drm_atomic_helper_set_config+0x5a/0x70
[   38.705057]  drm_mode_setcrtc+0x2ab/0x833
[   38.709537]  ? rcu_read_unlock+0x57/0x57
[   38.713920]  ? drm_mode_getcrtc+0x173/0x173
[   38.718594]  drm_ioctl+0x2e5/0x424
[   38.722392]  ? drm_mode_getcrtc+0x173/0x173
[   38.727069]  vfs_ioctl+0x21/0x2f
[   38.730675]  do_vfs_ioctl+0x5fb/0x61e
[   38.734766]  ksys_ioctl+0x55/0x75
[   38.738469]  __x64_sys_ioctl+0x1a/0x1e
[   38.742659]  do_syscall_64+0x5c/0x6d
[   38.746653]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
[   38.752298] RIP: 0033:0x7cd92552d497
[   38.756291] Code: 8a 66 90 48 8b 05 d1 d9 2b 00 64 c7 00 26 00 00
00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00
00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d a1 d9 2b 00 f7 d8 64 89
01 48
[   38.777296] RSP: 002b:7cd9229cd698 EFLAGS: 0246 ORIG_RAX:
0010
[   38.785762] RAX: ffda RBX: 20323373da80 RCX: 7cd92552d497
[   38.793740] RDX: 7cd9229cd6d0 RSI: c06864a2 RDI: 001c
[   38.801717] RBP: 7cd9229cd6c0 R08:  R09: 
[   38.809693] R10:  R11: 0246 R12: 001c
[   38.817670] R13:  R14: 7cd9229cd6d0 R15: c06864a2
[   38.825642] Modules linked in: xt_nat cdc_ether r8152 bridge stp
llc usbhid btusb btrtl btbcm btintel bluetooth asix usbnet
ecdh_generic ecc mii snd_soc_hdac_hdmi snd_soc_dmic xhci_pci xhci_hcd
snd_soc_skl snd_soc_sst_ipc snd_soc_sst_dsp snd_hda_ext_core
snd_intel_dspcfg snd_hda_core usbcore usb_common acpi_als kfifo_buf
industrialio xt_MASQUERADE iptable_nat nf_nat xt_mark fuse
ip6table_filter iwlmvm mac80211 r8169 realtek iwlwifi lzo_rle
lzo_compress zram cfg80211
[   38.871839] CR2: 0030
[   38.875542] ---[ end trace 6bb39ec52e30c7cb ]---
[   38.886142] RIP: 0010:drm_dp_mst_atomic_check_bw_limit+0x11/0x102
[   38.892957] Code: 05 ff cb bf 19 48 f7 f6 c3 0f 1f 44 00 00 55 b8
0b 80 ff 0f 48 89 e5 5d c3 55 48 89 e5 41 57 41 56 41 55 41 54 4c 8d
77 30 53 <48> 8b 47 30 49 89 fd 49 89 f7 45 31 e4 48 8d 58 e8 48 8d 53
18 4c
[   38.913964] RSP: 0018:c9000139f9d8 EFLAGS: 00010282
[   38.919804] RAX:  RBX: 888272aeac88 RCX: 888236f529e0
[   38.927784] RDX: 888272aeac88 RSI: 888236f529e0 RDI: 
[   38.935765] RBP: c9000139fa00 R08: 0031 R09: 000e
[   38.943733] R10: 888236f529e8 R11: 8882621f3440 R12: 
[   38.951712] R13: 888236f529d0 R14: 0030 R15: 888236f529e0
[   38.959692] FS:  7cd9229ce700() GS:888276

Re: [PATCH] drm/dp_mst: fix documentation of drm_dp_mst_add_affected_dsc_crtcs

2020-01-09 Thread Mikita Lipski

Thank you,
Reviewed-by: Mikita Lipski 

On 1/8/20 10:24 PM, Alex Deucher wrote:

the parameter is the mst manager, not the port.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/drm_dp_mst_topology.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c 
b/drivers/gpu/drm/drm_dp_mst_topology.c
index 7e9b9b7e50cf..a4be2f825899 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4781,7 +4781,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct 
drm_dp_mst_topology_mgr *mgr,
  /**
   * drm_dp_mst_add_affected_dsc_crtcs
   * @state: Pointer to the new struct drm_dp_mst_topology_state
- * @port: Port pointer of connector with new state
+ * @mgr: MST topology manager
   *
   * Whenever there is a change in mst topology
   * DSC configuration would have to be recalculated



--
Thanks,
Mikita Lipski
Software Engineer, AMD
mikita.lip...@amd.com


Re: [PATCH v9 16/18] drm/amd/display: Recalculate VCPI slots for new DSC connectors

2019-12-23 Thread Mikita Lipski




On 12/20/19 4:41 PM, Lyude Paul wrote:

So I reviewed this already but realized I made a very silly mistake, comments
down below:

On Fri, 2019-12-13 at 15:08 -0500, mikita.lip...@amd.com wrote:

From: Mikita Lipski 

[why]
Since a DSC MST connector's PBN is calculated differently
due to compression, we have to recalculate both the PBN and
VCPI slots for that connector.

[how]
The function iterates through all the active streams to
find which have DSC enabled, then recalculates the PBN for
each and calls drm_dp_helper_update_vcpi_slots_for_dsc to
update the connector's VCPI slots.

v2: - use drm_dp_mst_atomic_enable_dsc per port to
enable/disable DSC

v3: - Iterate through connector states from the state passed
 - On each connector state get stream from dc_state,
instead CRTC state

Reviewed-by: Lyude Paul 
Signed-off-by: Mikita Lipski 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 76 +--
  1 file changed, 71 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 93a230d956ee..2ac3a2f0b452 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4986,6 +4986,69 @@ const struct drm_encoder_helper_funcs
amdgpu_dm_encoder_helper_funcs = {
.atomic_check = dm_encoder_helper_atomic_check
  };
  
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,

+   struct dc_state *dc_state)
+{
+   struct dc_stream_state *stream = NULL;
+   struct drm_connector *connector;
+   struct drm_connector_state *new_con_state, *old_con_state;
+   struct amdgpu_dm_connector *aconnector;
+   struct dm_connector_state *dm_conn_state;
+   int i, j, clock, bpp;
+   int vcpi, pbn_div, pbn = 0;
+
+   for_each_oldnew_connector_in_state(state, connector, old_con_state,
new_con_state, i) {
+
+   aconnector = to_amdgpu_dm_connector(connector);
+
+   if (!aconnector->port)
+   continue;
+
+   if (!new_con_state || !new_con_state->crtc)
+   continue;
+
+   dm_conn_state = to_dm_connector_state(new_con_state);
+
+   for (j = 0; j < dc_state->stream_count; j++) {
+   stream = dc_state->streams[j];
+   if (!stream)
+   continue;
+
+   if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
+   break;
+
+   stream = NULL;
+   }
+
+   if (!stream)
+   continue;
+
+   if (stream->timing.flags.DSC != 1) {
+   drm_dp_mst_atomic_enable_dsc(state,
+aconnector->port,
+dm_conn_state->pbn,
+0,
+false);
+   continue;
+   }
+
+   pbn_div = dm_mst_get_pbn_divider(stream->link);
+   bpp = stream->timing.dsc_cfg.bits_per_pixel;
+   clock = stream->timing.pix_clk_100hz / 10;
+   pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+   vcpi = drm_dp_mst_atomic_enable_dsc(state,
+   aconnector->port,
+   pbn, pbn_div,
+   true);
+   if (vcpi < 0)
+   return vcpi;
+
+   dm_conn_state->pbn = pbn;
+   dm_conn_state->vcpi_slots = vcpi;
+   }
+   return 0;
+}
+
  static void dm_drm_plane_reset(struct drm_plane *plane)
  {
struct dm_plane_state *amdgpu_state = NULL;
@@ -8022,11 +8085,6 @@ static int amdgpu_dm_atomic_check(struct drm_device
*dev,
if (ret)
goto fail;
  
-	/* Perform validation of MST topology in the state*/

-   ret = drm_dp_mst_atomic_check(state);
-   if (ret)
-   goto fail;
-
if (state->legacy_cursor_update) {
/*
 * This is a fast cursor update coming from the plane update
@@ -8098,6 +8156,10 @@ static int amdgpu_dm_atomic_check(struct drm_device
*dev,
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
goto fail;
  
+		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+   if (ret)
+   goto fail;
+
if (dc_validate_global_state(dc, dm_state->context, false) !=
DC_OK) {
ret = -EINVAL;
goto fail;
@@ -8126,6 +8188,10 @@ stati

Re: [PATCH] drm/amd/display: replace BUG_ON with WARN_ON

2019-12-19 Thread Mikita Lipski




On 12/18/19 11:15 AM, Aditya Pakki wrote:

In skip_modeset label within dm_update_crtc_state(), the dc stream
cannot be NULL. Using BUG_ON as an assertion is not required and
can be removed. The patch replaces the check with a WARN_ON in case
dm_new_crtc_state->stream is NULL.

Signed-off-by: Aditya Pakki 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7aac9568d3be..03cb30913c20 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7012,7 +7012,7 @@ static int dm_update_crtc_state(struct 
amdgpu_display_manager *dm,
 * 3. Is currently active and enabled.
 * => The dc stream state currently exists.
 */
-   BUG_ON(dm_new_crtc_state->stream == NULL);
+   WARN_ON(!dm_new_crtc_state->stream);
  


Thanks for the patch, but this is a NAK from me since it doesn't really do 
anything to prevent or fix the issue.


If the stream is NULL and it passed this far in the function then 
something really wrong has happened and the process should be stopped.


I'm currently dealing with an issue where dm_new_crtc_state->stream is 
NULL. One of the scenarios could be that the driver creates a stream for a 
fake sink (connected over MST) instead of failing, and then calls 
dm_update_crtc_state to enable the CRTC.



/* Scaling or underscan settings */
if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))



--
Thanks,
Mikita Lipski
Software Engineer, AMD
mikita.lip...@amd.com


Re: [PATCH v8 11/17] drm/dp_mst: Add DSC enablement helpers to DRM

2019-12-09 Thread Mikita Lipski



On 12/6/19 7:24 PM, Lyude Paul wrote:

Nice! All I've got is a couple of typos I noticed and one question, this looks
great :)


Thanks! I'll clean it up. The response to the question is below.


On Tue, 2019-12-03 at 09:35 -0500, mikita.lip...@amd.com wrote:
From: Mikita Lipski 

Adding a helper function to be called by
drivers outside of DRM to enable DSC on
the MST ports.

Function is called to recalculate VCPI allocation
if DSC is enabled and raise the DSC flag to enable.
In case of disabling DSC the flag is set to false
and recalculation of VCPI slots is expected to be done
in encoder's atomic_check.

v2: squash separate functions into one and call it per
port

Cc: Harry Wentland 
Cc: Lyude Paul 
Signed-off-by: Mikita Lipski 
---
  drivers/gpu/drm/drm_dp_mst_topology.c | 61 +++
  include/drm/drm_dp_mst_helper.h   |  5 +++
  2 files changed, 66 insertions(+)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
b/drivers/gpu/drm/drm_dp_mst_topology.c
index f1d883960831..5e549f48ffb8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4742,6 +4742,67 @@ drm_dp_mst_atomic_check_topology_state(struct
drm_dp_mst_topology_mgr *mgr,
return 0;
  }
  
+/**

+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @pointer: Pointer to the affected MST Port

Typo here


+ * @pbn: Newly recalculated bw required for link with DSC enabled
+ * @pbn_div: Divider to calculate correct number of pbn per slot
+ * @enable: Boolean flag enabling or disabling DSC on the port
+ *
+ * This function enables DSC on the given Port
+ * by recalculating its vcpi from pbn provided
+ * and sets dsc_enable flag to keep track of which
+ * ports have DSC enabled
+ *
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+struct drm_dp_mst_port *port,
+int pbn, int pbn_div,
+bool enable)
+{
+   struct drm_dp_mst_topology_state *mst_state;
+   struct drm_dp_vcpi_allocation *pos;
+   bool found = false;
+   int vcpi = 0;
+
+   mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+   if (IS_ERR(mst_state))
+   return PTR_ERR(mst_state);
+
+   list_for_each_entry(pos, &mst_state->vcpis, next) {
+   if (pos->port == port) {
+   found = true;
+   break;
+   }
+   }
+
+   if (!found) {
+   DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation
in mst state %p\n",
+port, mst_state);
+   return -EINVAL;
+   }


Just double checking - does this handle the case where we're enabling DSC on a
port that didn't previously have a VCPI allocation because it wasn't enabled
previously? Or do we not need to handle that here?


Because we call the encoder atomic check beforehand to allocate VCPI slots, 
the port should already have a VCPI allocation before DSC is enabled, even if 
DSC wasn't enabled previously.
Therefore, I was thinking that if the encoder atomic check fails to 
allocate VCPI slots for the port, we shouldn't enable DSC on it and 
should probably fail the atomic check if that is even requested.
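
Roughly, the ordering I have in mind looks like this (a driver-side sketch
only, with placeholder variables, not the actual amdgpu code):

	/* encoder atomic_check runs first and allocates VCPI for the port */
	slots = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port,
					      pbn, pbn_div);
	if (slots < 0)
		return slots;	/* fail the atomic check, never get to DSC */

	/* only then is DSC enabled against the existing allocation */
	vcpi = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn,
					    pbn_div, true);
	if (vcpi < 0)
		return vcpi;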


Assuming you did the right thing here, with the small typo fixes:

Reviewed-by: Lyude Paul 


+
+   if (pos->dsc_enabled == enable) {
+   DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d,
returning %d VCPI slots\n",
+port, enable, pos->vcpi);
+   vcpi = pos->vcpi;
+   }
+
+   if (enable) {
+   vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port,
pbn, pbn_div);
+   DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag,
reallocating %d VCPI slots on the port\n",
+port, vcpi);
+   if (vcpi < 0)
+   return -EINVAL;
+   }
+
+   pos->dsc_enabled = enable;
+
+   return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
  /**
   * drm_dp_mst_atomic_check - Check that the new state of an MST topology in
an
   * atomic update is valid
diff --git a/include/drm/drm_dp_mst_helper.h
b/include/drm/drm_dp_mst_helper.h
index 0f813d6346aa..830c94b7f45d 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -502,6 +502,7 @@ struct drm_dp_payload {
  struct drm_dp_vcpi_allocation {
struct drm_dp_mst_port *port;
int vcpi;
+   bool dsc_enabled;
struct list_head next;
  };
  
@@ -773,6 +774,10 @@ drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state

*state,
  struct drm_dp_mst_topology_mgr *mgr,
  struct drm_dp_mst_port *port, int pbn,
  int pbn_div);
+int dr

Re: [PATCH v9] drm/dp_mst: Add PBN calculation for DSC modes

2019-12-04 Thread Mikita Lipski



On 12/4/19 11:45 AM, Jani Nikula wrote:

On Tue, 03 Dec 2019,  wrote:

From: David Francis 

With DSC, bpp can be fractional in multiples of 1/16.


Can be?

I worry a bit that "bpp" can either be integer or fixed point depending
on other variables. I admit I haven't followed up on this too much, but
how widespread it is? It seems like something that is bound to be broken
in subtle ways, when it won't even cross people's minds that bpp is
fractional.



Hi Jani,

The target rate is expected to be a multiple of the bits-per-pixel 
increment divider (which is 16), so the incoming bpp variable is an 
integer and not a fixed-point value. This is expected, as described in the 
DP 1.4a spec. It is also true that if a driver passes a non-DSC bit rate 
with the dsc flag set to true, it will cause issues on MST, as there likely 
won't be enough bandwidth allocated.


But you have pointed to an error, I shouldn't divide  (64 * 1006) by 16 
since it comes out to 18645,45 and after it is floored to an integer it 
will cause pbn to be lower than needed.


Instead the numerator should rather be like this:
mul_u32_u32(clock * bpp / 16, 64 * 1006)
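
Put together, the corrected calculation would be something like this (a
sketch only, with a placeholder name and the same kernel helpers the patch
already uses; not the final version of the patch):

	/* clock in kHz; with dsc, bpp is in units of 1/16 of a bit per pixel */
	static int example_calc_pbn_mode(int clock, int bpp, bool dsc)
	{
		u64 numerator;

		if (dsc)
			/* keep the /16 on the bpp term in the numerator */
			numerator = mul_u32_u32(clock * bpp / 16, 64 * 1006);
		else
			numerator = mul_u32_u32(clock * bpp, 64 * 1006);

		return DIV_ROUND_UP_ULL(numerator, 8 * 54 * 1000 * 1000);
	}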

Mikita


BR,
Jani.




Change drm_dp_calc_pbn_mode to reflect this, adding a new
parameter bool dsc. When this parameter is true, treat the
bpp parameter as having units not of bits per pixel, but
1/16 of a bit per pixel

v2: Don't add separate function for this
v3: Keep the calculation in a single equation
v4: Update the tests in test-drm_dp_mst_helper.c

Reviewed-by: Manasi Navare 
Reviewed-by: Lyude Paul 
Reviewed-by: Harry Wentland 
Signed-off-by: David Francis 
Signed-off-by: Mikita Lipski 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  2 +-
  drivers/gpu/drm/drm_dp_mst_topology.c  | 12 +++-
  drivers/gpu/drm/i915/display/intel_dp_mst.c|  3 ++-
  drivers/gpu/drm/nouveau/dispnv50/disp.c|  2 +-
  drivers/gpu/drm/radeon/radeon_dp_mst.c |  2 +-
  drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c | 10 ++
  include/drm/drm_dp_mst_helper.h|  3 +--
  7 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 455c51c38720  ..9fc03fc1017d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4967,7 +4967,7 @@ static int dm_encoder_helper_atomic_check(struct 
drm_encoder *encoder,
is_y420);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
-   dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp);
+   dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, 
false);
}
dm_new_connector_state->vcpi_slots = 
drm_dp_atomic_find_vcpi_slots(state,
   
mst_mgr,
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c 
b/drivers/gpu/drm/drm_dp_mst_topology.c
index ae5809a1f19a..828b5eae529c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4342,10 +4342,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
   * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   * @clock: dot clock for the mode
   * @bpp: bpp for the mode.
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
   *
   * This uses the formula in the spec to calculate the PBN value for a mode.
   */
-int drm_dp_calc_pbn_mode(int clock, int bpp)
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
  {
/*
 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4356,7 +4357,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
 * peak_kbps *= (1006/1000)
 * peak_kbps *= (64/54)
 * peak_kbps *= 8convert to bytes
+*
+* If the bpp is in units of 1/16, further divide by 16. Put this
+* factor in the numerator rather than the denominator to avoid
+* integer overflow
 */
+
+   if(dsc)
+   return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 / 
16),
+   8 * 54 * 1000 * 1000);
+
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
8 * 54 * 1000 * 1000);
  }
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c 
b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 03d1cba0b696..92be17711287 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -61,7 +61,8 @@ static int intel_dp_mst_compute_link_config(struct 
intel_encoder *encoder,
crtc_state->pipe_bpp = bpp;
  
  		crtc_state->pbn = drm_dp_calc_pbn_mo

Re: [PATCH v7 11/17] drm/dp_mst: Add DSC enablement helpers to DRM

2019-11-26 Thread Mikita Lipski



On 26/11/2019 10:24, Leo wrote:



On 2019-11-16 5:01 p.m., mikita.lip...@amd.com wrote:

From: Mikita Lipski 

Adding a helper function to be called by
drivers outside of DRM to enable DSC on
the MST ports.

Function is called to recalculate VCPI allocation
if DSC is enabled and raise the DSC flag to enable.
In case of disabling DSC the flag is set to false
and recalculation of VCPI slots is expected to be done
in encoder's atomic_check.

v2: squash separate functions into one and call it per
port

Cc: Harry Wentland 
Cc: Lyude Paul 
Signed-off-by: Mikita Lipski 
---
  drivers/gpu/drm/drm_dp_mst_topology.c | 61 +++
  include/drm/drm_dp_mst_helper.h   |  5 +++
  2 files changed, 66 insertions(+)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c 
b/drivers/gpu/drm/drm_dp_mst_topology.c
index 94bb259ab73e..98cc93d5ddd7 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3876,6 +3876,67 @@ drm_dp_mst_atomic_check_topology_state(struct 
drm_dp_mst_topology_mgr *mgr,
return 0;
  }
  
+/**

+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @pointer: Pointer to the affected MST Port


s/@pointer/@port/


Thanks,
will update the comment.


+ * @pbn: Newly recalculated bw required for link with DSC enabled
+ * @pbn_div: Divider to calculate correct number of pbn per slot
+ * @enable: Boolean flag enabling or disabling DSC on the port
+ *
+ * This function enables DSC on the given Port
+ * by recalculating its vcpi from pbn provided
+ * and sets dsc_enable flag to keep track of which
+ * ports have DSC enabled


Would be good to document the return value as well.


+ *
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+struct drm_dp_mst_port *port,
+int pbn, int pbn_div,
+bool enable)
+{
+   struct drm_dp_mst_topology_state *mst_state;
+   struct drm_dp_vcpi_allocation *pos;
+   bool found = false;
+   int vcpi = 0;
+
+   mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+   if (IS_ERR(mst_state))
+   return PTR_ERR(mst_state);
+
+   list_for_each_entry(pos, &mst_state->vcpis, next) {
+   if (pos->port == port) {
+   found = true;
+   break;
+   }
+   }
+
+   if (!found) {
+   DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst 
state %p\n",
+port, mst_state);
+   return -EINVAL;
+   }
+
+   if (pos->dsc_enabled == enable) {
+   DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, 
returning %d VCPI slots\n",
+port, enable, pos->vcpi);
+   vcpi = pos->vcpi;


Do we want to early return here?


Not if a new compression rate is set.
DSC could still be enabled, but if a new device is introduced, 
higher compression will be required, so the PBN will change and we will have 
to reassign the slots to the port.




- Leo


+   }
+
+   if (enable) {
+   vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, 
pbn, pbn_div);
+   DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d 
VCPI slots on the port\n",
+port, vcpi);
+   if (vcpi < 0)
+   return -EINVAL;
+   }
+
+   pos->dsc_enabled = enable;
+
+   return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
  /**
   * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
   * atomic update is valid
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index fc19094b06c3..b1b00de3083b 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -431,6 +431,7 @@ struct drm_dp_payload {
  struct drm_dp_vcpi_allocation {
struct drm_dp_mst_port *port;
int vcpi;
+   bool dsc_enabled;
struct list_head next;
  };
  
@@ -663,6 +664,10 @@ drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,

  struct drm_dp_mst_topology_mgr *mgr,
  struct drm_dp_mst_port *port, int pbn,
  int pbn_div);
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+struct drm_dp_mst_port *port,
+int pbn, int pbn_div,
+bool enable);
  int __must_check
  drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
 struct drm_dp_mst_topology_mgr *mgr,



Re: [PATCH] drm/dsc: Return unsigned long on compute offset

2019-11-20 Thread Mikita Lipski



On 20/11/2019 05:17, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 04:11:43PM -0500, Mikita Lipski wrote:



On 19/11/2019 16:09, Mikita Lipski wrote:



On 19/11/2019 12:11, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 04:59:40PM +, Cornij, Nikola wrote:

If you're going to make all of them the same, then u64, please.

This is because I'm not sure if calculations require 64-bit at some
stage.


If it does then it's already broken. Someone should probably figure out
what's actally needed instead of shooting ducks with an icbm.




Sorry, I made a typo below. It was supposed to be "I don't think it is broken".


I mean that it's broken if it actually needs u64 when it's
currently using unsigned long. So u64 is either overkill or the
code is currently broken.



None of the calculations exceed u32, so u64 would be overkill, since 
none of the variables in the structure exceed 16 bits. Therefore u32 is 
enough.





I don't think it is not broken, cause I'm currently testing DSC.
The patch I sent early simply fixes the error of comparing  signed and
unsigned variables.

We can then submit a second patch addressing the issue of using unsigned
long int instead of u32. Also, since the variables in drm_dsc_config
structure are all of type u8 and u16, the calculation values shouldn't
exceed the size of u32.

Thanks



-Original Message-
From: Lipski, Mikita 
Sent: November 19, 2019 10:08 AM
To: Ville Syrjälä ; Lipski, Mikita

Cc: amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org;
Cornij, Nikola 
Subject: Re: [PATCH] drm/dsc: Return unsigned long on compute offset



On 19/11/2019 09:56, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote:

From: Mikita Lipski 

We shouldn't compare int with unsigned long to find the max value and
since we are not expecting negative value returned from
compute_offset we should make this function return unsigned long so
we can compare the values when computing rc parameters.


Why are there other unsigned longs in dsc parameter computation in the
first place?


I believe it was initially set to be unsigned long for variable
consistency, when we ported intel_compute_rc_parameters into
drm_dsc_compute_rc_parameters. But now that I look at it, we can
actually just set them to u32 or u64, as nothing should exceed that.




Cc: Nikola Cornij 
Cc: Harry Wentland 
Signed-off-by: Mikita Lipski 
---
    drivers/gpu/drm/drm_dsc.c | 6 +++---
    1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 74f3527f567d..ec40604ab6a2 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct
drm_dsc_picture_parameter_set *pps_payload,
    }
    EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
-static int compute_offset(struct drm_dsc_config *vdsc_cfg, int
pixels_per_group,
+static unsigned long compute_offset(struct drm_dsc_config
*vdsc_cfg, int pixels_per_group,
    int groups_per_line, int grpcnt)
    {
-    int offset = 0;
-    int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay,
pixels_per_group);
+    unsigned long offset = 0;
+    unsigned long grpcnt_id =
DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group);
    if (grpcnt <= grpcnt_id)
    offset = DIV_ROUND_UP(grpcnt * pixels_per_group *
vdsc_cfg->bits_per_pixel, 16);
--
2.17.1

___
dri-devel mailing list
dri-de...@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel




--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com






--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com




--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com

Re: [PATCH] drm/dsc: Return unsigned long on compute offset

2019-11-19 Thread Mikita Lipski



On 19/11/2019 16:09, Mikita Lipski wrote:



On 19/11/2019 12:11, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 04:59:40PM +, Cornij, Nikola wrote:

If you're going to make all of them the same, then u64, please.

This is because I'm not sure if calculations require 64-bit at some 
stage.


If it does then it's already broken. Someone should probably figure out
what's actally needed instead of shooting ducks with an icbm.




Sorry, I made a typo below. It was supposed to be "I don't think it is broken".


I don't think it is not broken, cause I'm currently testing DSC.
The patch I sent early simply fixes the error of comparing  signed and 
unsigned variables.


We can then submit a second patch addressing the issue of using unsigned 
long int instead of u32. Also, since the variables in drm_dsc_config 
structure are all of type u8 and u16, the calculation values shouldn't 
exceed the size of u32.


Thanks



-Original Message-
From: Lipski, Mikita 
Sent: November 19, 2019 10:08 AM
To: Ville Syrjälä ; Lipski, Mikita 

Cc: amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org; 
Cornij, Nikola 

Subject: Re: [PATCH] drm/dsc: Return unsigned long on compute offset



On 19/11/2019 09:56, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote:

From: Mikita Lipski 

We shouldn't compare int with unsigned long to find the max value and
since we are not expecting negative value returned from
compute_offset we should make this function return unsigned long so
we can compare the values when computing rc parameters.


Why are there other unsigned longs in dsc parameter computation in the
first place?


I believe it was initially set to be unsigned long for variable 
consistency, when we ported intel_compute_rc_parameters into 
drm_dsc_compute_rc_parameters. But now that I look at it, we can 
actually just set them to u32 or u64, as nothing should exceed that.




Cc: Nikola Cornij 
Cc: Harry Wentland 
Signed-off-by: Mikita Lipski 
---
   drivers/gpu/drm/drm_dsc.c | 6 +++---
   1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 74f3527f567d..ec40604ab6a2 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct 
drm_dsc_picture_parameter_set *pps_payload,

   }
   EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
-static int compute_offset(struct drm_dsc_config *vdsc_cfg, int 
pixels_per_group,
+static unsigned long compute_offset(struct drm_dsc_config 
*vdsc_cfg, int pixels_per_group,

   int groups_per_line, int grpcnt)
   {
-    int offset = 0;
-    int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, 
pixels_per_group);

+    unsigned long offset = 0;
+    unsigned long grpcnt_id = 
DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group);

   if (grpcnt <= grpcnt_id)
   offset = DIV_ROUND_UP(grpcnt * pixels_per_group * 
vdsc_cfg->bits_per_pixel, 16);

--
2.17.1

___
dri-devel mailing list
dri-de...@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel




--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com






--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com

Re: [PATCH] drm/dsc: Return unsigned long on compute offset

2019-11-19 Thread Mikita Lipski



On 19/11/2019 12:11, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 04:59:40PM +, Cornij, Nikola wrote:

If you're going to make all of them the same, then u64, please.

This is because I'm not sure if calculations require 64-bit at some stage.


If it does then it's already broken. Someone should probably figure out
what's actally needed instead of shooting ducks with an icbm.


I don't think it is not broken, cause I'm currently testing DSC.
The patch I sent early simply fixes the error of comparing  signed and 
unsigned variables.


We can then submit a second patch addressing the issue of using unsigned 
long int instead of u32. Also, since the variables in drm_dsc_config 
structure are all of type u8 and u16, the calculation values shouldn't 
exceed the size of u32.


Thanks



-Original Message-
From: Lipski, Mikita 
Sent: November 19, 2019 10:08 AM
To: Ville Syrjälä ; Lipski, Mikita 

Cc: amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org; Cornij, Nikola 

Subject: Re: [PATCH] drm/dsc: Return unsigned long on compute offset



On 19/11/2019 09:56, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote:

From: Mikita Lipski 

We shouldn't compare int with unsigned long to find the max value and
since we are not expecting negative value returned from
compute_offset we should make this function return unsigned long so
we can compare the values when computing rc parameters.


Why are there other unsigned longs in dsc parameter computation in the
first place?


I believe it was initially set to be unsigned long for variable consistency, 
when we ported intel_compute_rc_parameters into drm_dsc_compute_rc_parameters. 
But now that I look at it, we can actually just set them to u32 or u64, as 
nothing should exceed that.




Cc: Nikola Cornij 
Cc: Harry Wentland 
Signed-off-by: Mikita Lipski 
---
   drivers/gpu/drm/drm_dsc.c | 6 +++---
   1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 74f3527f567d..ec40604ab6a2 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct 
drm_dsc_picture_parameter_set *pps_payload,
   }
   EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
   
-static int compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group,

+static unsigned long compute_offset(struct drm_dsc_config *vdsc_cfg, int 
pixels_per_group,
int groups_per_line, int grpcnt)
   {
-   int offset = 0;
-   int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, 
pixels_per_group);
+   unsigned long offset = 0;
+   unsigned long grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, 
pixels_per_group);
   
   	if (grpcnt <= grpcnt_id)

offset = DIV_ROUND_UP(grpcnt * pixels_per_group * 
vdsc_cfg->bits_per_pixel, 16);
--
2.17.1

___
dri-devel mailing list
dri-de...@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel




--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com




--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com

Re: [PATCH] drm/dsc: Return unsigned long on compute offset

2019-11-19 Thread Mikita Lipski



On 19/11/2019 09:56, Ville Syrjälä wrote:

On Tue, Nov 19, 2019 at 09:45:26AM -0500, mikita.lip...@amd.com wrote:

From: Mikita Lipski 

We shouldn't compare int with unsigned long to find the max value
and since we are not expecting negative value returned from
compute_offset we should make this function return unsigned long
so we can compare the values when computing rc parameters.


Why are there other unsigned longs in dsc parameter computation
in the first place?


I believe it was initially set to be unsigned long for variable 
consistency, when we ported intel_compute_rc_parameters into 
drm_dsc_compute_rc_parameters. But now that I look at it, we can 
actually just set them to u32 or u64, as nothing should exceed that.
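
For example, something along these lines (a sketch only, showing just the
part of compute_offset() visible in the patch, narrowed to u32):

	static u32 compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group,
				  int groups_per_line, int grpcnt)
	{
		u32 offset = 0;
		u32 grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay,
					     pixels_per_group);

		if (grpcnt <= grpcnt_id)
			offset = DIV_ROUND_UP(grpcnt * pixels_per_group *
					      vdsc_cfg->bits_per_pixel, 16);

		/* ... rest of the function unchanged, still returning offset ... */
		return offset;
	}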




Cc: Nikola Cornij 
Cc: Harry Wentland 
Signed-off-by: Mikita Lipski 
---
  drivers/gpu/drm/drm_dsc.c | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 74f3527f567d..ec40604ab6a2 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -245,11 +245,11 @@ void drm_dsc_pps_payload_pack(struct 
drm_dsc_picture_parameter_set *pps_payload,
  }
  EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
  
-static int compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group,

+static unsigned long compute_offset(struct drm_dsc_config *vdsc_cfg, int 
pixels_per_group,
int groups_per_line, int grpcnt)
  {
-   int offset = 0;
-   int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, 
pixels_per_group);
+   unsigned long offset = 0;
+   unsigned long grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, 
pixels_per_group);
  
  	if (grpcnt <= grpcnt_id)

offset = DIV_ROUND_UP(grpcnt * pixels_per_group * 
vdsc_cfg->bits_per_pixel, 16);
--
2.17.1

___
dri-devel mailing list
dri-de...@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel




--
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com

Re: [bug report] drm/amd/display: Add MST atomic routines

2019-11-13 Thread Mikita Lipski

Hi Dan,

The bug has been fixed by the patch "drm/amd/display: Fix unsigned 
variable compared to less than zero" by Gustavo A. R. Silva


Thanks

On 13.11.2019 13:28, Dan Carpenter wrote:

Hello Mikita Lipski,

The patch b4c578f08378: "drm/amd/display: Add MST atomic routines"
from Nov 6, 2019, leads to the following static checker warning:

drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:4870 
dm_encoder_helper_atomic_check()
warn: unsigned 'dm_new_connector_state->vcpi_slots' is never less than 
zero.

drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c
   4851  if (!aconnector->port || !aconnector->dc_sink)
   4852  return 0;
   4853
   4854  mst_port = aconnector->port;
   4855  mst_mgr = &aconnector->mst_port->mst_mgr;
   4856
   4857  if (!crtc_state->connectors_changed && 
!crtc_state->mode_changed)
   4858  return 0;
   4859
   4860  if (!state->duplicated) {
   4861  color_depth = 
convert_color_depth_from_display_info(connector, conn_state);
   4862  bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
   4863  clock = adjusted_mode->clock;
   4864  dm_new_connector_state->pbn = 
drm_dp_calc_pbn_mode(clock, bpp);
   4865  }
   4866  dm_new_connector_state->vcpi_slots = 
drm_dp_atomic_find_vcpi_slots(state,
   4867 
mst_mgr,
   4868 
mst_port,
   4869
 dm_new_connector_state->pbn);
   4870  if (dm_new_connector_state->vcpi_slots < 0) {
 ^^
Impossible condition.

   4871  DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", 
(int)dm_new_connector_state->vcpi_slots);
   4872  return dm_new_connector_state->vcpi_slots;
   4873  }
   4874  return 0;
   4875  }

regards,
dan carpenter



--
Thanks,
Mikita Lipski
Software Engineer, AMD
mikita.lip...@amd.com

Re: [PATCH] drm/amd/display: Fix unsigned variable compared to less than zero

2019-11-11 Thread Mikita Lipski


Thanks for catching it!

Reviewed-by: Mikita Lipski 


On 11.11.2019 12:25, Gustavo A. R. Silva wrote:

Currently, the error check below on the variable *vcpi_slots* is always
false because it is a uint64_t type variable; hence, the values
this variable can hold are never less than zero:

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:
4870 if (dm_new_connector_state->vcpi_slots < 0) {
4871 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
4872 return dm_new_connector_state->vcpi_slots;
4873 }

Fix this by making *vcpi_slots* an int type

Addresses-Coverity: 1487838 ("Unsigned compared against 0")
Fixes: b4c578f08378 ("drm/amd/display: Add MST atomic routines")
Signed-off-by: Gustavo A. R. Silva
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6db07e9e33ab..a8fc90a927d6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -403,7 +403,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
uint8_t abm_level;
-   uint64_t vcpi_slots;
+   int vcpi_slots;
uint64_t pbn;
  };
  
-- 2.23.0


--
Thanks,
Mikita Lipski
Software Engineer, AMD
mikita.lip...@amd.com

Re: [PATCH][next] drm/amd/display: fix dereference of pointer aconnector when it is null

2019-11-08 Thread Mikita Lipski
Thanks!

Reviewed-by: Mikita Lipski 

On 08.11.2019 9:38, Colin King wrote:
> From: Colin Ian King 
> 
> Currently pointer aconnector is being dereferenced by the call to
> to_dm_connector_state before it is being null checked, which could
> lead to a null pointer dereference.  Fix this by checking that
> aconnector is not null before dereferencing it.
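
(Illustration only, not part of this patch: the general pattern the fix
restores is "check the pointer, then dereference it", sketched below with a
made-up struct; the broken order -- reading through the pointer before the
null check -- is exactly the hazard the diff removes.)

#include <stdio.h>

struct conn { int state; };

static int use(struct conn *c)
{
	if (!c)			/* check first ...          */
		return -1;
	return c->state;	/* ... only then dereference */
}

int main(void)
{
	struct conn c = { .state = 7 };
	printf("%d %d\n", use(&c), use(NULL));
	return 0;
}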
> 
> Addresses-Coverity: ("Dereference before null check")
> Fixes: 5133c6241d9c ("drm/amd/display: Add MST atomic routines")
> Signed-off-by: Colin Ian King 
> ---
>   drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5 ++---
>   1 file changed, 2 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
> index e3cda6984d28..72e677796a48 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
> @@ -193,12 +193,11 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
>* that blocks before commit guaranteeing that the state
>* is not gonna be swapped while still in use in commit tail */
>   
> - dm_conn_state = to_dm_connector_state(aconnector->base.state);
> -
> -
>   if (!aconnector || !aconnector->mst_port)
>   return false;
>   
> + dm_conn_state = to_dm_connector_state(aconnector->base.state);
> +
> + mst_mgr = &aconnector->mst_port->mst_mgr;
>   
>   if (!mst_mgr->mst_state)
> 

-- 
Thanks,
Mikita Lipski
mikita.lip...@amd.com
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 01/13] drm/amd/display: Add MST atomic routines

2019-10-31 Thread Mikita Lipski

On 31.10.2019 9:16, Kazlauskas, Nicholas wrote:
> On 2019-10-30 3:24 p.m., mikita.lip...@amd.com wrote:
>> From: Mikita Lipski 
>>
>> - Adding encoder atomic check to find vcpi slots for a connector
>> - Using DRM helper functions to calculate PBN
>> - Adding connector atomic check to release vcpi slots if connector
>> loses CRTC
>> - Calculate  PBN and VCPI slots only once during atomic
>> check and store them on crtc_state to eliminate
>> redundant calculation
>> - Call drm_dp_mst_atomic_check to verify validity of MST topology
>> during state atomic check
>>
>> v2: squashed previous 3 separate patches, removed DSC PBN calculation,
>> and added PBN and VCPI slots properties to amdgpu connector
>>
>> v3:
>> - moved vcpi_slots and pbn properties to dm_crtc_state and dc_stream_state
>> - updates stream's vcpi_slots and pbn on commit
>> - separated patch from the DSC MST series
>>
>> v4:
>> - set vcpi_slots and pbn properties to dm_connector_state
>> - copy properties from connector state onto crtc state
>>
>> v5:
>> - keep the pbn and vcpi values only on connector state
>> - added a void pointer to the stream state instead of two ints,
>> because dc_stream_state is OS agnostic. Pointer points to the
>> current dm_connector_state.
>>
>> v6:
>> - Remove new param from stream
>>
>> v7:
>> - Fix error with using max capable bpc
>>
>> Cc: Jun Lei 
>> Cc: Jerry Zuo 
>> Cc: Harry Wentland 
>> Cc: Nicholas Kazlauskas 
>> Cc: Lyude Paul 
>> Signed-off-by: Mikita Lipski 
> Reviewed-by: Nicholas Kazlauskas 
>
> You might want to verify that this still works as you expect when
> changing "max bpc" on an MST display.
>
> My understanding is that it'd still force a new modeset before encoder
> atomic check is called so you *should* have the correct bpc value during
> MST calculations.

Thanks.

It does still work with MST even if you change the mode to a lower
resolution.

The encoder atomic check is called during drm_atomic_helper_check_modeset,
so a new modeset is already forced by then.
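
For illustration (not part of the patch): a rough userspace approximation of
what drm_dp_calc_pbn_mode() computes, to show why the bpc picked during the
modeset feeds directly into the MST bandwidth numbers. PBN is expressed in
units of 54/64 MBytes/sec with a small margin; treat the exact constants here
as an assumption of this sketch rather than a spec quote.

#include <stdio.h>

/* clock in kHz, bpp in bits per pixel; mirrors the shape of the DRM helper */
static int calc_pbn_mode(int clock, int bpp)
{
	long long num = (long long)clock * bpp * 64 * 1006;	/* ~0.6% margin      */
	long long den = 8LL * 54 * 1000 * 1000;			/* 54/64 MB/s units  */

	return (int)((num + den - 1) / den);			/* round up          */
}

int main(void)
{
	int clock = 533250;	/* illustrative ~4k@60 pixel clock, in kHz */

	printf("8 bpc  (bpp = 24): pbn = %d\n", calc_pbn_mode(clock, 24));
	printf("10 bpc (bpp = 30): pbn = %d\n", calc_pbn_mode(clock, 30));
	return 0;
}

The higher the bpc (and so bpp), the more PBN -- and therefore the more VCPI
slots drm_dp_atomic_find_vcpi_slots() has to reserve -- the stream needs,
which is why the max bpc value matters for the MST calculations.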

>> ---
>>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 65 ++-
>>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  2 +
>>.../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 51 ---
>>.../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 32 +
>>4 files changed, 109 insertions(+), 41 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
>> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> index 48f5b43e2698..d75726013436 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> @@ -4180,7 +4180,8 @@ void amdgpu_dm_connector_funcs_reset(struct 
>> drm_connector *connector)
>>  state->underscan_hborder = 0;
>>  state->underscan_vborder = 0;
>>  state->base.max_requested_bpc = 8;
>> -
>> +state->vcpi_slots = 0;
>> +state->pbn = 0;
>>  if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
>>  state->abm_level = amdgpu_dm_abm_level;
>>
>> @@ -4209,7 +4210,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct 
>> drm_connector *connector)
>>  new_state->underscan_enable = state->underscan_enable;
>>  new_state->underscan_hborder = state->underscan_hborder;
>>  new_state->underscan_vborder = state->underscan_vborder;
>> -
>> +new_state->vcpi_slots = state->vcpi_slots;
>> +new_state->pbn = state->pbn;
>>  return &new_state->base;
>>}
>>
>> @@ -4606,10 +4608,64 @@ static void dm_encoder_helper_disable(struct 
>> drm_encoder *encoder)
>>
>>}
>>
>> +static int convert_dc_color_depth_into_bpc (enum dc_color_depth 
>> display_color_depth)
>> +{
>> +switch (display_color_depth) {
>> +case COLOR_DEPTH_666:
>> +return 6;
>> +case COLOR_DEPTH_888:
>> +return 8;
>> +case COLOR_DEPTH_101010:
>> +return 10;
>> +case COLOR_DEPTH_121212:
>> +return 12;
>> +case COLOR_DEPTH_141414:
>> +return 14;
>> +case COLOR_DEPTH_161616:
>> +return 16;
>> +default:
>> +break;

Re: [PATCH v7] drm/amd/display: Add MST atomic routines

2019-10-30 Thread Mikita Lipski

On 30.10.2019 14:19, Kazlauskas, Nicholas wrote:
> On 2019-10-28 10:31 a.m., mikita.lip...@amd.com wrote:
>> From: Mikita Lipski 
>>
>> - Adding encoder atomic check to find vcpi slots for a connector
>> - Using DRM helper functions to calculate PBN
>> - Adding connector atomic check to release vcpi slots if connector
>> loses CRTC
>> - Calculate  PBN and VCPI slots only once during atomic
>> check and store them on crtc_state to eliminate
>> redundant calculation
>> - Call drm_dp_mst_atomic_check to verify validity of MST topology
>> during state atomic check
>>
>> v2: squashed previous 3 separate patches, removed DSC PBN calculation,
>> and added PBN and VCPI slots properties to amdgpu connector
>>
>> v3:
>> - moved vcpi_slots and pbn properties to dm_crtc_state and dc_stream_state
>> - updates stream's vcpi_slots and pbn on commit
>> - separated patch from the DSC MST series
>>
>> v4:
>> - set vcpi_slots and pbn properties to dm_connector_state
>> - copy properties from connector state onto crtc state
>>
>> v5:
>> - keep the pbn and vcpi values only on connector state
>> - added a void pointer to the stream state instead of two ints,
>> because dc_stream_state is OS agnostic. Pointer points to the
>> current dm_connector_state.
>>
>> v6:
>> - Remove new param from stream
>>
>> v7:
>> - Cleanup
>> - Remove state pointer from stream
>>
>> Cc: Jun Lei 
>> Cc: Jerry Zuo 
>> Cc: Harry Wentland 
>> Cc: Nicholas Kazlauskas 
>> Cc: Lyude Paul 
>> Signed-off-by: Mikita Lipski 
>> ---
>>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 42 ++-
>>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  2 +
>>.../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 51 +--
>>.../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 32 
>>4 files changed, 86 insertions(+), 41 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
>> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> index 48f5b43e2698..28f6b93ab371 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> @@ -4180,7 +4180,8 @@ void amdgpu_dm_connector_funcs_reset(struct 
>> drm_connector *connector)
>>  state->underscan_hborder = 0;
>>  state->underscan_vborder = 0;
>>  state->base.max_requested_bpc = 8;
>> -
>> +state->vcpi_slots = 0;
>> +state->pbn = 0;
>>  if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
>>  state->abm_level = amdgpu_dm_abm_level;
>>
>> @@ -4209,7 +4210,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct 
>> drm_connector *connector)
>>  new_state->underscan_enable = state->underscan_enable;
>>  new_state->underscan_hborder = state->underscan_hborder;
>>  new_state->underscan_vborder = state->underscan_vborder;
>> -
>> +new_state->vcpi_slots = state->vcpi_slots;
>> +new_state->pbn = state->pbn;
>>  return &new_state->base;
>>}
>>
>> @@ -4610,6 +4612,37 @@ static int dm_encoder_helper_atomic_check(struct 
>> drm_encoder *encoder,
>>struct drm_crtc_state *crtc_state,
>>struct drm_connector_state 
>> *conn_state)
>>{
>> +struct drm_atomic_state *state = crtc_state->state;
>> +struct drm_connector *connector = conn_state->connector;
>> +struct amdgpu_dm_connector *aconnector = 
>> to_amdgpu_dm_connector(connector);
>> +struct dm_connector_state *dm_new_connector_state = 
>> to_dm_connector_state(conn_state);
>> +const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
>> +struct drm_dp_mst_topology_mgr *mst_mgr;
>> +struct drm_dp_mst_port *mst_port;
>> +int clock, bpp = 0;
>> +
>> +if (!aconnector->port || !aconnector->dc_sink)
>> +return 0;
>> +
>> +mst_port = aconnector->port;
>> +mst_mgr = &aconnector->mst_port->mst_mgr;
>> +
>> +if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
>> +return 0;
>> +
>> +if (!state->duplicated) {
>> +bpp = (uint8_t)connector->display_info.bpc * 3;
> Is this correct? 

Re: [PATCH v5] drm/amd/display: Add MST atomic routines

2019-10-25 Thread Mikita Lipski

On 25.10.2019 8:06, Kazlauskas, Nicholas wrote:
> On 2019-10-24 5:06 p.m., mikita.lip...@amd.com wrote:
>> From: Mikita Lipski 
>>
>> - Adding encoder atomic check to find vcpi slots for a connector
>> - Using DRM helper functions to calculate PBN
>> - Adding connector atomic check to release vcpi slots if connector
>> loses CRTC
>> - Calculate  PBN and VCPI slots only once during atomic
>> check and store them on crtc_state to eliminate
>> redundant calculation
>> - Call drm_dp_mst_atomic_check to verify validity of MST topology
>> during state atomic check
>>
>> v2: squashed previous 3 separate patches, removed DSC PBN calculation,
>> and added PBN and VCPI slots properties to amdgpu connector
>>
>> v3:
>> - moved vcpi_slots and pbn properties to dm_crtc_state and dc_stream_state
>> - updates stream's vcpi_slots and pbn on commit
>> - separated patch from the DSC MST series
>>
>> v4:
>> - set vcpi_slots and pbn properties to dm_connector_state
>> - copy properties from connector state onto crtc state
>>
>> v5:
>> - keep the pbn and vcpi values only on connector state
>> - added a void pointer to the stream state instead of two ints,
>> because dc_stream_state is OS agnostic. Pointer points to the
>> current dm_connector_state.
>>
>> Cc: Jun Lei 
>> Cc: Jerry Zuo 
>> Cc: Harry Wentland 
>> Cc: Nicholas Kazlauskas 
>> Cc: Lyude Paul 
>> Signed-off-by: Mikita Lipski 
> A few comments below, mostly about how you're storing the DRM state in the
> DC stream.

Hi Nick,

Thanks for pointing that out.

It is definitely better not to introduce a new state pointer to the stream.

I'll apply your comments for the next version.

>
>> ---
>>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 46 ++-
>>.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  2 +
>>.../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 44 ++
>>.../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 32 +
>>drivers/gpu/drm/amd/display/dc/dc_stream.h|  1 +
>>5 files changed, 84 insertions(+), 41 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
>> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> index 48f5b43e2698..1d8d7aaba197 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> @@ -3747,6 +3747,7 @@ create_stream_for_sink(struct amdgpu_dm_connector 
>> *aconnector,
>>  }
>>
>>  stream->dm_stream_context = aconnector;
>> +stream->dm_stream_state = dm_state;
>>
>>  stream->timing.flags.LTE_340MCSC_SCRAMBLE =
>>  drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
>> @@ -4180,7 +4181,8 @@ void amdgpu_dm_connector_funcs_reset(struct 
>> drm_connector *connector)
>>  state->underscan_hborder = 0;
>>  state->underscan_vborder = 0;
>>  state->base.max_requested_bpc = 8;
>> -
>> +state->vcpi_slots = 0;
>> +state->pbn = 0;
>>  if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
>>  state->abm_level = amdgpu_dm_abm_level;
>>
>> @@ -4209,7 +4211,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct 
>> drm_connector *connector)
>>  new_state->underscan_enable = state->underscan_enable;
>>  new_state->underscan_hborder = state->underscan_hborder;
>>  new_state->underscan_vborder = state->underscan_vborder;
>> -
>> +new_state->vcpi_slots = state->vcpi_slots;
>> +new_state->pbn = state->pbn;
>>  return &new_state->base;
>>}
>>
>> @@ -4610,6 +4613,37 @@ static int dm_encoder_helper_atomic_check(struct 
>> drm_encoder *encoder,
>>struct drm_crtc_state *crtc_state,
>>struct drm_connector_state 
>> *conn_state)
>>{
>> +struct drm_atomic_state *state = crtc_state->state;
>> +struct drm_connector *connector = conn_state->connector;
>> +struct amdgpu_dm_connector *aconnector = 
>> to_amdgpu_dm_connector(connector);
>> +struct dm_connector_state *dm_new_connector_state = 
>> to_dm_connector_state(conn_state);
>> +const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
>> +struct drm_dp_mst_topology_mgr *mst_mgr;
>>

Re: [PATCH] drm/amdgpu/display: fix build when CONFIG_DRM_AMD_DC_DSC_SUPPORT=n

2019-10-16 Thread Mikita Lipski
Reviewed-by: Mikita Lipski 

On 16.10.2019 12:13, Alex Deucher wrote:
> Add proper config check.
> 
> Signed-off-by: Alex Deucher 
> ---
>   drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 ++
>   1 file changed, 2 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
> b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> index 914e378bcda4..4f0331810696 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> @@ -1910,8 +1910,10 @@ int dcn20_populate_dml_pipes_from_context(
>   pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
>   }
>   
> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
>   if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
>   pipes[pipe_cnt].dout.output_bpp = 
> res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
> +#endif
>   
>   /* todo: default max for now, until there is logic reflecting 
> this in dc*/
>   pipes[pipe_cnt].dout.output_bpc = 12;
> 

-- 
Thanks,
Mikita Lipski
Software Engineer, AMD
mikita.lip...@amd.com
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 13/14] drm/amd/display: Recalculate VCPI slots for new DSC connectors

2019-10-08 Thread Mikita Lipski
like this in the future and continually building more tech debt for
>> ourselves.
>>
>> Please note as well: if anything I've asked for is confusing, or you don't
>> understand what I'm asking or looking for, I am more than willing to help
>> explain things and help out as best as I can. I understand that a lot of the
>> developers working on DRM at AMD may have more experience in Windows and Mac
>> land and, as a result, trying to get used to the way that we do things in the
>> Linux kernel can be a confusing endeavor. I'm more than happy to help out with
>> this wherever I can; all you need to do is ask. Asking a few questions about
>> something you aren't sure you understand can save both of us a lot of time,
>> and avoid having to go through this many patch respins.
>>
>> In the meantime, I'd be willing to look at which patches from this series
>> have already been reviewed and could be pushed to drm-misc or friends to
>> speed things up a bit.
>>
>> On Tue, 2019-10-01 at 12:17 -0400, mikita.lip...@amd.com wrote:
>>> From: Mikita Lipski 
>>>
>>> Since a DSC MST connector's PBN is calculated differently
>>> due to compression, we have to recalculate both PBN and
>>> VCPI slots for that connector.
>>>
>>> This patch proposes to use similar logic as in
>>> dm_encoder_helper_atomic_check, but since we do not know which
>>> connectors will have DSC enabled we have to recalculate PBN only
>>> after that's determined, which is done in
>>> compute_mst_dsc_configs_for_state.
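
(A worked example with illustrative numbers, not taken from the patch: an
uncompressed 10 bpc stream enters the PBN calculation with bpp = 30, while a
DSC target of, say, 12 bpp -- stored as dsc_cfg.bits_per_pixel = 192, since
that field is in 1/16 bpp units as used elsewhere in this series -- cuts the
PBN, and with it the VCPI slot count, to less than half for the same pixel
clock. That is why the slots found in dm_encoder_helper_atomic_check cannot
simply be reused once DSC is enabled.)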
>>>
>>> Cc: Jerry Zuo 
>>> Cc: Harry Wentland 
>>> Cc: Lyude Paul 
>>> Signed-off-by: Mikita Lipski 
>>> ---
>>>   .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 64 +--
>>>   .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c |  6 --
>>>   2 files changed, 59 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>>> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>>> index 81e30918f9ec..7501ce2233ed 100644
>>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>>> @@ -4569,6 +4569,27 @@ static void dm_encoder_helper_disable(struct
>>> drm_encoder *encoder)
>>>   
>>>   }
>>>   
>>> +static int convert_dc_color_depth_into_bpc (enum dc_color_depth
>>> display_color_depth)
>>> +{
>>> +   switch (display_color_depth) {
>>> +   case COLOR_DEPTH_666:
>>> +   return 6;
>>> +   case COLOR_DEPTH_888:
>>> +   return 8;
>>> +   case COLOR_DEPTH_101010:
>>> +   return 10;
>>> +   case COLOR_DEPTH_121212:
>>> +   return 12;
>>> +   case COLOR_DEPTH_141414:
>>> +   return 14;
>>> +   case COLOR_DEPTH_161616:
>>> +   return 16;
>>> +   default:
>>> +   break;
>>> +   }
>>> +   return 0;
>>> +}
>>> +
>>>   static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
>>>   struct drm_crtc_state *crtc_state,
>>>   struct drm_connector_state
>>> *conn_state)
>>> @@ -4616,6 +4637,36 @@ const struct drm_encoder_helper_funcs
>>> amdgpu_dm_encoder_helper_funcs = {
>>> .atomic_check = dm_encoder_helper_atomic_check
>>>   };
>>>   
>>> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
>>> +static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state
>>> *state,
>>> +   struct dc_state *dc_state)
>>> +{
>>> +   struct dc_stream_state *stream;
>>> +   struct amdgpu_dm_connector *aconnector;
>>> +   int i, clock = 0, bpp = 0;
>>> +
>>> +   for (i = 0; i < dc_state->stream_count; i++) {
>>> +   stream = dc_state->streams[i];
>>> +   aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
>>> +
>>> +   if (stream && stream->timing.flags.DSC == 1) {
>>> +   bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
>>> +   clock = stream->timing.pix_clk_100hz / 10;

Re: [PATCH 15/15] drm/amd/display: Trigger modesets on MST DSC connectors

2019-10-01 Thread Mikita Lipski
 { 0 };
>> +
>> +for_each_new_connector_in_state(state, connector, conn_state, i) {
>> +if (conn_state->crtc != crtc)
>> +continue;
>> +
>> +aconnector = to_amdgpu_dm_connector(connector);
>> +if (!aconnector->port)
>> +aconnector = NULL;
>> +else
>> +break;
>> +}
>> +
>> +if (!aconnector)
>> +return 0;
>> +
>> +i = 0;
>> +drm_connector_list_iter_begin(state->dev, &conn_iter);
>> +drm_for_each_connector_iter(connector, &conn_iter) {
>> +if (!connector->state || !connector->state->crtc)
>> +continue;
>> +
>> +aconnector_to_add = to_amdgpu_dm_connector(connector);
>> +if (!aconnector_to_add->port)
>> +continue;
>> +
>> +if (aconnector_to_add->port->mgr != aconnector->port->mgr)
>> +continue;
>> +
>> +if (!aconnector_to_add->dc_sink)
>> +continue;
>> +
>> +if (!aconnector_to_add->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
>> +continue;
>> +
>> +if (i >= AMDGPU_MAX_CRTCS)
>> +continue;
>> +
>> +crtcs_affected[i] = connector->state->crtc;
>> +i++;
>> +}
>> +drm_connector_list_iter_end(&conn_iter);
>> +
>> +for (j = 0; j < i; j++) {
>> +new_crtc_state = drm_atomic_get_crtc_state(state,
>> crtcs_affected[j]);
>> +if (IS_ERR(new_crtc_state))
>> +return PTR_ERR(new_crtc_state);
>> +
>> +new_crtc_state->mode_changed = true;
>> +}
>> +
>> +return 0;
>> +
>> +}
>> +#endif
>> +
>>   static void get_freesync_config_for_crtc(
>>  struct dm_crtc_state *new_crtc_state,
>>  struct dm_connector_state *new_con_state)
>> @@ -7388,6 +7456,17 @@ static int amdgpu_dm_atomic_check(struct drm_device
>> *dev,
>>  if (ret)
>>  goto fail;
>>   
>> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
>> +if (adev->asic_type >= CHIP_NAVI10) {
>> +for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
>> new_crtc_state, i) {
>> +if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
>> +ret = add_affected_mst_dsc_crtcs(state, crtc);
>> +if (ret)
>> +goto fail;
>> +}
>> +}
>> +}
>> +#endif
>>  for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
>> new_crtc_state, i) {
>>  if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
>>  !new_crtc_state->color_mgmt_changed &&

-- 
Thanks,
Mikita Lipski
Software Engineer 2, AMD
mikita.lip...@amd.com
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 05/14] drm/amd/display: Enable SST DSC in DM

2019-08-19 Thread Mikita Lipski
Tested-by: Mikita Lipski 

Mikita Lipski

On 2019-08-19 11:50 a.m., David Francis wrote:
> In create_stream_for_sink, check for SST DP connectors
> 
> Parse DSC caps to DC format, then, if DSC is supported,
> compute the config
> 
> DSC hardware will be programmed by dc_commit_state
> 
> Cc: Mikita Lipski 
> Signed-off-by: David Francis 
> ---
>   .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32 ---
>   .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c |  4 ++-
>   2 files changed, 24 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 911fe78b47c1..84249057e181 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -3576,6 +3576,10 @@ create_stream_for_sink(struct amdgpu_dm_connector 
> *aconnector,
>   bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
>   int mode_refresh;
>   int preferred_refresh = 0;
> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> + struct dsc_dec_dpcd_caps dsc_caps;
> + uint32_t link_bandwidth_kbps;
> +#endif
>   
>   struct dc_sink *sink = NULL;
>   if (aconnector == NULL) {
> @@ -3648,17 +3652,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
>   &mode, &aconnector->base, con_state, old_stream);
>   
>   #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> - /* stream->timing.flags.DSC = 0; */
> -/*  */
> - /* if (aconnector->dc_link && */
> - /*  aconnector->dc_link->connector_signal == 
> SIGNAL_TYPE_DISPLAY_PORT #<{(|&& */
> - /*  
> aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.is_dsc_supported|)}>#) 
> */
> - /*  if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, */
> - /*  >dc_link->dpcd_caps.dsc_caps, */
> - /*  dc_link_bandwidth_kbps(aconnector->dc_link, 
> dc_link_get_link_cap(aconnector->dc_link)), */
> - /*  >timing, */
> - /*  >timing.dsc_cfg)) */
> - /*  stream->timing.flags.DSC = 1; */
> +	stream->timing.flags.DSC = 0;
> +
> +	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
> +		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
> +				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
> +				      &dsc_caps);
> +		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
> +							     dc_link_get_link_cap(aconnector->dc_link));
> +
> +		if (dsc_caps.is_dsc_supported)
> +			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
> +						  &dsc_caps,
> +						  link_bandwidth_kbps,
> +						  &stream->timing,
> +						  &stream->timing.dsc_cfg))
> +				stream->timing.flags.DSC = 1;
> +	}
>   #endif
>   
>   update_stream_scaling_settings(&mode, dm_state, stream);
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
> index 7cf0573ab25f..5f2c315b18ba 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
> @@ -549,7 +549,9 @@ bool dm_helpers_dp_write_dsc_enable(
>   bool enable
>   )
>   {
> - return false;
> + uint8_t enable_dsc = enable ? 1 : 0;
> +
> +	return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
>   }
>   #endif
>   
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/display: add a check for display depth validity

2018-07-04 Thread Mikita Lipski



On 2018-07-04 04:51 PM, Harry Wentland wrote:

On 2018-07-04 04:40 PM, mikita.lip...@amd.com wrote:

From: Mikita Lipski 

[why]
HDMI 2.0 fails to validate 4K@60 timing with 10 bpc
[how]
Add a helper function that verifies whether the assigned display depth
passes bandwidth validation.
Drop the display depth by one level until the calculated pixel clock
is lower than the maximum TMDS clock.
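
A worked example with illustrative numbers (not from the patch): a
3840x2160@60 CEA mode has a pixel clock of about 594 MHz. At 10 bpc the
normalized clock is 594 * 30 / 24 = 742.5 MHz, above the 600 MHz max TMDS
clock a typical HDMI 2.0 sink reports, so validation fails; dropped to
8 bpc the clock stays at 594 MHz and the mode passes.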

Bugzilla: https://bugs.freedesktop.org/106959

Signed-off-by: Mikita Lipski 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 42 +++
  1 file changed, 42 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ed09e36..c572e86 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2218,6 +2218,46 @@ get_output_color_space(const struct dc_crtc_timing 
*dc_crtc_timing)
return color_space;
  }
  
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)

+{
+   if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+   return;
+
+   timing_out->display_color_depth--;
+}
+
+static void check_if_display_depth_is_supported(struct dc_crtc_timing 
*timing_out,


Doesn't this update timing_out->display_color_depth? We should probably name this 
something else, maybe "pick_max_supported_color_depth" or something similar.

Harry

The function reduce_mode_colour_depth introduced above is the one that 
modifies timing_out->display_color_depth.


Nick

+   const struct drm_display_info 
*info)
+{
+   int normalized_clk;
+   if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+   return;
+   do {
+   normalized_clk = timing_out->pix_clk_khz;
+   /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+   if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+   normalized_clk /= 2;
+   /* Adjusting pix clock following on HDMI spec based on colour 
depth */
+   switch (timing_out->display_color_depth) {
+   case COLOR_DEPTH_101010:
+   normalized_clk = (normalized_clk * 30) / 24;
+   break;
+   case COLOR_DEPTH_121212:
+   normalized_clk = (normalized_clk * 36) / 24;
+   break;
+   case COLOR_DEPTH_161616:
+   normalized_clk = (normalized_clk * 48) / 24;
+   break;
+   default:
+   return;
+   }
+   if (normalized_clk <= info->max_tmds_clock)
+   return;
+   reduce_mode_colour_depth(timing_out);
+
+   } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
  
/*/
  
  static void

@@ -2273,6 +2313,8 @@ fill_stream_properties_from_drm_display_mode(struct 
dc_stream_state *stream,
  
  	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;

stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+   if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+   check_if_display_depth_is_supported(timing_out, info);
  }
  
  static void fill_audio_info(struct audio_info *audio_info,



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: Disable irq on device before destroying it

2018-03-15 Thread Mikita Lipski



On 2018-03-15 10:15 AM, Alex Deucher wrote:

On Thu, Mar 15, 2018 at 10:10 AM,  <mikita.lip...@amd.com> wrote:

From: Mikita Lipski <mikita.lip...@amd.com>

Disable irq on devices before destroying them. That prevents
use-after-free memory access when unloading the driver.

Signed-off-by: Mikita Lipski <mikita.lip...@amd.com>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +++--
  1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4911911..593396f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1456,6 +1456,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device 
*adev)
 }
 }

+   /* disable all interrupts */
+   amdgpu_irq_disable_all(adev);
+


Any reason not to move this to the top of this function before the SMC loop?

Alex


It can be done, but it does not seem to have any functional effect.
The use-after-free corruption is caused by disabling DCE's irq after 
destroying it.
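
For illustration only (a generic userspace sketch, not amdgpu code) of the
ordering rule involved -- quiesce the interrupt source before the state its
handler still references is torn down:

#include <stdio.h>
#include <stdlib.h>

struct fake_irq_source {
	int enabled;
	int *device_state;	/* the "handler" still peeks at this on disable */
};

static void irq_disable(struct fake_irq_source *src)
{
	if (src->enabled && src->device_state)
		printf("disabling, device state = %d\n", *src->device_state);
	src->enabled = 0;
}

int main(void)
{
	int *state = malloc(sizeof(*state));
	struct fake_irq_source src = { .enabled = 1, .device_state = state };

	if (!state)
		return 1;
	*state = 42;

	/* safe order: disable first, then destroy what the handler uses */
	irq_disable(&src);
	src.device_state = NULL;
	free(state);

	/* the broken order -- free(state) first, irq_disable() after --
	 * reads freed memory, i.e. the use-after-free described above */
	return 0;
}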


Nik




 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 if (!adev->ip_blocks[i].status.hw)
 continue;
@@ -1482,8 +1485,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device 
*adev)
 adev->ip_blocks[i].status.hw = false;
 }

-   /* disable all interrupts */
-   amdgpu_irq_disable_all(adev);

 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 if (!adev->ip_blocks[i].status.sw)
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx