Re: [Freedreno] [PATCH 1/2] drm/msm/dsi: add a helper method to compute the dsi byte clk

2022-12-21 Thread Abhinav Kumar

Hi Dmitry

Sorry for the late response.

On 10/31/2022 5:20 PM, Dmitry Baryshkov wrote:

On 28/10/2022 01:22, Abhinav Kumar wrote:



On 10/27/2022 10:35 AM, Dmitry Baryshkov wrote:

On 22/09/2022 03:49, Abhinav Kumar wrote:

Rearrange the dsi_calc_pclk() method into two helpers: one to
compute the DSI byte clk and the other to compute the pclk.

This makes the separation between the two clean and also allows
clients to compute and use the DSI byte clk separately.

Signed-off-by: Abhinav Kumar 
---
  drivers/gpu/drm/msm/dsi/dsi.h  |  2 ++
  drivers/gpu/drm/msm/dsi/dsi_host.c | 27 +++
  2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 2a96b4fe7839..60ba8e67f550 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -118,6 +118,8 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
  int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
  void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
  void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi,
+    const struct drm_display_mode *mode);
  int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
  int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
  void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 57a4c0fa614b..32b35d4ac1d3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -569,9 +569,8 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
  clk_disable_unprepare(msm_host->byte_clk);
  }
-static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, bool is_bonded_dsi)
  {
-    struct drm_display_mode *mode = msm_host->mode;
  unsigned long pclk_rate;
  pclk_rate = mode->clock * 1000;
@@ -588,12 +587,18 @@ static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bo
  return pclk_rate;
  }
-static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi,
+    const struct drm_display_mode *mode)
  {
+    struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  u8 lanes = msm_host->lanes;
  u32 bpp = dsi_get_bpp(msm_host->format);
-    unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
-    u64 pclk_bpp = (u64)pclk_rate * bpp;
+    unsigned long pclk_rate;
+    u64 pclk_bpp;
+
+    pclk_rate = dsi_get_pclk_rate(mode, is_bonded_dsi);
+
+    pclk_bpp = (u64)pclk_rate * bpp;
  if (lanes == 0) {
  pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
@@ -606,8 +611,14 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
  else
  do_div(pclk_bpp, (8 * lanes));
-    msm_host->pixel_clk_rate = pclk_rate;
-    msm_host->byte_clk_rate = pclk_bpp;
+    return pclk_bpp;
+}
+
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+    msm_host->pixel_clk_rate = dsi_get_pclk_rate(msm_host->mode, is_bonded_dsi);
+    msm_host->byte_clk_rate = dsi_byte_clk_get_rate(&msm_host->base, is_bonded_dsi,
+    msm_host->mode);


This way you are calling dsi_get_pclk_rate() twice, which is slightly 
inefficient. You can call it once (here) and then pass the resulting 
pclk_rate as an argument to dsi_byte_clk_get_rate().


So the goal was to have two independent APIs to calculate the byte and 
pixel clk.


If we pass the output of one as the input to the other, we are making 
them dependent.


That's why I kept them separate.


Calling one function from another clearly indicates that they are not 
independent. And surely pixel and byte clocks cannot be fully 
independent. I see your point about getting only the byte clock. But I 
think it would be easier to explicitly pass the pixel rate rather than 
calculating it again under the hood.




Yes, calling one function from another means they are dependent, and 
that's true in this case because the byte clk computation depends on 
the pclk.


The reason behind this separation is that in the next patch we need to 
compute only the byte clock and use it in mode_valid() to validate 
against the OPP table limits.


The OPP table for DSI is based on the byte clk.

If we were to pass pclk as a parameter, we would have to explicitly 
compute the pclk just to pass it to the method which computes the byte 
clk. With this approach, that happens under the hood.


So for the purpose of the next patch I kept it this way.

Let me know what you think.
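
For reference, here is a minimal sketch of the kind of caller the next 
patch has in mind. The function name and the max_byte_clk parameter are 
made up for illustration (in practice the limit would come from the OPP 
table); only dsi_byte_clk_get_rate() is real:

    /*
     * Hypothetical mode_valid-style check: the helper derives the byte
     * clock from the mode internally, so the caller never computes (or
     * passes around) the pclk itself.
     */
    static enum drm_mode_status dsi_host_check_mode(struct mipi_dsi_host *host,
                                                    const struct drm_display_mode *mode,
                                                    bool is_bonded_dsi,
                                                    unsigned long max_byte_clk)
    {
            unsigned long byte_clk = dsi_byte_clk_get_rate(host, is_bonded_dsi, mode);

            return (byte_clk > max_byte_clk) ? MODE_CLOCK_HIGH : MODE_OK;
    }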






  DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
  msm_host

[Freedreno] [PATCH v2 7/8] drm/msm/dpu: Implement DSC binding to PP block for CTL V1

2022-12-21 Thread Marijn Suijten
All V1 CTL blocks (active CTLs) explicitly bind the pixel output from a
DSC block to a PINGPONG block by setting the PINGPONG index in a DSC
hardware register.

Signed-off-by: Marijn Suijten 
Reviewed-by: Dmitry Baryshkov 
Reviewed-by: Abhinav Kumar 
---
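For reference, the DSC_CTL(m) macro added in dpu_hw_dsc.c expands to the
per-block offsets 0x1800 (DSC_0), 0x1404 (DSC_1), 0x1008 (DSC_2) and
0xc0c (DSC_3), i.e. each block's pingpong-mux register sits 0x3fc below
the previous one.
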
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c   |  3 +++
 .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h|  9 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c| 27 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h|  4 +++
 4 files changed, 43 insertions(+)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index a158cd502d38..19fb20a6ed72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1829,6 +1829,9 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc 
*hw_dsc,
if (hw_pp->ops.setup_dsc)
hw_pp->ops.setup_dsc(hw_pp);
 
+   if (hw_dsc->ops.dsc_bind_pingpong_blk)
+   hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
+
if (hw_pp->ops.enable_dsc)
hw_pp->ops.enable_dsc(hw_pp);
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index c160dae95a69..96f849907aa2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -268,6 +268,15 @@ enum {
DPU_VBIF_MAX
 };
 
+/**
+ * DSC features
+ * @DPU_DSC_OUTPUT_CTRL   Configure which PINGPONG block gets
+ *   the pixel output from this DSC.
+ */
+enum {
+   DPU_DSC_OUTPUT_CTRL = 0x1,
+};
+
 /**
  * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
  * @name:  string name for debug purposes
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 3662df698dae..619926da1441 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -29,6 +29,8 @@
 #define DSC_RANGE_MAX_QP0x0B0
 #define DSC_RANGE_BPG_OFFSET0x0EC
 
+#define DSC_CTL(m) (0x1800 - 0x3FC * (m - DSC_0))
+
 static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
 {
struct dpu_hw_blk_reg_map *c = &dsc->hw;
@@ -150,6 +152,29 @@ static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc 
*hw_dsc,
}
 }
 
+static void dpu_hw_dsc_bind_pingpong_blk(
+   struct dpu_hw_dsc *hw_dsc,
+   bool enable,
+   const enum dpu_pingpong pp)
+{
+   struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+   int mux_cfg = 0xF;
+   u32 dsc_ctl_offset;
+
+   dsc_ctl_offset = DSC_CTL(hw_dsc->idx);
+
+   if (enable)
+   mux_cfg = (pp - PINGPONG_0) & 0x7;
+
+   DRM_DEBUG_KMS("%s dsc:%d %s pp:%d\n",
+   enable ? "Binding" : "Unbinding",
+   hw_dsc->idx - DSC_0,
+   enable ? "to" : "from",
+   pp - PINGPONG_0);
+
+   DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
+}
+
 static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
   const struct dpu_mdss_cfg *m,
   void __iomem *addr,
@@ -174,6 +199,8 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
ops->dsc_disable = dpu_hw_dsc_disable;
ops->dsc_config = dpu_hw_dsc_config;
ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
+   if (cap & BIT(DPU_DSC_OUTPUT_CTRL))
+   ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
 };
 
 struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index c0b77fe1a696..ae9b5db53d7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -42,6 +42,10 @@ struct dpu_hw_dsc_ops {
 */
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
  struct drm_dsc_config *dsc);
+
+   void (*dsc_bind_pingpong_blk)(struct dpu_hw_dsc *hw_dsc,
+ bool enable,
+ enum dpu_pingpong pp);
 };
 
 struct dpu_hw_dsc {
-- 
2.39.0



[Freedreno] [PATCH v2 4/8] drm/msm/dpu: Disallow unallocated resources to be returned

2022-12-21 Thread Marijn Suijten
In the event that the topology requests resources that have not been
created by the system (because they are typically not represented in
dpu_mdss_cfg ^1), the resource(s) in global_state (in this case DSC
blocks) remain NULL but will still be returned out of
dpu_rm_get_assigned_resources, where the caller expects to get an array
containing num_blks valid pointers (but instead gets these NULLs).

To prevent this from happening (null-pointer dereferences typically
result in a hard-to-debug platform lockup), stop increasing num_blks past
a NULL block and print an error and break instead.  After all, max_blks
represents the static maximum number of blocks, whereas the actual
amount varies per platform.

^1: which can happen after a git rebase ended up moving additions to
_dpu_cfg to a different struct which has the same patch context.

Fixes: bb00a452d6f7 ("drm/msm/dpu: Refactor resource manager")
Signed-off-by: Marijn Suijten 
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 73b3442e7467..8471d04bff50 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -660,6 +660,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
  blks_size, enc_id);
break;
}
+   if (!hw_blks[i]) {
+   DPU_ERROR("No more resource %d available to assign to 
enc %d\n",
+ type, enc_id);
+   break;
+   }
blks[num_blks++] = hw_blks[i];
}
 
-- 
2.39.0



[Freedreno] [PATCH v2 2/8] drm/msm/dsi: Use DSC slice(s) packet size to compute word count

2022-12-21 Thread Marijn Suijten
According to downstream, the value to use for WORD_COUNT is
bytes_per_pkt: the number of bytes in a packet, computed as the number
of slices configured by the panel driver times the width of a slice
times the number of bytes per pixel.

The DSC panels seen thus far use one byte per pixel, only one slice
per packet, and a slice width of half the panel width, leading to the
desired bytes_per_pkt+1 value being equal to hdisplay/2+1.  This is no
longer the case for panels that configure two slices per packet,
where the value should now be hdisplay+1.

Note that the aforementioned panel (on a Sony Xperia XZ3, sdm845) with
slice_count=1 has also been tested to successfully accept slice_count=2,
which would have shown corrupted output previously.

Fixes: 08802f515c3c ("drm/msm/dsi: Add support for DSC configuration")
Signed-off-by: Marijn Suijten 
Reviewed-by: Dmitry Baryshkov 
Reviewed-by: Abhinav Kumar 
---
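For reference, a worked example with hypothetical numbers (not taken
from any particular panel): with hdisplay = 1080, slice_width = 540 and
one byte per compressed pixel, slice_chunk_size = 540.  A panel using
slice_count = 1 then gets wc = 540 * 1 + 1 = 541 = hdisplay/2 + 1, which
is exactly what the old formula produced, while slice_count = 2 gives
wc = 540 * 2 + 1 = 1081 = hdisplay + 1, which the old formula got wrong.
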
 drivers/gpu/drm/msm/dsi/dsi_host.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c 
b/drivers/gpu/drm/msm/dsi/dsi_host.c
index b83cf70b1adb..0686c35a6fd4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -989,7 +989,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, 
bool is_bonded_dsi)
if (!msm_host->dsc)
wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
else
-   wc = mode->hdisplay / 2 + 1;
+   wc = msm_host->dsc->slice_chunk_size * msm_host->dsc->slice_count + 1;
 
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
-- 
2.39.0



[Freedreno] [PATCH v2 0/8] drm/msm: DSC Electric Boogaloo for sm8[12]50

2022-12-21 Thread Marijn Suijten
This preliminary Display Stream Compression support package for
(initially tested on) sm8[12]50 is based on comparing DSC behaviour
between downstream and mainline.  Some new callbacks are added (for
binding blocks on active CTLs), logic bugs are corrected, zeroed struct
members are now assigned proper values, and RM allocation and hw block
retrieval now hand out (or not) DSC blocks without causing null-pointer
dereferences.

Unfortunately it is not yet enough to get rid of completely corrupted
display output on the boards I tested here:
- Sony Xperia 1 (sm8150), 1644x3840 or 1096x2560 pixels;
- Sony Xperia 5II (sm8250), 1080x2520, at 60 or 120Hz;
- (can include more Xperia boards if desired)

Both devices use the DUALPIPE_DSCMERGE topology downstream: dual LM, PP
and DSC, but only a single INTF/encoder/DSI-link.

Hopefully this spawns some community/upstream interest to help root-cause
our corruption issues (after we open a drm/msm report on GitLab for more
appropriate tracking).

The Sony Xperia XZ3 (sdm845) was fully tested and validated with this
series to not cause any regressions (and one of the math fixes now
allows us to change slice_count in the panel driver, which would corrupt
previously).

Changes since v1:

- Split patch 6 into two separately backportable Fixes: patches;
- Additionally remove num_enc from msm_display_topology in favour of
  num_dsc;
- Reorder patches to have all Fixes: at the beginning for easier
  picking;
- Fix existing multiline comment while editing it anyway;
- Add missing Signed-off-by to patch 5.

v1: 
https://lore.kernel.org/linux-arm-msm/20221213232207.113607-1-marijn.suij...@somainline.org/T/#u

Marijn Suijten (8):
  drm/msm/dpu: Wire up DSC mask for active CTL configuration
  drm/msm/dsi: Use DSC slice(s) packet size to compute word count
  drm/msm/dsi: Flip greater-than check for slice_count and
slice_per_intf
  drm/msm/dpu: Disallow unallocated resources to be returned
  drm/msm/dpu: Reject topologies for which no DSC blocks are available
  drm/msm/dpu: Remove num_enc from topology struct in favour of num_dsc
  drm/msm/dpu: Implement DSC binding to PP block for CTL V1
  drm/msm/dpu: Add DSC configuration for SM8150 and SM8250

 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c   | 12 +
 .../drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c  |  1 +
 .../drm/msm/disp/dpu1/dpu_encoder_phys_vid.c  |  1 +
 .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c| 23 +++-
 .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h|  9 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c| 27 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h|  4 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c| 14 --
 drivers/gpu/drm/msm/dsi/dsi_host.c|  7 ++---
 drivers/gpu/drm/msm/msm_drv.h |  2 --
 10 files changed, 82 insertions(+), 18 deletions(-)

--
2.39.0



[Freedreno] [PATCH v2 1/8] drm/msm/dpu: Wire up DSC mask for active CTL configuration

2022-12-21 Thread Marijn Suijten
Active CTLs have to configure which DSC block(s) have to be enabled and
which DSC block(s) have to be flushed; this value was initialized to zero,
resulting in the necessary register writes never happening (or writing
zero otherwise).  This seems to have gotten lost in the DSC v4->v5
series while refactoring how the combination with merge_3d was handled.

Fixes: 58dca9810749 ("drm/msm/disp/dpu1: Add support for DSC in encoder")
Signed-off-by: Marijn Suijten 
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 1 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index ae28b2b93e69..35791f93c33d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -61,6 +61,7 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+   intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
 
/* setup which pp blk will connect to this intf */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 0f71e8fe7be7..9ee3a7306a5f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -274,6 +274,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+   intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
 
-- 
2.39.0



[Freedreno] [PATCH v2 8/8] drm/msm/dpu: Add DSC configuration for SM8150 and SM8250

2022-12-21 Thread Marijn Suijten
These DSC blocks on CTL V1 need to set their corresponding PINGPONG block
index in a hardware register to configure where to send their pixel output,
via the newly-added DPU_DSC_OUTPUT_CTRL feature flag.

Signed-off-by: Marijn Suijten 
Reviewed-by: Dmitry Baryshkov 
Reviewed-by: Abhinav Kumar 
---
 .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c| 23 ++-
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 318f0b4dbf6e..114ad8ca4554 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -1566,18 +1566,25 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] 
= {
 /*
  * DSC sub blocks config
  */
-#define DSC_BLK(_name, _id, _base) \
+#define DSC_BLK(_name, _id, _base, _features) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x140, \
-   .features = 0, \
+   .features = _features, \
}
 
 static struct dpu_dsc_cfg sdm845_dsc[] = {
-   DSC_BLK("dsc_0", DSC_0, 0x8),
-   DSC_BLK("dsc_1", DSC_1, 0x80400),
-   DSC_BLK("dsc_2", DSC_2, 0x80800),
-   DSC_BLK("dsc_3", DSC_3, 0x80c00),
+   DSC_BLK("dsc_0", DSC_0, 0x8, 0),
+   DSC_BLK("dsc_1", DSC_1, 0x80400, 0),
+   DSC_BLK("dsc_2", DSC_2, 0x80800, 0),
+   DSC_BLK("dsc_3", DSC_3, 0x80c00, 0),
+};
+
+static struct dpu_dsc_cfg sm8150_dsc[] = {
+   DSC_BLK("dsc_0", DSC_0, 0x8, BIT(DPU_DSC_OUTPUT_CTRL)),
+   DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
+   DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
+   DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
 };
 
 /*
@@ -2474,6 +2481,8 @@ static const struct dpu_mdss_cfg sm8150_dpu_cfg = {
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
.dspp = sm8150_dspp,
+   .dsc_count = ARRAY_SIZE(sm8150_dsc),
+   .dsc = sm8150_dsc,
.pingpong_count = ARRAY_SIZE(sm8150_pp),
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
@@ -2524,6 +2533,8 @@ static const struct dpu_mdss_cfg sm8250_dpu_cfg = {
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
.dspp = sm8150_dspp,
+   .dsc_count = ARRAY_SIZE(sm8150_dsc),
+   .dsc = sm8150_dsc,
.pingpong_count = ARRAY_SIZE(sm8150_pp),
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
-- 
2.39.0



[Freedreno] [PATCH v2 5/8] drm/msm/dpu: Reject topologies for which no DSC blocks are available

2022-12-21 Thread Marijn Suijten
Resource allocation of DSC blocks should behave more like LMs and CTLs
where NULL resources (based on initial hw_blk creation via definitions
in the catalog) are skipped ^1.  The current hardcoded mapping of DSC
blocks however means that resource allocation shouldn't succeed at all
when the DSC block on the corresponding index doesn't exist, rather than
searching for the next free block.

This hardcoded mapping should be loosened separately as DPU 5.0.0
introduced a crossbar where DSC blocks can be "somewhat" freely bound to
any PP and CTL (in proper pairs).

^1: which, on hardware that supports DSC, can happen after a git rebase
ended up moving additions to _dpu_cfg to a different struct which has
the same patch context.

Fixes: f2803ee91a41 ("drm/msm/disp/dpu1: Add DSC support in RM")
Signed-off-by: Marijn Suijten 
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 8471d04bff50..dcbf03d2940a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -496,6 +496,11 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
 
/* check if DSC required are allocated or not */
for (i = 0; i < num_dsc; i++) {
+   if (!rm->dsc_blks[i]) {
+   DPU_ERROR("DSC %d does not exist\n", i);
+   return -EIO;
+   }
+
if (global_state->dsc_to_enc_id[i]) {
DPU_ERROR("DSC %d is already allocated\n", i);
return -EIO;
-- 
2.39.0



[Freedreno] [PATCH v2 6/8] drm/msm/dpu: Remove num_enc from topology struct in favour of num_dsc

2022-12-21 Thread Marijn Suijten
Downstream calls this num_enc yet the DSC patches introduced a new
num_dsc struct member, leaving num_enc effectively unused.

Fixes: 7e9cc175b159 ("drm/msm/disp/dpu1: Add support for DSC in topology")
Signed-off-by: Marijn Suijten 
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 9 -
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c  | 4 ++--
 drivers/gpu/drm/msm/msm_drv.h   | 2 --
 3 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 9c6817b5a194..a158cd502d38 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -579,19 +579,18 @@ static struct msm_display_topology 
dpu_encoder_get_topology(
topology.num_dspp = topology.num_lm;
}
 
-   topology.num_enc = 0;
topology.num_intf = intf_count;
 
if (dpu_enc->dsc) {
-   /* In case of Display Stream Compression (DSC), we would use
-* 2 encoders, 2 layer mixers and 1 interface
+   /*
+* In case of Display Stream Compression (DSC), we would use
+* 2 DSC encoders, 2 layer mixers and 1 interface
 * this is power optimal and can drive up to (including) 4k
 * screens
 */
-   topology.num_enc = 2;
topology.num_dsc = 2;
-   topology.num_intf = 1;
topology.num_lm = 2;
+   topology.num_intf = 1;
}
 
return topology;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index dcbf03d2940a..5e7aa0f3a31c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -548,8 +548,8 @@ static int _dpu_rm_populate_requirements(
 {
reqs->topology = req_topology;
 
-   DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
- reqs->topology.num_lm, reqs->topology.num_enc,
+   DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ reqs->topology.num_lm, reqs->topology.num_dsc,
  reqs->topology.num_intf);
 
return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d4e0ef608950..74626a271f46 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -82,14 +82,12 @@ enum msm_event_wait {
 /**
  * struct msm_display_topology - defines a display topology pipeline
  * @num_lm:   number of layer mixers used
- * @num_enc:  number of compression encoder blocks used
  * @num_intf: number of interfaces the panel is mounted on
  * @num_dspp: number of dspp blocks used
  * @num_dsc:  number of Display Stream Compression (DSC) blocks used
  */
 struct msm_display_topology {
u32 num_lm;
-   u32 num_enc;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
-- 
2.39.0



[Freedreno] [PATCH v2 3/8] drm/msm/dsi: Flip greater-than check for slice_count and slice_per_intf

2022-12-21 Thread Marijn Suijten
According to downstream /and the comment copied from it/ this comparison
should be the other way around.  In other words, when the panel driver
requests to use more slices per packet than what could be sent over this
interface, it is bumped down to only use a single slice per packet (and
strangely not the number of slices that could fit on the interface).

Fixes: 08802f515c3c ("drm/msm/dsi: Add support for DSC configuration")
Signed-off-by: Marijn Suijten 
Reviewed-by: Dmitry Baryshkov 
Reviewed-by: Abhinav Kumar 
---
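A hypothetical example of the case being guarded (numbers are for
illustration only): with hdisplay = 1080 and slice_width = 540,
slice_per_intf = DIV_ROUND_UP(1080, 540) = 2.  If the panel driver asks
for slice_count = 4 (e.g. during a partial update), the old check
(slice_per_intf > slice_count, i.e. 2 > 4) never fired and the invalid
configuration passed through, while the corrected check (4 > 2) bumps
slice_count down to 1 as the comment intends.
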
 drivers/gpu/drm/msm/dsi/dsi_host.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c 
b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 0686c35a6fd4..3409a4275d4a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -855,11 +855,12 @@ static void dsi_update_dsc_timing(struct msm_dsi_host 
*msm_host, bool is_cmd_mod
 */
slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
 
-   /* If slice_per_pkt is greater than slice_per_intf
+   /*
+* If slice_count is greater than slice_per_intf
 * then default to 1. This can happen during partial
 * update.
 */
-   if (slice_per_intf > dsc->slice_count)
+   if (dsc->slice_count > slice_per_intf)
dsc->slice_count = 1;
 
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
-- 
2.39.0



Re: [Freedreno] [PATCH v9 01/15] drm/msm/disp/dpu: clear dpu_assign_crtc and get crtc from connector state instead of dpu_enc

2022-12-21 Thread Dmitry Baryshkov

On 21/12/2022 16:10, Vinod Polimera wrote:




-Original Message-
From: Dmitry Baryshkov 
Sent: Wednesday, December 14, 2022 9:05 PM
To: Vinod Polimera (QUIC) ; dri-
de...@lists.freedesktop.org; linux-arm-...@vger.kernel.org;
freedreno@lists.freedesktop.org; devicet...@vger.kernel.org
Cc: linux-ker...@vger.kernel.org; robdcl...@gmail.com;
diand...@chromium.org; swb...@chromium.org; Kalyan Thota (QUIC)
; Kuogee Hsieh (QUIC)
; Vishnuvardhan Prodduturi (QUIC)
; Bjorn Andersson (QUIC)
; Aravind Venkateswaran (QUIC)
; Abhinav Kumar (QUIC)
; Sankeerth Billakanti (QUIC)

Subject: Re: [PATCH v9 01/15] drm/msm/disp/dpu: clear dpu_assign_crtc and
get crtc from connector state instead of dpu_enc

On 14/12/2022 12:05, Vinod Polimera wrote:

Update crtc retrieval from dpu_enc to dpu_enc connector state,
since new links get set as part of the dpu enc virt mode set.
The dpu_enc->crtc cache is no more needed, hence cleaning it as
part of this change.

This patch is dependent on the series:
https://patchwork.freedesktop.org/series/110969/

Signed-off-by: Vinod Polimera 
---
   drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c|  4 ---
   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 42 +-

---

   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  8 --
   3 files changed, 13 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c

b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c

index 3f72d38..289d51e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1029,7 +1029,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
*/
   if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
   release_bandwidth = true;
- dpu_encoder_assign_crtc(encoder, NULL);
   }

   /* wait for frame_event_done completion */
@@ -1099,9 +1098,6 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
   trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
   dpu_crtc->enabled = true;

- drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state-
encoder_mask)
- dpu_encoder_assign_crtc(encoder, crtc);
-
   /* Enable/restore vblank irq handling */
   drm_crtc_vblank_on(crtc);
   }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c

b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c

index a585036..b9b254d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -132,11 +132,6 @@ enum dpu_enc_rc_states {
* @intfs_swapped:  Whether or not the phys_enc interfaces have been

swapped

*  for partial update right-only cases, such as pingpong
*  split where virtual pingpong does not generate IRQs
- * @crtc:Pointer to the currently assigned crtc. Normally you
- *   would use crtc->state->encoder_mask to determine the
- *   link between encoder/crtc. However in this case we need
- *   to track crtc in the disable() hook which is called
- *   _after_ encoder_mask is cleared.
* @connector:  If a mode is set, cached pointer to the active

connector

* @crtc_kickoff_cb:Callback into CRTC that will flush & 
start
*  all CTL paths
@@ -181,7 +176,6 @@ struct dpu_encoder_virt {

   bool intfs_swapped;

- struct drm_crtc *crtc;
   struct drm_connector *connector;

   struct dentry *debugfs_root;
@@ -1317,7 +1311,7 @@ static void dpu_encoder_vblank_callback(struct

drm_encoder *drm_enc,

   struct dpu_encoder_phys *phy_enc)
   {
   struct dpu_encoder_virt *dpu_enc = NULL;
- unsigned long lock_flags;
+ struct drm_crtc *crtc;

   if (!drm_enc || !phy_enc)
   return;
@@ -1325,12 +1319,13 @@ static void dpu_encoder_vblank_callback(struct

drm_encoder *drm_enc,

   DPU_ATRACE_BEGIN("encoder_vblank_callback");
   dpu_enc = to_dpu_encoder_virt(drm_enc);

- atomic_inc(&phy_enc->vsync_cnt);
+ if (!dpu_enc->connector || !dpu_enc->connector->state ||
+ !dpu_enc->connector->state->crtc)
+ return;

- spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- if (dpu_enc->crtc)
- dpu_crtc_vblank_callback(dpu_enc->crtc);
- spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ atomic_inc(&phy_enc->vsync_cnt);
+ crtc = dpu_enc->connector->state->crtc;
+ dpu_crtc_vblank_callback(crtc);

   DPU_ATRACE_END("encoder_vblank_callback");
   }
@@ -1353,33 +1348,22 @@ static void

dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,

   DPU_ATRACE_END("encoder_underrun_callback");
   }

-void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct

drm_crtc *crtc)

-{
- struct dpu_

Re: [Freedreno] [PATCH v3 1/5] PM: domains: Allow a genpd consumer to require a synced power off

2022-12-21 Thread Akhil P Oommen
On 12/21/2022 8:13 PM, Ulf Hansson wrote:
> On Tue, 20 Dec 2022 at 08:44, Akhil P Oommen  wrote:
>> From: Ulf Hansson 
>>
>> Some genpd providers doesn't ensure that it has turned off at hardware.
>> This is fine until the consumer really requires during some special
>> scenarios that the power domain collapse at hardware before it is
>> turned ON again.
>>
>> An example is the reset sequence of Adreno GPU which requires that the
>> 'gpucc cx gdsc' power domain should move to OFF state in hardware at
>> least once before turning in ON again to clear the internal state.
>>
>> Signed-off-by: Ulf Hansson 
>> Signed-off-by: Akhil P Oommen 
>> ---
>>
>> (no changes since v2)
>>
>> Changes in v2:
>> - Minor formatting fix
>>
>>  drivers/base/power/domain.c | 23 +++
>>  include/linux/pm_domain.h   |  5 +
>>  2 files changed, 28 insertions(+)
>>
>> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
>> index 967bcf9d415e..53524a102321 100644
>> --- a/drivers/base/power/domain.c
>> +++ b/drivers/base/power/domain.c
>> @@ -519,6 +519,28 @@ ktime_t dev_pm_genpd_get_next_hrtimer(struct device 
>> *dev)
>>  }
>>  EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
>>
>> +/*
>> + * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
>> + *
>> + * @dev: A device that is attached to the genpd.
>> + *
>> + * Allows a consumer of the genpd to notify the provider that the next 
>> power off
>> + * should be synchronous.
> Nitpick; similar to other dev_pm_genpd_* function-descriptions, I
> think it's important to add the below information.
>
> "It is assumed that the users guarantee that the genpd wouldn't be
> detached while this routine is getting called."
>
> Can you please add that?
Thanks. Fixed in revision 4.

-Akhil.
>
>> + */
>> +void dev_pm_genpd_synced_poweroff(struct device *dev)
>> +{
>> +   struct generic_pm_domain *genpd;
>> +
>> +   genpd = dev_to_genpd_safe(dev);
>> +   if (!genpd)
>> +   return;
>> +
>> +   genpd_lock(genpd);
>> +   genpd->synced_poweroff = true;
>> +   genpd_unlock(genpd);
>> +}
>> +EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
>> +
>>  static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
>>  {
>> unsigned int state_idx = genpd->state_idx;
>> @@ -562,6 +584,7 @@ static int _genpd_power_on(struct generic_pm_domain 
>> *genpd, bool timed)
>>
>>  out:
>> raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, 
>> NULL);
>> +   genpd->synced_poweroff = false;
>> return 0;
>>  err:
>> raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
>> diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
>> index 1cd41bdf73cf..f776fb93eaa0 100644
>> --- a/include/linux/pm_domain.h
>> +++ b/include/linux/pm_domain.h
>> @@ -136,6 +136,7 @@ struct generic_pm_domain {
>> unsigned int prepared_count;/* Suspend counter of prepared 
>> devices */
>> unsigned int performance_state; /* Aggregated max performance state 
>> */
>> cpumask_var_t cpus; /* A cpumask of the attached CPUs */
>> +   bool synced_poweroff;   /* A consumer needs a synced 
>> poweroff */
>> int (*power_off)(struct generic_pm_domain *domain);
>> int (*power_on)(struct generic_pm_domain *domain);
>> struct raw_notifier_head power_notifiers; /* Power on/off notifiers 
>> */
>> @@ -235,6 +236,7 @@ int dev_pm_genpd_add_notifier(struct device *dev, struct 
>> notifier_block *nb);
>>  int dev_pm_genpd_remove_notifier(struct device *dev);
>>  void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
>>  ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
>> +void dev_pm_genpd_synced_poweroff(struct device *dev);
>>
>>  extern struct dev_power_governor simple_qos_governor;
>>  extern struct dev_power_governor pm_domain_always_on_gov;
>> @@ -300,6 +302,9 @@ static inline ktime_t 
>> dev_pm_genpd_get_next_hrtimer(struct device *dev)
>>  {
>> return KTIME_MAX;
>>  }
>> +static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
>> +{ }
>> +
>>  #define simple_qos_governor(*(struct dev_power_governor 
>> *)(NULL))
>>  #define pm_domain_always_on_gov(*(struct dev_power_governor 
>> *)(NULL))
>>  #endif
>> --
>> 2.7.4
>>
> Kind regards
> Uffe



[Freedreno] [PATCH v4 5/5] drm/msm/a6xx: Use genpd notifier to ensure cx-gdsc collapse

2022-12-21 Thread Akhil P Oommen
As per the recommended recovery sequence of the Adreno GPU, the cx gdsc
should collapse in hardware before it is turned back ON. This helps to
clear out the stale states in hardware before it is reinitialized. Use
the genpd notifier along with the newly introduced
dev_pm_genpd_synced_poweroff() API to ensure that the cx gdsc has
collapsed before we turn it back ON.

Signed-off-by: Akhil P Oommen 
---

(no changes since v2)

Changes in v2:
- Select PM_GENERIC_DOMAINS from Kconfig

 drivers/gpu/drm/msm/Kconfig   |  1 +
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 15 +++
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h |  6 ++
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 11 +++
 4 files changed, 33 insertions(+)

diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 3c9dfdb0b328..74f5916f5ca5 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -28,6 +28,7 @@ config DRM_MSM
select SYNC_FILE
select PM_OPP
select NVMEM
+   select PM_GENERIC_DOMAINS
help
  DRM/KMS driver for MSM/snapdragon.
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 1580d0090f35..c03830957c26 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1507,6 +1507,17 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
gmu->initialized = false;
 }
 
+static int cxpd_notifier_cb(struct notifier_block *nb,
+   unsigned long action, void *data)
+{
+   struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
+
+   if (action == GENPD_NOTIFY_OFF)
+   complete_all(&gmu->pd_gate);
+
+   return 0;
+}
+
 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 {
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -1640,6 +1651,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct 
device_node *node)
goto detach_cxpd;
}
 
+   init_completion(&gmu->pd_gate);
+   complete_all(&gmu->pd_gate);
+   gmu->pd_nb.notifier_call = cxpd_notifier_cb;
+
/*
 * Get a link to the GX power domain to reset the GPU in case of GMU
 * crash
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h 
b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 5a42dd4dd31f..0bc3eb443fec 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -4,8 +4,10 @@
 #ifndef _A6XX_GMU_H_
 #define _A6XX_GMU_H_
 
+#include 
 #include 
 #include 
+#include 
 #include "msm_drv.h"
 #include "a6xx_hfi.h"
 
@@ -90,6 +92,10 @@ struct a6xx_gmu {
bool initialized;
bool hung;
bool legacy; /* a618 or a630 */
+
+   /* For power domain callback */
+   struct notifier_block pd_nb;
+   struct completion pd_gate;
 };
 
 static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 4b16e75dfa50..dd618b099110 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
 
 #include 
 #include 
+#include 
 #include 
 
 #define GPU_PAS_ID 13
@@ -1258,6 +1259,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
 {
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+   struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int i, active_submits;
 
adreno_dump_info(gpu);
@@ -1290,6 +1292,10 @@ static void a6xx_recover(struct msm_gpu *gpu)
 */
gpu->active_submits = 0;
 
+   reinit_completion(&gmu->pd_gate);
+   dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
+   dev_pm_genpd_synced_poweroff(gmu->cxpd);
+
/* Drop the rpm refcount from active submits */
if (active_submits)
pm_runtime_put(&gpu->pdev->dev);
@@ -1297,6 +1303,11 @@ static void a6xx_recover(struct msm_gpu *gpu)
/* And the final one from recover worker */
pm_runtime_put_sync(&gpu->pdev->dev);
 
+   if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
+   DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");
+
+   dev_pm_genpd_remove_notifier(gmu->cxpd);
+
pm_runtime_use_autosuspend(&gpu->pdev->dev);
 
if (active_submits)
-- 
2.7.4



[Freedreno] [PATCH v4 4/5] drm/msm/a6xx: Remove cx gdsc polling using 'reset'

2022-12-21 Thread Akhil P Oommen
Remove the unused 'reset' interface which was supposed to help ensure
that the cx gdsc has collapsed during gpu recovery. This was not enabled
so far due to missing gpucc driver support. Similar functionality using
the genpd framework will be implemented in the upcoming patch.

This effectively reverts commit 1f6cca404918
("drm/msm/a6xx: Ensure CX collapse during gpu recovery").

Signed-off-by: Akhil P Oommen 
---

(no changes since v3)

Changes in v3:
- Updated commit msg (Philipp)

 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 4 
 drivers/gpu/drm/msm/msm_gpu.c | 4 
 drivers/gpu/drm/msm/msm_gpu.h | 4 
 3 files changed, 12 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 36c8fb699b56..4b16e75dfa50 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,7 +10,6 @@
 
 #include 
 #include 
-#include 
 #include 
 
 #define GPU_PAS_ID 13
@@ -1298,9 +1297,6 @@ static void a6xx_recover(struct msm_gpu *gpu)
/* And the final one from recover worker */
pm_runtime_put_sync(&gpu->pdev->dev);
 
-   /* Call into gpucc driver to poll for cx gdsc collapse */
-   reset_control_reset(gpu->cx_collapse);
-
pm_runtime_use_autosuspend(&gpu->pdev->dev);
 
if (active_submits)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 30ed45af76ad..97e1319d4577 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -16,7 +16,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 
 /*
@@ -933,9 +932,6 @@ int msm_gpu_init(struct drm_device *drm, struct 
platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
 
-   gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
-   "cx_collapse");
-
gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
 
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 651786bc55e5..fa9e34d02c91 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -13,7 +13,6 @@
 #include 
 #include 
 #include 
-#include 
 
 #include "msm_drv.h"
 #include "msm_fence.h"
@@ -282,9 +281,6 @@ struct msm_gpu {
bool hw_apriv;
 
struct thermal_cooling_device *cooling;
-
-   /* To poll for cx gdsc collapse during gpu recovery */
-   struct reset_control *cx_collapse;
 };
 
 static inline struct msm_gpu *dev_to_gpu(struct device *dev)
-- 
2.7.4



[Freedreno] [PATCH v4 2/5] clk: qcom: gdsc: Support 'synced_poweroff' genpd flag

2022-12-21 Thread Akhil P Oommen
Add support for the newly added 'synced_poweroff' genpd flag. This allows
some clients (like the adreno gpu driver) to request that the gdsc driver
ensure a votable gdsc (like the gpucc cx gdsc) has collapsed in hardware.

Signed-off-by: Akhil P Oommen 
---

(no changes since v3)

Changes in v3:
- Rename the var 'force_sync' to 'wait (Stephen)

 drivers/clk/qcom/gdsc.c | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 9e4d6ce891aa..5358e28122ab 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -136,7 +136,8 @@ static int gdsc_update_collapse_bit(struct gdsc *sc, bool 
val)
return 0;
 }
 
-static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
+static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status,
+   bool wait)
 {
int ret;
 
@@ -149,7 +150,7 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum 
gdsc_status status)
ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);
 
/* If disabling votable gdscs, don't poll on status */
-   if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
+   if ((sc->flags & VOTABLE) && status == GDSC_OFF && !wait) {
/*
 * Add a short delay here to ensure that an enable
 * right after it was disabled does not put it in an
@@ -275,7 +276,7 @@ static int gdsc_enable(struct generic_pm_domain *domain)
gdsc_deassert_clamp_io(sc);
}
 
-   ret = gdsc_toggle_logic(sc, GDSC_ON);
+   ret = gdsc_toggle_logic(sc, GDSC_ON, false);
if (ret)
return ret;
 
@@ -352,7 +353,7 @@ static int gdsc_disable(struct generic_pm_domain *domain)
if (sc->pwrsts == PWRSTS_RET_ON)
return 0;
 
-   ret = gdsc_toggle_logic(sc, GDSC_OFF);
+   ret = gdsc_toggle_logic(sc, GDSC_OFF, domain->synced_poweroff);
if (ret)
return ret;
 
@@ -392,7 +393,7 @@ static int gdsc_init(struct gdsc *sc)
 
/* Force gdsc ON if only ON state is supported */
if (sc->pwrsts == PWRSTS_ON) {
-   ret = gdsc_toggle_logic(sc, GDSC_ON);
+   ret = gdsc_toggle_logic(sc, GDSC_ON, false);
if (ret)
return ret;
}
-- 
2.7.4



[Freedreno] [PATCH v4 3/5] drm/msm/a6xx: Vote for cx gdsc from gpu driver

2022-12-21 Thread Akhil P Oommen
When a device has multiple power domains, dev->power_domain is left
empty during probe. That didn't cause any issue so far because we were
freeloading on the smmu driver's vote on the cx gdsc. Instead of that,
create a device_link between the cx genpd device and the gmu device to
keep a vote from the gpu driver.

Before this patch:
localhost ~ # cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
gx_gdsc on  0
/devices/genpd:1:3d6a000.gmu    active  0
cx_gdsc on  0
/devices/platform/soc@0/3da.iommu   active  0

After this patch:
localhost ~ # cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
gx_gdsc on  0
/devices/genpd:1:3d6a000.gmu    active  0
cx_gdsc on  0
/devices/platform/soc@0/3da.iommu   active  0
/devices/genpd:0:3d6a000.gmu    active  0

Signed-off-by: Akhil P Oommen 
---

(no changes since v1)

 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 31 +++
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h |  1 +
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 6484b97c5344..1580d0090f35 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1479,6 +1479,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
pm_runtime_force_suspend(gmu->dev);
 
+   /*
+* Since cxpd is a virt device, the devlink with gmu-dev will be removed
+* automatically when we do detach
+*/
+   dev_pm_domain_detach(gmu->cxpd, false);
+
if (!IS_ERR_OR_NULL(gmu->gxpd)) {
pm_runtime_disable(gmu->gxpd);
dev_pm_domain_detach(gmu->gxpd, false);
@@ -1605,8 +1611,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct 
device_node *node)
 
if (adreno_is_a650_family(adreno_gpu)) {
gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
-   if (IS_ERR(gmu->rscc))
+   if (IS_ERR(gmu->rscc)) {
+   ret = -ENODEV;
goto err_mmio;
+   }
} else {
gmu->rscc = gmu->mmio + 0x23000;
}
@@ -1615,8 +1623,22 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct 
device_node *node)
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
 
-   if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+   if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
+   ret = -ENODEV;
+   goto err_mmio;
+   }
+
+   gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
+   if (IS_ERR(gmu->cxpd)) {
+   ret = PTR_ERR(gmu->cxpd);
goto err_mmio;
+   }
+
+   if (!device_link_add(gmu->dev, gmu->cxpd,
+   DL_FLAG_PM_RUNTIME)) {
+   ret = -ENODEV;
+   goto detach_cxpd;
+   }
 
/*
 * Get a link to the GX power domain to reset the GPU in case of GMU
@@ -1634,6 +1656,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct 
device_node *node)
 
return 0;
 
+detach_cxpd:
+   dev_pm_domain_detach(gmu->cxpd, false);
+
 err_mmio:
iounmap(gmu->mmio);
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
@@ -1641,8 +1666,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct 
device_node *node)
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
 
-   ret = -ENODEV;
-
 err_memory:
a6xx_gmu_memory_free(gmu);
 err_put_device:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h 
b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index e034935b3986..5a42dd4dd31f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -56,6 +56,7 @@ struct a6xx_gmu {
int gmu_irq;
 
struct device *gxpd;
+   struct device *cxpd;
 
int idle_level;
 
-- 
2.7.4



[Freedreno] [PATCH v4 1/5] PM: domains: Allow a genpd consumer to require a synced power off

2022-12-21 Thread Akhil P Oommen
From: Ulf Hansson 

Some genpd providers don't ensure that the power domain has turned off
in hardware. This is fine until, in some special scenarios, the consumer
really requires that the power domain collapses in hardware before it is
turned ON again.

An example is the reset sequence of the Adreno GPU, which requires that
the 'gpucc cx gdsc' power domain moves to the OFF state in hardware at
least once before turning it ON again, to clear the internal state.

Signed-off-by: Ulf Hansson 
Signed-off-by: Akhil P Oommen 
---

Changes in v4:
- Update genpd function documentation (Ulf)

Changes in v2:
- Minor formatting fix

 drivers/base/power/domain.c | 26 ++
 include/linux/pm_domain.h   |  5 +
 2 files changed, 31 insertions(+)

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 967bcf9d415e..84662d338188 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -519,6 +519,31 @@ ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
 
+/*
+ * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
+ *
+ * @dev: A device that is attached to the genpd.
+ *
+ * Allows a consumer of the genpd to notify the provider that the next power 
off
+ * should be synchronous.
+ *
+ * It is assumed that the users guarantee that the genpd wouldn't be detached
+ * while this routine is getting called.
+ */
+void dev_pm_genpd_synced_poweroff(struct device *dev)
+{
+   struct generic_pm_domain *genpd;
+
+   genpd = dev_to_genpd_safe(dev);
+   if (!genpd)
+   return;
+
+   genpd_lock(genpd);
+   genpd->synced_poweroff = true;
+   genpd_unlock(genpd);
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
+
 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
unsigned int state_idx = genpd->state_idx;
@@ -562,6 +587,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, 
bool timed)
 
 out:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+   genpd->synced_poweroff = false;
return 0;
 err:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 1cd41bdf73cf..f776fb93eaa0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -136,6 +136,7 @@ struct generic_pm_domain {
unsigned int prepared_count;/* Suspend counter of prepared devices 
*/
unsigned int performance_state; /* Aggregated max performance state */
cpumask_var_t cpus; /* A cpumask of the attached CPUs */
+   bool synced_poweroff;   /* A consumer needs a synced poweroff */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
@@ -235,6 +236,7 @@ int dev_pm_genpd_add_notifier(struct device *dev, struct 
notifier_block *nb);
 int dev_pm_genpd_remove_notifier(struct device *dev);
 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
+void dev_pm_genpd_synced_poweroff(struct device *dev);
 
 extern struct dev_power_governor simple_qos_governor;
 extern struct dev_power_governor pm_domain_always_on_gov;
@@ -300,6 +302,9 @@ static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct 
device *dev)
 {
return KTIME_MAX;
 }
+static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
+{ }
+
 #define simple_qos_governor(*(struct dev_power_governor *)(NULL))
 #define pm_domain_always_on_gov(*(struct dev_power_governor 
*)(NULL))
 #endif
-- 
2.7.4



[Freedreno] [PATCH v4 0/5] Improve GPU reset sequence for Adreno GPU

2022-12-21 Thread Akhil P Oommen


This is a rework of [1] using genpd instead of 'reset' framework.

As per the recommended reset sequence of the Adreno GPU, we should ensure
that the gpucc-cx-gdsc has collapsed in hardware in order to reset the GPU's
internal hardware states. Because this gdsc is implemented as 'votable', the
gdsc driver doesn't poll and wait until its hw status says OFF.

So use the newly introduced genpd API (dev_pm_genpd_synced_poweroff()) to
hint to the gdsc driver that it should poll the hw status, and use a genpd
notifier in the adreno gpu driver to wait until the gdsc is turned OFF.
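
Condensed, the consumer-side pattern used by this series (taken from
patch 5/5; error handling and the runtime-PM reference dropping in the
recovery path are elided here) looks like this:

    static int cxpd_notifier_cb(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);

            /* genpd signals this only once the domain is really off */
            if (action == GENPD_NOTIFY_OFF)
                    complete_all(&gmu->pd_gate);

            return 0;
    }

            /* ... in the recovery path ... */
            reinit_completion(&gmu->pd_gate);
            dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
            /* hint the gdsc driver to poll for HW collapse on the next power off */
            dev_pm_genpd_synced_poweroff(gmu->cxpd);

            /* ... drop the runtime PM refs so the domain can power off ... */

            if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
                    DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");

            dev_pm_genpd_remove_notifier(gmu->cxpd);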

This series is rebased on top of linux-next (20221215) since the changes span
multiple drivers.

[1] https://patchwork.freedesktop.org/series/107507/

Changes in v4:
- Update genpd function documentation (Ulf)

Changes in v3:
- Rename the var 'force_sync' to 'wait (Stephen)

Changes in v2:
- Minor formatting fix
- Select PM_GENERIC_DOMAINS from Kconfig

Akhil P Oommen (4):
  clk: qcom: gdsc: Support 'synced_poweroff' genpd flag
  drm/msm/a6xx: Vote for cx gdsc from gpu driver
  drm/msm/a6xx: Remove cx gdsc polling using 'reset'
  drm/msm/a6xx: Use genpd notifier to ensure cx-gdsc collapse

Ulf Hansson (1):
  PM: domains: Allow a genpd consumer to require a synced power off

 drivers/base/power/domain.c   | 26 
 drivers/clk/qcom/gdsc.c   | 11 +
 drivers/gpu/drm/msm/Kconfig   |  1 +
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 46 ---
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h |  7 ++
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 13 +++---
 drivers/gpu/drm/msm/msm_gpu.c |  4 ---
 drivers/gpu/drm/msm/msm_gpu.h |  4 ---
 include/linux/pm_domain.h |  5 
 9 files changed, 97 insertions(+), 20 deletions(-)

-- 
2.7.4



[Freedreno] [PATCH v2 4/4] drm/msm/a6xx: Update ROQ size in coredump

2022-12-21 Thread Akhil P Oommen
Since the RoQ size differs between GPU generations, calculate it
dynamically while capturing the coredump.

Signed-off-by: Akhil P Oommen 
---

(no changes since v1)
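
For reference, the shift in a6xx_get_cp_roq_size() below combines the
field extraction and the scaling in one step: the upper 16 bits of
CP_ROQ_THRESHOLDS_2 hold the RoQ size in units of 4 dwords, so
size_in_dwords = (reg >> 16) * 4, which the code folds into a single
reg >> 14.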

 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 11 ++-
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 17 ++---
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index da190b6ddba0..80e60e34ce7d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -939,15 +939,24 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
dumper);
 }
 
+static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
+{
+   /* The value at [16:31] is in 4dword units. Convert it to dwords */
+   return gpu_read(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2) >> 14;
+}
+
 /* Read a block of data from an indexed register pair */
 static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
-   const struct a6xx_indexed_registers *indexed,
+   struct a6xx_indexed_registers *indexed,
struct a6xx_gpu_state_obj *obj)
 {
int i;
 
obj->handle = (const void *) indexed;
+   if (indexed->count_fn)
+   indexed->count = indexed->count_fn(gpu);
+
obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
if (!obj->data)
return;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 808121c88662..790f55e24533 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -383,25 +383,28 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_gx_registers, 0, 0),
 };
 
-static const struct a6xx_indexed_registers {
+static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
+
+static struct a6xx_indexed_registers {
const char *name;
u32 addr;
u32 data;
u32 count;
+   u32 (*count_fn)(struct msm_gpu *gpu);
 } a6xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
-   REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+   REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
-   REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+   REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
-   REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+   REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
-   REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+   REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
 };
 
-static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+static struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
-   REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+   REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
 };
 
 #define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
-- 
2.7.4



[Freedreno] [PATCH v2 2/4] drm/msm: Fix failure paths in msm_drm_init()

2022-12-21 Thread Akhil P Oommen
Ensure that we do drm_dev_put() when there is an early return in
msm_drm_init().

Signed-off-by: Akhil P Oommen 
---

(no changes since v1)

 drivers/gpu/drm/msm/disp/msm_disp_snapshot.c |  3 +++
 drivers/gpu/drm/msm/msm_drv.c| 11 +++
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c 
b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
index e75b97127c0d..b73031cd48e4 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -129,6 +129,9 @@ void msm_disp_snapshot_destroy(struct drm_device *drm_dev)
}
 
priv = drm_dev->dev_private;
+   if (!priv->kms)
+   return;
+
kms = priv->kms;
 
if (kms->dump_worker)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index eb5b056ce3f7..544e041dd710 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -149,6 +149,9 @@ static void msm_irq_uninstall(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
 
+   if (!priv->kms)
+   return;
+
kms->funcs->irq_uninstall(kms);
if (kms->irq_requested)
free_irq(kms->irq, dev);
@@ -265,8 +268,6 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
 
ddev->dev_private = NULL;
-   drm_dev_put(ddev);
-
destroy_workqueue(priv->wq);
 
return 0;
@@ -441,12 +442,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
ret = msm_init_vram(ddev);
if (ret)
-   return ret;
+   goto err_drm_dev_put;
 
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
-   return ret;
+   goto err_drm_dev_put;
 
dma_set_max_seg_size(dev, UINT_MAX);
 
@@ -541,6 +542,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
 err_msm_uninit:
msm_drm_uninit(dev);
+err_drm_dev_put:
+   drm_dev_put(ddev);
return ret;
 }
 
-- 
2.7.4



[Freedreno] [PATCH v2 3/4] drm/msm/a6xx: Update a6xx gpu coredump

2022-12-21 Thread Akhil P Oommen
Update the GPU coredump for the a650/a660 family of GPUs to include
the extra information available on these targets.
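
For illustration only (not part of the patch), a simplified sketch of the
pattern used in the diff below: size the snapshot for the common blocks
plus the family-specific extras. example_nr_debugbus() and its parameters
are hypothetical; adreno_is_a650_family() and to_adreno_gpu() are the real
helpers used in the change:

/* Assumes the driver-internal adreno_gpu.h for adreno_is_a650_family()
 * and to_adreno_gpu(). */
static int example_nr_debugbus(struct msm_gpu *gpu, int base, int extra)
{
	int nr = base;

	/* Newer a650/a660 targets expose additional debugbus blocks. */
	if (adreno_is_a650_family(to_adreno_gpu(gpu)))
		nr += extra;

	return nr;
}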

Signed-off-by: Akhil P Oommen 
---

(no changes since v1)

 drivers/gpu/drm/msm/adreno/a6xx.xml.h   | 18 +++
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 50 -
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 49 +++-
 3 files changed, 108 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index beea4a7fc1df..a92788019376 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -241,6 +241,9 @@ enum a6xx_shader_id {
A6XX_HLSQ_FRONTEND_META = 97,
A6XX_HLSQ_INDIRECT_META = 98,
A6XX_HLSQ_BACKEND_META = 99,
+   A6XX_SP_LB_6_DATA = 112,
+   A6XX_SP_LB_7_DATA = 113,
+   A6XX_HLSQ_INST_RAM_1 = 115,
 };
 
 enum a6xx_debugbus_id {
@@ -274,19 +277,32 @@ enum a6xx_debugbus_id {
A6XX_DBGBUS_HLSQ_SPTP = 31,
A6XX_DBGBUS_RB_0 = 32,
A6XX_DBGBUS_RB_1 = 33,
+   A6XX_DBGBUS_RB_2 = 34,
A6XX_DBGBUS_UCHE_WRAPPER = 36,
A6XX_DBGBUS_CCU_0 = 40,
A6XX_DBGBUS_CCU_1 = 41,
+   A6XX_DBGBUS_CCU_2 = 42,
A6XX_DBGBUS_VFD_0 = 56,
A6XX_DBGBUS_VFD_1 = 57,
A6XX_DBGBUS_VFD_2 = 58,
A6XX_DBGBUS_VFD_3 = 59,
+   A6XX_DBGBUS_VFD_4 = 60,
+   A6XX_DBGBUS_VFD_5 = 61,
A6XX_DBGBUS_SP_0 = 64,
A6XX_DBGBUS_SP_1 = 65,
+   A6XX_DBGBUS_SP_2 = 66,
A6XX_DBGBUS_TPL1_0 = 72,
A6XX_DBGBUS_TPL1_1 = 73,
A6XX_DBGBUS_TPL1_2 = 74,
A6XX_DBGBUS_TPL1_3 = 75,
+   A6XX_DBGBUS_TPL1_4 = 76,
+   A6XX_DBGBUS_TPL1_5 = 77,
+   A6XX_DBGBUS_SPTP_0 = 88,
+   A6XX_DBGBUS_SPTP_1 = 89,
+   A6XX_DBGBUS_SPTP_2 = 90,
+   A6XX_DBGBUS_SPTP_3 = 91,
+   A6XX_DBGBUS_SPTP_4 = 92,
+   A6XX_DBGBUS_SPTP_5 = 93,
 };
 
 enum a6xx_cp_perfcounter_select {
@@ -1071,6 +1087,8 @@ enum a6xx_tex_type {
 
 #define REG_A6XX_CP_MISC_CNTL  0x0840
 
+#define REG_A6XX_CP_CHICKEN_DBG 0x0841
+
 #define REG_A6XX_CP_APRIV_CNTL 0x0844
 
 #define REG_A6XX_CP_ROQ_THRESHOLDS_1   0x08c1
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index c61b233aff09..da190b6ddba0 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -385,6 +385,9 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
(a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
 
+   if (adreno_is_a650_family(to_adreno_gpu(gpu)))
+   nr_debugbus_blocks += ARRAY_SIZE(a650_debugbus_blocks);
+
a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
sizeof(*a6xx_state->debugbus));
 
@@ -411,6 +414,15 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
 
a6xx_state->nr_debugbus += 1;
}
+
+
+   if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
+   for (i = 0; i < ARRAY_SIZE(a650_debugbus_blocks); i++)
+   a6xx_get_debugbus_block(gpu,
+   a6xx_state,
+   &a650_debugbus_blocks[i],
+   &a6xx_state->debugbus[i]);
+   }
}
 
/*  Dump the VBIF debugbus on applicable targets */
@@ -524,10 +536,21 @@ static void a6xx_get_cluster(struct msm_gpu *gpu,
struct a6xx_gpu_state_obj *obj,
struct a6xx_crashdumper *dumper)
 {
+   struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
u64 *in = dumper->ptr;
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
size_t datasize;
int i, regcount = 0;
+   u32 id = cluster->id;
+
+   /* Skip registers that are not present on older generation */
+   if (!adreno_is_a660_family(adreno_gpu) &&
+   cluster->registers == a660_fe_cluster)
+   return;
+
+   if (adreno_is_a650_family(adreno_gpu) &&
+   cluster->registers == a6xx_ps_cluster)
+   id = CLUSTER_VPC_PS;
 
/* Some clusters need a selector register to be programmed too */
if (cluster->sel_reg)
@@ -537,7 +560,7 @@ static void a6xx_get_cluster(struct msm_gpu *gpu,
int j;
 
in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
-   (cluster->id << 8) | (i << 4) | i);
+   (id << 8) | (i << 4) | i);
 
for (j = 0; j < cluster->count; j += 2) {
int count = RANGE(cluster->registers, j);
@@ -687,6 +710,11 @@ static void a6xx_get_crashdumper_registers(struct 

[Freedreno] [PATCH v2 1/4] drm/msm/adreno: Fix null ptr access in adreno_gpu_cleanup()

2022-12-21 Thread Akhil P Oommen
Fix the below kernel panic caused by a NULL pointer access:
[   18.504431] Unable to handle kernel NULL pointer dereference at virtual address 0048
[   18.513464] Mem abort info:
[   18.516346]   ESR = 0x9605
[   18.520204]   EC = 0x25: DABT (current EL), IL = 32 bits
[   18.525706]   SET = 0, FnV = 0
[   18.528878]   EA = 0, S1PTW = 0
[   18.532117]   FSC = 0x05: level 1 translation fault
[   18.537138] Data abort info:
[   18.540110]   ISV = 0, ISS = 0x0005
[   18.544060]   CM = 0, WnR = 0
[   18.547109] user pgtable: 4k pages, 39-bit VAs, pgdp=000112826000
[   18.553738] [0048] pgd=, p4d=, pud=
[   18.562690] Internal error: Oops: 9605 [#1] PREEMPT SMP
**Snip**
[   18.696758] Call trace:
[   18.699278]  adreno_gpu_cleanup+0x30/0x88
[   18.703396]  a6xx_destroy+0xc0/0x130
[   18.707066]  a6xx_gpu_init+0x308/0x424
[   18.710921]  adreno_bind+0x178/0x288
[   18.714590]  component_bind_all+0xe0/0x214
[   18.718797]  msm_drm_bind+0x1d4/0x614
[   18.722566]  try_to_bring_up_aggregate_device+0x16c/0x1b8
[   18.728105]  __component_add+0xa0/0x158
[   18.732048]  component_add+0x20/0x2c
[   18.735719]  adreno_probe+0x40/0xc0
[   18.739300]  platform_probe+0xb4/0xd4
[   18.743068]  really_probe+0xfc/0x284
[   18.746738]  __driver_probe_device+0xc0/0xec
[   18.751129]  driver_probe_device+0x48/0x110
[   18.755421]  __device_attach_driver+0xa8/0xd0
[   18.759900]  bus_for_each_drv+0x90/0xdc
[   18.763843]  __device_attach+0xfc/0x174
[   18.767786]  device_initial_probe+0x20/0x2c
[   18.772090]  bus_probe_device+0x40/0xa0
[   18.776032]  deferred_probe_work_func+0x94/0xd0
[   18.780686]  process_one_work+0x190/0x3d0
[   18.784805]  worker_thread+0x280/0x3d4
[   18.788659]  kthread+0x104/0x1c0
[   18.791981]  ret_from_fork+0x10/0x20
[   18.795654] Code: f9400408 aa0003f3 aa1f03f4 91142015 (f9402516)
[   18.801913] ---[ end trace  ]---
[   18.809039] Kernel panic - not syncing: Oops: Fatal exception

Fixes: 17e822f7591f ("drm/msm: fix unbalanced pm_runtime_enable in adreno_gpu_{init, cleanup}")
Signed-off-by: Akhil P Oommen 
---

Changes in v2:
- Added 'Fixes' tag (Dan Carpenter)

 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 382fb7f9e497..118d07e5c66c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -1073,13 +1073,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
struct msm_gpu *gpu = &adreno_gpu->base;
-   struct msm_drm_private *priv = gpu->dev->dev_private;
+   struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
unsigned int i;
 
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
 
-   if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+   if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
pm_runtime_disable(&priv->gpu_pdev->dev);
 
msm_gpu_cleanup(&adreno_gpu->base);
-- 
2.7.4



Re: [Freedreno] [PATCH v3 1/5] PM: domains: Allow a genpd consumer to require a synced power off

2022-12-21 Thread Ulf Hansson
On Tue, 20 Dec 2022 at 08:44, Akhil P Oommen  wrote:
>
> From: Ulf Hansson 
>
> Some genpd providers don't ensure that the power domain has actually
> turned off in hardware. This is fine until a consumer, in some special
> scenario, really requires the power domain to collapse in hardware
> before it is turned ON again.
>
> An example is the reset sequence of the Adreno GPU, which requires that
> the 'gpucc cx gdsc' power domain move to the OFF state in hardware at
> least once before it is turned ON again, in order to clear the internal
> state.
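
As an editorial illustration only (not taken from this series): a
consumer-side sketch of how the new API could be used, assuming a
hypothetical example_force_collapse() and that 'cxpd' is the device
attached to the 'gpucc cx gdsc' domain; dev_pm_genpd_synced_poweroff() is
the call added below and pm_runtime_put_sync() is the ordinary runtime PM
call:

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static void example_force_collapse(struct device *cxpd)
{
	/* Ask the provider to make the next power off a real collapse. */
	dev_pm_genpd_synced_poweroff(cxpd);

	/* Dropping the last usage count then powers the domain off. */
	pm_runtime_put_sync(cxpd);
}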
>
> Signed-off-by: Ulf Hansson 
> Signed-off-by: Akhil P Oommen 
> ---
>
> (no changes since v2)
>
> Changes in v2:
> - Minor formatting fix
>
>  drivers/base/power/domain.c | 23 +++
>  include/linux/pm_domain.h   |  5 +
>  2 files changed, 28 insertions(+)
>
> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
> index 967bcf9d415e..53524a102321 100644
> --- a/drivers/base/power/domain.c
> +++ b/drivers/base/power/domain.c
> @@ -519,6 +519,28 @@ ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
>  }
>  EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
>
> +/*
> + * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
> + *
> + * @dev: A device that is attached to the genpd.
> + *
> + * Allows a consumer of the genpd to notify the provider that the next power off
> + * should be synchronous.

Nitpick: similar to the other dev_pm_genpd_* function descriptions, I
think it's important to add the information below.

"It is assumed that the users guarantee that the genpd wouldn't be
detached while this routine is getting called."

Can you please add that?
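
For reference, the amended description might then read roughly as follows
(a sketch only; the final wording is up to the author):

/*
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next
 * power off should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be
 * detached while this routine is getting called.
 */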

> + */
> +void dev_pm_genpd_synced_poweroff(struct device *dev)
> +{
> +   struct generic_pm_domain *genpd;
> +
> +   genpd = dev_to_genpd_safe(dev);
> +   if (!genpd)
> +   return;
> +
> +   genpd_lock(genpd);
> +   genpd->synced_poweroff = true;
> +   genpd_unlock(genpd);
> +}
> +EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
> +
>  static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
>  {
> unsigned int state_idx = genpd->state_idx;
> @@ -562,6 +584,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
>
>  out:
> raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
> +   genpd->synced_poweroff = false;
> return 0;
>  err:
> raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
> diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
> index 1cd41bdf73cf..f776fb93eaa0 100644
> --- a/include/linux/pm_domain.h
> +++ b/include/linux/pm_domain.h
> @@ -136,6 +136,7 @@ struct generic_pm_domain {
> unsigned int prepared_count;/* Suspend counter of prepared devices */
> unsigned int performance_state; /* Aggregated max performance state */
> cpumask_var_t cpus; /* A cpumask of the attached CPUs */
> +   bool synced_poweroff;   /* A consumer needs a synced poweroff */
> int (*power_off)(struct generic_pm_domain *domain);
> int (*power_on)(struct generic_pm_domain *domain);
> struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
> @@ -235,6 +236,7 @@ int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
>  int dev_pm_genpd_remove_notifier(struct device *dev);
>  void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
>  ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
> +void dev_pm_genpd_synced_poweroff(struct device *dev);
>
>  extern struct dev_power_governor simple_qos_governor;
>  extern struct dev_power_governor pm_domain_always_on_gov;
> @@ -300,6 +302,9 @@ static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
>  {
> return KTIME_MAX;
>  }
> +static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
> +{ }
> +
>  #define simple_qos_governor(*(struct dev_power_governor *)(NULL))
> #define pm_domain_always_on_gov(*(struct dev_power_governor *)(NULL))
>  #endif
> --
> 2.7.4
>

Kind regards
Uffe


Re: [Freedreno] [PATCH v9 01/15] drm/msm/disp/dpu: clear dpu_assign_crtc and get crtc from connector state instead of dpu_enc

2022-12-21 Thread Vinod Polimera


> -Original Message-
> From: Dmitry Baryshkov 
> Sent: Wednesday, December 14, 2022 9:05 PM
> To: Vinod Polimera (QUIC) ; dri-
> de...@lists.freedesktop.org; linux-arm-...@vger.kernel.org;
> freedreno@lists.freedesktop.org; devicet...@vger.kernel.org
> Cc: linux-ker...@vger.kernel.org; robdcl...@gmail.com;
> diand...@chromium.org; swb...@chromium.org; Kalyan Thota (QUIC)
> ; Kuogee Hsieh (QUIC)
> ; Vishnuvardhan Prodduturi (QUIC)
> ; Bjorn Andersson (QUIC)
> ; Aravind Venkateswaran (QUIC)
> ; Abhinav Kumar (QUIC)
> ; Sankeerth Billakanti (QUIC)
> 
> Subject: Re: [PATCH v9 01/15] drm/msm/disp/dpu: clear dpu_assign_crtc and
> get crtc from connector state instead of dpu_enc
> 
> On 14/12/2022 12:05, Vinod Polimera wrote:
> > Update crtc retrieval to use the dpu_enc connector state instead of
> > the dpu_enc, since the new links get set as part of the dpu encoder
> > virt mode set. The dpu_enc->crtc cache is no longer needed, so clean
> > it up as part of this change.
> >
> > This patch is dependent on the series:
> > https://patchwork.freedesktop.org/series/110969/
> >
> > Signed-off-by: Vinod Polimera 
> > ---
> >   drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c|  4 ---
> >   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 42 +-
> ---
> >   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  8 --
> >   3 files changed, 13 insertions(+), 41 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
> > index 3f72d38..289d51e 100644
> > --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
> > +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
> > @@ -1029,7 +1029,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
> >*/
> >   if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
> >   release_bandwidth = true;
> > - dpu_encoder_assign_crtc(encoder, NULL);
> >   }
> >
> >   /* wait for frame_event_done completion */
> > @@ -1099,9 +1098,6 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
> >   trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
> >   dpu_crtc->enabled = true;
> >
> > - drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
> > - dpu_encoder_assign_crtc(encoder, crtc);
> > -
> >   /* Enable/restore vblank irq handling */
> >   drm_crtc_vblank_on(crtc);
> >   }
> > diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> > index a585036..b9b254d 100644
> > --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> > +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> > @@ -132,11 +132,6 @@ enum dpu_enc_rc_states {
> >* @intfs_swapped:  Whether or not the phys_enc interfaces have been swapped
> >*  for partial update right-only cases, such as pingpong
> >*  split where virtual pingpong does not generate IRQs
> > - * @crtc:Pointer to the currently assigned crtc. Normally you
> > - *   would use crtc->state->encoder_mask to determine the
> > - *   link between encoder/crtc. However in this case we need
> > - *   to track crtc in the disable() hook which is called
> > - *   _after_ encoder_mask is cleared.
> >* @connector:  If a mode is set, cached pointer to the active
> connector
> >* @crtc_kickoff_cb:Callback into CRTC that will flush & start
> >*  all CTL paths
> > @@ -181,7 +176,6 @@ struct dpu_encoder_virt {
> >
> >   bool intfs_swapped;
> >
> > - struct drm_crtc *crtc;
> >   struct drm_connector *connector;
> >
> >   struct dentry *debugfs_root;
> > @@ -1317,7 +1311,7 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
> >   struct dpu_encoder_phys *phy_enc)
> >   {
> >   struct dpu_encoder_virt *dpu_enc = NULL;
> > - unsigned long lock_flags;
> > + struct drm_crtc *crtc;
> >
> >   if (!drm_enc || !phy_enc)
> >   return;
> > @@ -1325,12 +1319,13 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
> >   DPU_ATRACE_BEGIN("encoder_vblank_callback");
> >   dpu_enc = to_dpu_encoder_virt(drm_enc);
> >
> > - atomic_inc(&phy_enc->vsync_cnt);
> > + if (!dpu_enc->connector || !dpu_enc->connector->state ||
> > + !dpu_enc->connector->state->crtc)
> > + return;
> >
> > - spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
> > - if (dpu_enc->crtc)
> > - dpu_crtc_vblank_callback(dpu_enc->crtc);
> > - spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
> > + atomic_inc(&phy_enc->vsync_cnt);
> > + crtc = dpu_enc->connector->state->crtc;
> > + dpu_crtc_vblank_callback(crtc);
> >