On Wed, Aug 23, 2023 at 10:07:30AM -0700, Lucas De Marchi wrote:
> From: Stanislav Lisovskiy <[email protected]>
> 
> In Lunar Lake we now separate MDCLK from CDCLK, which previously was
> always 2 times CDCLK.  Now we can afford a lower CDCLK while having a
> higher memory clock, improving bandwidth and power consumption at the
> same time.  This is prep work required to enable that.
> 
> Signed-off-by: Stanislav Lisovskiy <[email protected]>
> Signed-off-by: Lucas De Marchi <[email protected]>
> ---
>  drivers/gpu/drm/i915/display/intel_cdclk.c | 30 ++++++++++++++++++++++
>  drivers/gpu/drm/i915/display/intel_cdclk.h |  2 +-
>  2 files changed, 31 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
> index fdd8d04fe12c..3e566f45996d 100644
> --- a/drivers/gpu/drm/i915/display/intel_cdclk.c
> +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
> @@ -1223,6 +1223,7 @@ static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
>  
>  struct intel_cdclk_vals {
>       u32 cdclk;
> +     u32 mdclk;
>       u16 refclk;
>       u16 waveform;
>       u8 divider;     /* CD2X divider * 2 */
> @@ -1524,6 +1525,8 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
>  static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
>                         struct intel_cdclk_config *cdclk_config)
>  {
> +     const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
> +     int i, ratio, tbl_waveform = 0;
>       u32 squash_ctl = 0;
>       u32 divider;
>       int div;
> @@ -1574,10 +1577,36 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
>  
>               cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) *
>                                                       cdclk_config->vco, size * div);
> +             tbl_waveform = squash_ctl & CDCLK_SQUASH_WAVEFORM_MASK;
>       } else {
>               cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
>       }
>  
> +     ratio = cdclk_config->vco / cdclk_config->ref;
> +
> +     for (i = 0; table[i].refclk; i++) {
> +             if (table[i].refclk != cdclk_config->ref)
> +                     continue;
> +
> +             if (table[i].divider != div)
> +                     continue;
> +
> +             if (table[i].waveform != tbl_waveform)
> +                     continue;
> +
> +             if (table[i].ratio != ratio)
> +                     continue;
> +
> +             /*
> +              * mdclk is supported from Lunar Lake HW onwards; however,
> +              * since the rest of the procedure is the same, we keep
> +              * this lookup for all platforms.
> +              */
> +             cdclk_config->mdclk = table[i].mdclk;
> +
> +             break;
> +     }

I might be misunderstanding something, but from bspec 68861, it looks
like the mdclk frequency is always just "ratio * refclk," which is the
value we already have stored in cdclk_config->vco.  Do we need to do
this extra lookup or track this value separately?
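
If that reading is correct, here's a minimal (untested) sketch of what
I have in mind; it just reuses the vco value we already read out
instead of walking the table:

	/*
	 * Assumption from bspec 68861: MDCLK == ratio * refclk, which
	 * is exactly the VCO frequency computed during readout, so no
	 * table lookup would be needed.
	 */
	cdclk_config->mdclk = cdclk_config->vco;

That said, I may be missing a platform where the table's mdclk value
is meant to diverge from the vco.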


Matt

> +
>   out:
>       /*
>        * Can't read this out :( Let's assume it's
> @@ -2191,6 +2220,7 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
>                              const struct intel_cdclk_config *b)
>  {
>       return a->cdclk != b->cdclk ||
> +             a->mdclk != b->mdclk ||
>               a->vco != b->vco ||
>               a->ref != b->ref;
>  }
> diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
> index 48fd7d39e0cd..3e7eabd4d7b6 100644
> --- a/drivers/gpu/drm/i915/display/intel_cdclk.h
> +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
> @@ -16,7 +16,7 @@ struct intel_atomic_state;
>  struct intel_crtc_state;
>  
>  struct intel_cdclk_config {
> -     unsigned int cdclk, vco, ref, bypass;
> +     unsigned int cdclk, mdclk, vco, ref, bypass;
>       u8 voltage_level;
>  };
>  
> -- 
> 2.40.1
> 

-- 
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation
