On Tue, Nov 02, 2021 at 12:32:14PM -0700, José Roberto de Souza wrote:
> Changing the buffer in the middle of the scanout and then entering a
> period of flip idleness will cause part of the previous buffer to be
> displayed to the user when PSR is enabled.
> 
> So here we disable PSR and schedule its reactivation during the next
> sync flip.
> 
> The async flip check that we had in PSR compute is not executed at
> every flip so it was not doing anything useful and is also being
> dropped here.
> 
> v2:
> - scheduling the PSR work in _intel_psr_post_plane_update()
> 
> v3:
> - only re-enabling PSR when doing a sync flip
> 
> Cc: Karthik B S <karthik....@intel.com>
> Cc: Vandita Kulkarni <vandita.kulka...@intel.com>
> Cc: Ville Syrjälä <ville.syrj...@linux.intel.com>
> Cc: Rodrigo Vivi <rodrigo.v...@intel.com>
> Signed-off-by: José Roberto de Souza <jose.so...@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_psr.c | 37 ++++++++++++++----------
>  1 file changed, 21 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_psr.c 
> b/drivers/gpu/drm/i915/display/intel_psr.c
> index 9d589d471e335..b8fac53d57df1 100644
> --- a/drivers/gpu/drm/i915/display/intel_psr.c
> +++ b/drivers/gpu/drm/i915/display/intel_psr.c
> @@ -731,12 +731,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct 
> intel_dp *intel_dp,
>               return false;
>       }
>  
> -     if (crtc_state->uapi.async_flip) {
> -             drm_dbg_kms(&dev_priv->drm,
> -                         "PSR2 sel fetch not enabled, async flip enabled\n");
> -             return false;
> -     }
> -
>       /* Wa_14010254185 Wa_14010103792 */
>       if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
>               drm_dbg_kms(&dev_priv->drm,
> @@ -1780,36 +1774,47 @@ void intel_psr_pre_plane_update(struct 
> intel_atomic_state *state,
>               if (psr->enabled && needs_to_disable)
>                       intel_psr_disable_locked(intel_dp);
>  
> +             if (psr->enabled && crtc_state->uapi.async_flip)
> +                     intel_psr_exit(intel_dp);
> +
>               mutex_unlock(&psr->lock);
>       }
>  }
>  
>  static void _intel_psr_post_plane_update(const struct intel_atomic_state 
> *state,
> -                                      const struct intel_crtc_state 
> *crtc_state)
> +                                      const struct intel_crtc_state 
> *old_crtc_state,
> +                                      const struct intel_crtc_state 
> *new_crtc_state)

Might make sense to change this to match how psr_pre_plane_update()
works these days.

>  {
>       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
>       struct intel_encoder *encoder;
>  
> -     if (!crtc_state->has_psr)
> +     if (!new_crtc_state->has_psr)
>               return;
>  
>       for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
> -                                          crtc_state->uapi.encoder_mask) {
> +                                          new_crtc_state->uapi.encoder_mask) 
> {
>               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
>               struct intel_psr *psr = &intel_dp->psr;
>  
>               mutex_lock(&psr->lock);
>  
> -             drm_WARN_ON(&dev_priv->drm, psr->enabled && 
> !crtc_state->active_planes);
> +             drm_WARN_ON(&dev_priv->drm, psr->enabled &&
> +                         !new_crtc_state->active_planes);
>  
>               /* Only enable if there is active planes */
> -             if (!psr->enabled && crtc_state->active_planes)
> -                     intel_psr_enable_locked(intel_dp, crtc_state);
> +             if (!psr->enabled && new_crtc_state->active_planes)
> +                     intel_psr_enable_locked(intel_dp, new_crtc_state);

What prevents this guy from activating PSR while we're doing
an async flip?

>  
>               /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
> -             if (crtc_state->crc_enabled && psr->enabled)
> +             if (new_crtc_state->crc_enabled && psr->enabled)
>                       psr_force_hw_tracking_exit(intel_dp);
>  
> +             /* Only re enabling PSR when doing a sync flip */
> +             if (psr->enabled && !psr->active &&
> +                 old_crtc_state->uapi.async_flip &&
> +                 !new_crtc_state->uapi.async_flip)
> +                     schedule_work(&intel_dp->psr.work);
> +
>               mutex_unlock(&psr->lock);
>       }
>  }
> @@ -1817,15 +1822,15 @@ static void _intel_psr_post_plane_update(const struct 
> intel_atomic_state *state,
>  void intel_psr_post_plane_update(const struct intel_atomic_state *state)
>  {
>       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
> -     struct intel_crtc_state *crtc_state;
> +     struct intel_crtc_state *old_crtc_state, *new_crtc_state;
>       struct intel_crtc *crtc;
>       int i;
>  
>       if (!HAS_PSR(dev_priv))
>               return;
>  
> -     for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
> -             _intel_psr_post_plane_update(state, crtc_state);
> +     for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 
> new_crtc_state, i)
> +             _intel_psr_post_plane_update(state, old_crtc_state, 
> new_crtc_state);
>  }
>  
>  static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
> -- 
> 2.33.1

-- 
Ville Syrjälä
Intel

Reply via email to