It is a fact that the scheduled work is now improved.

But it is also a fact that on core platforms it shouldn't
be needed: we only need to actually wait on VLV/CHV.

From the HW perspective, the immediate enabling is actually
not an issue on core platforms that have HW tracking:
HW will wait a few identical idle frames before transitioning
to the actual PSR active state anyway.

Note that this patch also removes the delayed activation
on HSW and BDW introduced by commit d0ac896a477d
("drm/i915: Delay first PSR activation."). That was
introduced to fix a blank screen on VLV/CHV, and it also
masked some frozen screens on other core platforms —
probably the same ones that we are now properly hunting and fixing.

Furthermore, if we stop using delayed activation on core
platforms, we will be able, in follow-up patches,
to use the available workarounds to make HW tracking properly
exit PSR, instead of the big nuke of fully disabling PSR on
exit and re-enabling it on activate —
at least in a few reliable cases.

Cc: Dhinakaran Pandiyan <dhinakaran.pandi...@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.v...@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 14 +++++++-------
 drivers/gpu/drm/i915/intel_psr.c    | 27 +++++++++++++++++++--------
 2 files changed, 26 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c 
b/drivers/gpu/drm/i915/i915_debugfs.c
index da80ee16a3cf..541290c307c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2522,18 +2522,18 @@ static int i915_edp_psr_status(struct seq_file *m, void 
*data)
        seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
                   dev_priv->psr.busy_frontbuffer_bits);
 
-       if (timer_pending(&dev_priv->psr.activate_timer))
-               seq_printf(m, "Activate scheduled: yes, in %dms\n",
-                          
jiffies_to_msecs(dev_priv->psr.activate_timer.expires - jiffies));
-       else
-               seq_printf(m, "Activate scheduled: no\n");
-
-       if (HAS_DDI(dev_priv)) {
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                if (dev_priv->psr.psr2_support)
                        enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
                else
                        enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
        } else {
+               if (timer_pending(&dev_priv->psr.activate_timer))
+                       seq_printf(m, "Activate scheduled: yes, in %dms\n",
+                                  
jiffies_to_msecs(dev_priv->psr.activate_timer.expires - jiffies));
+               else
+                       seq_printf(m, "Activate scheduled: no\n");
+
                for_each_pipe(dev_priv, pipe) {
                        enum transcoder cpu_transcoder =
                                intel_pipe_to_cpu_transcoder(dev_priv, pipe);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 826b480841ac..13409c6301e8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -455,6 +455,8 @@ static void intel_psr_schedule(struct drm_i915_private 
*i915,
 {
        unsigned long next;
 
+       WARN_ON(!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915));
+
        lockdep_assert_held(&i915->psr.lock);
 
        /*
@@ -543,7 +545,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
        dev_priv->psr.enable_source(intel_dp, crtc_state);
        dev_priv->psr.enabled = intel_dp;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                intel_psr_activate(intel_dp);
        } else {
                /*
@@ -553,8 +555,6 @@ void intel_psr_enable(struct intel_dp *intel_dp,
                 * However on some platforms we face issues when first
                 * activation follows a modeset so quickly.
                 *     - On VLV/CHV we get bank screen on first activation
-                *     - On HSW/BDW we get a recoverable frozen screen until
-                *       next exit-activate sequence.
                 */
                intel_psr_schedule(dev_priv,
                                   intel_dp->panel_power_cycle_delay * 5);
@@ -687,6 +687,8 @@ static void intel_psr_work(struct work_struct *work)
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
+       WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv));
+
        /* We have to make sure PSR is ready for re-enable
         * otherwise it keeps disabled until next full enable/disable cycle.
         * PSR might take some time to get fully disabled
@@ -757,6 +759,8 @@ static void intel_psr_timer_fn(struct timer_list *timer)
        struct drm_i915_private *i915 =
                from_timer(i915, timer, psr.activate_timer);
 
+       WARN_ON(!IS_VALLEYVIEW(i915) && !IS_CHERRYVIEW(i915));
+
        /*
         * We need a non-atomic context to activate PSR.  Using
         * delayed_work wouldn't be an improvement -- delayed_work is
@@ -945,9 +949,12 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
        if (frontbuffer_bits)
                intel_psr_exit(dev_priv);
 
-       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-               intel_psr_schedule(dev_priv, 100);
-
+       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) {
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       intel_psr_schedule(dev_priv, 100);
+               else
+                       intel_psr_activate(dev_priv->psr.enabled);
+       }
        mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -994,8 +1001,12 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
                dev_priv->psr.link_standby = false;
        }
 
-       timer_setup(&dev_priv->psr.activate_timer, intel_psr_timer_fn, 0);
-       INIT_WORK(&dev_priv->psr.activate_work, intel_psr_work);
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               timer_setup(&dev_priv->psr.activate_timer,
+                           intel_psr_timer_fn, 0);
+               INIT_WORK(&dev_priv->psr.activate_work, intel_psr_work);
+       }
+
        mutex_init(&dev_priv->psr.lock);
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-- 
2.13.6

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to