From: Ville Syrjälä <[email protected]>

On CTG+, read out the pipe bpp setting from hardware and fill it into
the pipe config. Also check it appropriately in intel_pipe_config_compare().
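
As a note on the numbers: pipe_bpp counts bits per pixel summed over
all three color components, so each BPC field value maps to bpc * 3
(6 bpc -> 18, 8 bpc -> 24, 10 bpc -> 30, 12 bpc -> 36). A minimal
sketch of that mapping (the helper name is hypothetical, not part of
this patch):

    /* hypothetical helper: bits per component times three components */
    static int bpc_to_pipe_bpp(int bpc)
    {
            return bpc * 3;
    }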

v2: Don't do the pipe_bpp extraction inside the PCH-only code block on
    ILK+.
    Avoid a second PIPECONF read, since we already have the value from
    the PIPECONF_ENABLE check.
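
A condensed sketch of the resulting flow in the readout paths below,
reusing the single PIPECONF read (identifiers as in the hunks):

    u32 tmp = I915_READ(PIPECONF(crtc->pipe));

    if (!(tmp & PIPECONF_ENABLE))
            return false;

    /* v2: decode BPC from the same read used for the enable check */
    switch (tmp & PIPECONF_BPC_MASK) {
    case PIPECONF_6BPC:
            pipe_config->pipe_bpp = 18;
            break;
    /* ... remaining BPC cases as in the hunks below ... */
    }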

Signed-off-by: Ville Syrjälä <[email protected]>
Reviewed-by: Jani Nikula <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>

---

This is already in drm-intel-next-queued as
commit 42571aefafb1d330ef84eb29418832f72e7dfb4c
Author: Ville Syrjälä <[email protected]>
Date:   Fri Sep 6 23:29:00 2013 +0300

    drm/i915: Add support for pipe_bpp readout

but is needed for patch 2/2.
---
 drivers/gpu/drm/i915/intel_ddi.c     |   17 ++++++++++++++++
 drivers/gpu/drm/i915/intel_display.c |   36 ++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63de270..beb7f65 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
                flags |= DRM_MODE_FLAG_NVSYNC;
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       switch (temp & TRANS_DDI_BPC_MASK) {
+       case TRANS_DDI_BPC_6:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case TRANS_DDI_BPC_8:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case TRANS_DDI_BPC_10:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case TRANS_DDI_BPC_12:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 581fb4b..725f0be 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4983,6 +4983,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+               switch (tmp & PIPECONF_BPC_MASK) {
+               case PIPECONF_6BPC:
+                       pipe_config->pipe_bpp = 18;
+                       break;
+               case PIPECONF_8BPC:
+                       pipe_config->pipe_bpp = 24;
+                       break;
+               case PIPECONF_10BPC:
+                       pipe_config->pipe_bpp = 30;
+                       break;
+               default:
+                       break;
+               }
+       }
+
        intel_get_pipe_timings(crtc, pipe_config);
 
        i9xx_get_pfit_config(crtc, pipe_config);
@@ -5881,6 +5897,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       switch (tmp & PIPECONF_BPC_MASK) {
+       case PIPECONF_6BPC:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case PIPECONF_8BPC:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case PIPECONF_10BPC:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case PIPECONF_12BPC:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
+
        if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
                struct intel_shared_dpll *pll;
 
@@ -8612,6 +8645,9 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
+       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+               PIPE_CONF_CHECK_I(pipe_bpp);
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_FLAGS
-- 
1.7.9.5
