The workarounds that are applied are exported to a debugfs file; this is
used to verify their state after a test case (reset, suspend/resume, etc.).
This patch is only required to support i-g-t.
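
For reference, each per-register line in the new debugfs file uses the
"0x%X: 0x%08X, mask: 0x%08X" format from intel_wa_registers(). Below is a
minimal sketch of how a test could read and check the file; the debugfs
path, card index and helper name are illustrative assumptions and not part
of this patch:

  #include <stdio.h>

  /* Illustrative path only; the card index depends on the system. */
  #define WA_DEBUGFS_PATH "/sys/kernel/debug/dri/0/intel_wa_registers"

  static int check_wa_registers(void)
  {
      unsigned int addr, value, mask, count = 0;
      int lost = 0;
      FILE *f = fopen(WA_DEBUGFS_PATH, "r");

      if (!f)
          return -1;

      /* Header line: "Workarounds applied: N" */
      fscanf(f, "Workarounds applied: %u\n", &count);

      /* Per-register lines: "0xADDR: 0xVALUE, mask: 0xMASK" */
      while (fscanf(f, "0x%x: 0x%x, mask: 0x%x\n",
                    &addr, &value, &mask) == 3) {
          if ((value & mask) != mask) {
              fprintf(stderr, "workaround lost at 0x%x\n", addr);
              lost++;
          }
      }

      fclose(f);
      return lost ? -1 : 0;
  }

Running such a check before and after a reset or suspend/resume cycle is
enough to tell whether the expected workaround bits survived.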

Signed-off-by: Arun Siluvery <arun.siluv...@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     | 40 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_drv.h         | 14 ++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.c | 23 +++++++++++++++++++
 3 files changed, 77 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d42db6b..f0d63f6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2451,20 +2451,59 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
                seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
                seq_printf(m, " fp0:     0x%08x\n", pll->hw_state.fp0);
                seq_printf(m, " fp1:     0x%08x\n", pll->hw_state.fp1);
                seq_printf(m, " wrpll:   0x%08x\n", pll->hw_state.wrpll);
        }
        drm_modeset_unlock_all(dev);
 
        return 0;
 }
 
+static int intel_wa_registers(struct seq_file *m, void *unused)
+{
+       int i;
+       int ret;
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!IS_BROADWELL(dev)) {
+               DRM_DEBUG_DRIVER("Workaround table not available\n");
+               return -EINVAL;
+       }
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       intel_runtime_pm_get(dev_priv);
+
+       seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
+       for (i = 0; i < dev_priv->num_wa_regs; ++i) {
+               u32 addr, mask;
+
+               addr = dev_priv->intel_wa_regs[i].addr;
+               mask = dev_priv->intel_wa_regs[i].mask;
+               if (!addr)
+                       continue;
+
+               /*
+                * Read back the register so its current state can be
+                * compared against the expected workaround bits.
+                */
+               dev_priv->intel_wa_regs[i].value = I915_READ(addr);
+               seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+                          addr, dev_priv->intel_wa_regs[i].value, mask);
+       }
+
+       intel_runtime_pm_put(dev_priv);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 struct pipe_crc_info {
        const char *name;
        struct drm_device *dev;
        enum pipe pipe;
 };
 
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
@@ -3980,20 +4019,21 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_llc", i915_llc, 0},
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
        {"i915_sink_crc_eDP1", i915_sink_crc, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
        {"i915_pc8_status", i915_pc8_status, 0},
        {"i915_power_domain_info", i915_power_domain_info, 0},
        {"i915_display_info", i915_display_info, 0},
        {"i915_semaphore_status", i915_semaphore_status, 0},
        {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
        {"i915_dp_mst_info", i915_dp_mst_info, 0},
+       {"intel_wa_registers", intel_wa_registers, 0}
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
 static const struct i915_debugfs_files {
        const char *name;
        const struct file_operations *fops;
 } i915_debugfs_files[] = {
        {"i915_wedged", &i915_wedged_fops},
        {"i915_max_freq", &i915_max_freq_fops},
        {"i915_min_freq", &i915_min_freq_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bcf79f0..49b7be7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1546,20 +1546,34 @@ struct drm_i915_private {
        wait_queue_head_t pending_flip_queue;
 
 #ifdef CONFIG_DEBUG_FS
        struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
 #endif
 
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
+       /*
+        * Workarounds are currently applied in different places and are
+        * being consolidated, so the exact count is not known at this
+        * point; use a maximum value for now.
+        */
+#define I915_MAX_WA_REGS  16
+       struct {
+               u32 addr;
+               u32 value;
+               /* bitmask representing WA bits */
+               u32 mask;
+       } intel_wa_regs[I915_MAX_WA_REGS];
+       u32 num_wa_regs;
+
        /* Reclocking support */
        bool render_reclock_avail;
        bool lvds_downclock_avail;
        /* indicates the reduced downclock for LVDS*/
        int lvds_downclock;
 
        struct i915_frontbuffer_tracking fb_tracking;
 
        u16 orig_clock;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c1e7da6..0e496d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -646,34 +646,54 @@ err_unpin:
        i915_gem_object_ggtt_unpin(ring->scratch.obj);
 err_unref:
        drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
        return ret;
 }
 
 static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
                                       u32 addr, u32 value)
 {
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->num_wa_regs >= I915_MAX_WA_REGS)
+               return;
+
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, addr);
        intel_ring_emit(ring, value);
+
+       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
+       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
+       /*
+        * value is overwritten with the current state of this register
+        * when it is read back through the debugfs file.
+        */
+       dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
+       dev_priv->num_wa_regs++;
 }
 
 int bdw_init_workarounds(struct intel_engine_cs *ring)
 {
        int ret;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        /*
         * workarounds applied in this fn are part of register state context,
         * they need to be re-initialized followed by gpu reset, suspend/resume,
         * module reload.
         */
+       dev_priv->num_wa_regs = 0;
+       memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
 
        /*
         * update the number of dwords required based on the
         * actual number of workarounds applied
         */
        ret = intel_ring_begin(ring, 24);
        if (ret)
                return ret;
 
        /* WaDisablePartialInstShootdown:bdw */
@@ -718,20 +738,23 @@ int bdw_init_workarounds(struct intel_engine_cs *ring)
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        intel_ring_emit_wa(ring, GEN7_GT_MODE,
                           GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
        intel_ring_advance(ring);
 
+       DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
+                        dev_priv->num_wa_regs);
+
        return 0;
 }
 
 static int init_render_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);
        if (ret)
                return ret;
-- 
2.0.4
