From: Tvrtko Ursulin <[email protected]>

Move the workaround initialisation and common ringbuffer management code
from intel_ringbuffer.c into intel_engine_cs.c. This leaves only the
ringbuffer submission code in intel_ringbuffer.c.

Signed-off-by: Tvrtko Ursulin <[email protected]>
---
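
Reviewer note (below the fold, so git-am drops it): the code moves
verbatim. As a reminder of the emission API whose implementation lands
here, the usual caller pattern is roughly the following sketch, mirroring
intel_ring_cacheline_align() in the moved code:

        cs = intel_ring_begin(req, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
        *cs++ = MI_NOOP;
        *cs++ = MI_NOOP;
        intel_ring_advance(req, cs);

intel_ring_begin() reserves ring space, waiting and/or wrapping as
needed, and returns the location at which the caller should write exactly
the requested number of dwords before handing the new tail back via
intel_ring_advance().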
 drivers/gpu/drm/i915/intel_engine_cs.c  | 834 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.c | 834 --------------------------------
 2 files changed, 834 insertions(+), 834 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 538d845d7251..afaedc3adc2e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -525,6 +525,840 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
        }
 }
 
+static int wa_add(struct drm_i915_private *dev_priv,
+                 i915_reg_t addr,
+                 const u32 mask, const u32 val)
+{
+       const u32 idx = dev_priv->workarounds.count;
+
+       if (WARN_ON(idx >= I915_MAX_WA_REGS))
+               return -ENOSPC;
+
+       dev_priv->workarounds.reg[idx].addr = addr;
+       dev_priv->workarounds.reg[idx].value = val;
+       dev_priv->workarounds.reg[idx].mask = mask;
+
+       dev_priv->workarounds.count++;
+
+       return 0;
+}
+
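+/*
+ * Note: WA_REG() (and the WA_SET/CLR helpers built on it) returns from
+ * the *enclosing* function on failure, so these macros may only be used
+ * in functions that return int.
+ */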
+#define WA_REG(addr, mask, val) do { \
+               const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+               if (r) \
+                       return r; \
+       } while (0)
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+       WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+       WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+       WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+
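+/*
+ * Whitelist a privileged register for access from unprivileged batch
+ * buffers, by writing its offset into one of the engine's limited set of
+ * RING_FORCE_TO_NONPRIV slots.
+ */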
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+                                i915_reg_t reg)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct i915_workarounds *wa = &dev_priv->workarounds;
+       const uint32_t index = wa->hw_whitelist_count[engine->id];
+
+       if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+               return -EINVAL;
+
+       WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+                i915_mmio_reg_offset(reg));
+       wa->hw_whitelist_count[engine->id]++;
+
+       return 0;
+}
+
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+       /* WaDisableAsyncFlipPerfMode:bdw,chv */
+       WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
+       /* WaDisablePartialInstShootdown:bdw,chv */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+       /* Use Force Non-Coherent whenever executing a 3D context. This is a
+        * workaround for a possible hang in the unlikely event a TLB
+        * invalidation occurs during a PSD flush.
+        */
+       /* WaForceEnableNonCoherent:bdw,chv */
+       /* WaHdcDisableFetchWhenMasked:bdw,chv */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+                         HDC_FORCE_NON_COHERENT);
+
+       /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+        * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+        *  polygons in the same 8x4 pixel/sample area to be processed without
+        *  stalling waiting for the earlier ones to write to Hierarchical Z
+        *  buffer."
+        *
+        * This optimization is off by default for BDW and CHV; turn it on.
+        */
+       WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+       /* Wa4x4STCOptimizationDisable:bdw,chv */
+       WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+                           GEN6_WIZ_HASHING_MASK,
+                           GEN6_WIZ_HASHING_16x4);
+
+       return 0;
+}
+
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen8_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+       /* WaDisableDopClockGating:bdw
+        *
+        * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+        * to disable EUTC clock gating.
+        */
+       WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+                         DOP_CLOCK_GATING_DISABLE);
+
+       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+                         GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         /* WaForceContextSaveRestoreNonCoherent:bdw */
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+                         /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+                         (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+
+       return 0;
+}
+
+static int chv_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen8_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaDisableThreadStallDopClockGating:chv */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+       /* Improve HiZ throughput on CHV. */
+       WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+       return 0;
+}
+
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
+       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
+       I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+                  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+       /* WaDisableKillLogic:bxt,skl,kbl */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+                  ECOCHK_DIS_TLB);
+
+       /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
+       /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         FLOW_CONTROL_ENABLE |
+                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+       /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+                         GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+       /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+               WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+                                 GEN9_DG_MIRROR_FIX_ENABLE);
+
+       /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+               WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+                                 GEN9_RHWO_OPTIMIZATION_DISABLE);
+               /*
+                * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
+                * but we do that in per ctx batchbuffer as there is an issue
+                * with this register not getting restored on ctx restore
+                */
+       }
+
+       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+                         GEN9_ENABLE_GPGPU_PREEMPTION);
+
+       /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
+       /* WaDisablePartialResolveInVc:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+                                        GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
+
+       /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
+       WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+                         GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+       /* WaDisableMaskBasedCammingInRCC:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+               WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+                                 PIXEL_MASK_CAMMING_DISABLE);
+
+       /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+                         HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+       /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+        * both tied to WaForceContextSaveRestoreNonCoherent
+        * in some hsds for skl. We keep the tie for all gen9. The
+        * documentation is a bit hazy and so we want to get common behaviour,
+        * even though there is no clear evidence we would need both on kbl/bxt.
+        * This area has been source of system hangs so we play it safe
+        * and mimic the skl regardless of what bspec says.
+        *
+        * Use Force Non-Coherent whenever executing a 3D context. This
+        * is a workaround for a possible hang in the unlikely event
+        * a TLB invalidation occurs during a PSD flush.
+        */
+
+       /* WaForceEnableNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_NON_COHERENT);
+
+       /* WaDisableHDCInvalidation:skl,bxt,kbl */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+                  BDW_DISABLE_HDC_INVALIDATION);
+
+       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+       if (IS_SKYLAKE(dev_priv) ||
+           IS_KABYLAKE(dev_priv) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+               WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+                                 GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+       /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
+       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
+       /* WaOCLCoherentLineFlush:skl,bxt,kbl */
+       I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
+                                   GEN8_LQSC_FLUSH_COHERENT_LINES));
+
+       /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
+       ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+       if (ret)
+               return ret;
+
+       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+       if (ret)
+               return ret;
+
+       /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
+       ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       u8 vals[3] = { 0, 0, 0 };
+       unsigned int i;
+
+       for (i = 0; i < 3; i++) {
+               u8 ss;
+
+               /*
+                * Only consider slices where one, and only one, subslice has 7
+                * EUs
+                */
+               if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+                       continue;
+
+               /*
+                * subslice_7eu[i] != 0 (because of the check above) and
+                * ss_max == 4 (maximum number of subslices possible per slice)
+                *
+                * ->    0 <= ss <= 3;
+                */
+               ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+               vals[i] = 3 - ss;
+       }
+
+       if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+               return 0;
+
+       /* Tune IZ hashing. See intel_device_info_runtime_init() */
+       WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+                           GEN9_IZ_HASHING_MASK(2) |
+                           GEN9_IZ_HASHING_MASK(1) |
+                           GEN9_IZ_HASHING_MASK(0),
+                           GEN9_IZ_HASHING(2, vals[2]) |
+                           GEN9_IZ_HASHING(1, vals[1]) |
+                           GEN9_IZ_HASHING(0, vals[0]));
+
+       return 0;
+}
+
+static int skl_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /*
+        * Actual WA is to disable percontext preemption granularity control
+        * until D0 which is the default case so this is equivalent to
+        * !WaDisablePerCtxtPreemptionGranularityControl:skl
+        */
+       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+
+       /* WaEnableGapsTsvCreditFix:skl */
+       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+       /* WaDisableGafsUnitClkGating:skl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaInPlaceDecompressionHang:skl */
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+       /* WaDisableLSQCROPERFforOCL:skl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+       if (ret)
+               return ret;
+
+       return skl_tune_iz_hashing(engine);
+}
+
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaStoreMultiplePTEenable:bxt */
+       /* This is a requirement according to Hardware specification */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+               I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+
+       /* WaSetClckGatingDisableMedia:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+               I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+                                           ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
+       }
+
+       /* WaDisableThreadStallDopClockGating:bxt */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         STALL_DOP_GATING_DISABLE);
+
+       /* WaDisablePooledEuLoadBalancingFix:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+               WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+                                 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+       }
+
+       /* WaDisableSbeCacheDispatchPortSharing:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
+               WA_SET_BIT_MASKED(
+                       GEN7_HALF_SLICE_CHICKEN1,
+                       GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+       }
+
+       /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
+       /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
+       /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
+       /* WaDisableLSQCROPERFforOCL:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+               ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
+               if (ret)
+                       return ret;
+
+               ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+               if (ret)
+                       return ret;
+       }
+
+       /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+                                          L3_HIGH_PRIO_CREDITS(2));
+
+       /* WaToEnableHwFixForPushConstHWBug:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       /* WaInPlaceDecompressionHang:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+       return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaEnableGapsTsvCreditFix:kbl */
+       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+       /* WaDisableDynamicCreditSharing:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT(GAMT_CHKN_BIT_REG,
+                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+       /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE);
+
+       /* WaToEnableHwFixForPushConstHWBug:kbl */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       /* WaDisableGafsUnitClkGating:kbl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableSbeCacheDispatchPortSharing:kbl */
+       WA_SET_BIT_MASKED(
+               GEN7_HALF_SLICE_CHICKEN1,
+               GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+       /* WaInPlaceDecompressionHang:kbl */
+       WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+       /* WaDisableLSQCROPERFforOCL:kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int glk_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaToEnableHwFixForPushConstHWBug:glk */
+       WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                         GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       WARN_ON(engine->id != RCS);
+
+       dev_priv->workarounds.count = 0;
+       dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
+
+       if (IS_BROADWELL(dev_priv))
+               return bdw_init_workarounds(engine);
+
+       if (IS_CHERRYVIEW(dev_priv))
+               return chv_init_workarounds(engine);
+
+       if (IS_SKYLAKE(dev_priv))
+               return skl_init_workarounds(engine);
+
+       if (IS_BROXTON(dev_priv))
+               return bxt_init_workarounds(engine);
+
+       if (IS_KABYLAKE(dev_priv))
+               return kbl_init_workarounds(engine);
+
+       if (IS_GEMINILAKE(dev_priv))
+               return glk_init_workarounds(engine);
+
+       return 0;
+}
+
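+/*
+ * Re-emit the register writes recorded by init_workarounds_ring() from
+ * the given request, as a single MI_LOAD_REGISTER_IMM packet bracketed
+ * by flushes.
+ */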
+int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+{
+       struct i915_workarounds *w = &req->i915->workarounds;
+       u32 *cs;
+       int ret, i;
+
+       if (w->count == 0)
+               return 0;
+
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
+       if (ret)
+               return ret;
+
+       cs = intel_ring_begin(req, (w->count * 2 + 2));
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       *cs++ = MI_LOAD_REGISTER_IMM(w->count);
+       for (i = 0; i < w->count; i++) {
+               *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+               *cs++ = w->reg[i].value;
+       }
+       *cs++ = MI_NOOP;
+
+       intel_ring_advance(req, cs);
+
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
+       if (ret)
+               return ret;
+
+       DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
+
+       return 0;
+}
+
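+/*
+ * Pin the ring's backing store into the GGTT and map it for CPU access,
+ * either through the mappable aperture or directly (write-back on LLC
+ * platforms, write-combined otherwise).
+ */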
+int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
+{
+       unsigned int flags;
+       enum i915_map_type map;
+       struct i915_vma *vma = ring->vma;
+       void *addr;
+       int ret;
+
+       GEM_BUG_ON(ring->vaddr);
+
+       map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
+
+       flags = PIN_GLOBAL;
+       if (offset_bias)
+               flags |= PIN_OFFSET_BIAS | offset_bias;
+       if (vma->obj->stolen)
+               flags |= PIN_MAPPABLE;
+
+       if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+               if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
+                       ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+               else
+                       ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
+       if (unlikely(ret))
+               return ret;
+
+       if (i915_vma_is_map_and_fenceable(vma))
+               addr = (void __force *)i915_vma_pin_iomap(vma);
+       else
+               addr = i915_gem_object_pin_map(vma->obj, map);
+       if (IS_ERR(addr))
+               goto err;
+
+       ring->vaddr = addr;
+       return 0;
+
+err:
+       i915_vma_unpin(vma);
+       return PTR_ERR(addr);
+}
+
+void intel_ring_unpin(struct intel_ring *ring)
+{
+       GEM_BUG_ON(!ring->vma);
+       GEM_BUG_ON(!ring->vaddr);
+
+       if (i915_vma_is_map_and_fenceable(ring->vma))
+               i915_vma_unpin_iomap(ring->vma);
+       else
+               i915_gem_object_unpin_map(ring->vma->obj);
+       ring->vaddr = NULL;
+
+       i915_vma_unpin(ring->vma);
+}
+
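+/*
+ * Allocate the backing store for a ring, preferring stolen memory with a
+ * fallback to a regular GEM object, and return a GGTT VMA for it.
+ */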
+static struct i915_vma *
+intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+
+       obj = i915_gem_object_create_stolen(dev_priv, size);
+       if (!obj)
+               obj = i915_gem_object_create(dev_priv, size);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       /* mark ring buffers as read-only from GPU side by default */
+       obj->gt_ro = 1;
+
+       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+       if (IS_ERR(vma))
+               goto err;
+
+       return vma;
+
+err:
+       i915_gem_object_put(obj);
+       return vma;
+}
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+{
+       struct intel_ring *ring;
+       struct i915_vma *vma;
+
+       GEM_BUG_ON(!is_power_of_2(size));
+       GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+
+       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+       if (!ring)
+               return ERR_PTR(-ENOMEM);
+
+       ring->engine = engine;
+
+       INIT_LIST_HEAD(&ring->request_list);
+
+       ring->size = size;
+       /* Workaround an erratum on the i830 which causes a hang if
+        * the TAIL pointer points to within the last 2 cachelines
+        * of the buffer.
+        */
+       ring->effective_size = size;
+       if (IS_I830(engine->i915) || IS_I845G(engine->i915))
+               ring->effective_size -= 2 * CACHELINE_BYTES;
+
+       ring->last_retired_head = -1;
+       intel_ring_update_space(ring);
+
+       vma = intel_ring_create_vma(engine->i915, size);
+       if (IS_ERR(vma)) {
+               kfree(ring);
+               return ERR_CAST(vma);
+       }
+       ring->vma = vma;
+
+       return ring;
+}
+
+void
+intel_ring_free(struct intel_ring *ring)
+{
+       struct drm_i915_gem_object *obj = ring->vma->obj;
+
+       i915_vma_close(ring->vma);
+       __i915_gem_object_release_unless_active(obj);
+
+       kfree(ring);
+}
+
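+/*
+ * Bytes between the consumer (head) and the producer (tail), less
+ * I915_RING_FREE_SPACE which is always kept in reserve.
+ */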
+static int __intel_ring_space(int head, int tail, int size)
+{
+       int space = head - tail;
+       if (space <= 0)
+               space += size;
+       return space - I915_RING_FREE_SPACE;
+}
+
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
+{
+       struct intel_ring *ring = req->ring;
+       struct drm_i915_gem_request *target;
+       long timeout;
+
+       lockdep_assert_held(&req->i915->drm.struct_mutex);
+
+       intel_ring_update_space(ring);
+       if (ring->space >= bytes)
+               return 0;
+
+       /*
+        * Space is reserved in the ringbuffer for finalising the request,
+        * as that cannot be allowed to fail. During request finalisation,
+        * reserved_space is set to 0 to stop the overallocation and the
+        * assumption is that then we never need to wait (which has the
+        * risk of failing with EINTR).
+        *
+        * See also i915_gem_request_alloc() and i915_add_request().
+        */
+       GEM_BUG_ON(!req->reserved_space);
+
+       list_for_each_entry(target, &ring->request_list, ring_link) {
+               unsigned space;
+
+               /* Would completion of this request free enough space? */
+               space = __intel_ring_space(target->postfix, ring->tail,
+                                          ring->size);
+               if (space >= bytes)
+                       break;
+       }
+
+       if (WARN_ON(&target->ring_link == &ring->request_list))
+               return -ENOSPC;
+
+       timeout = i915_wait_request(target,
+                                   I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                                   MAX_SCHEDULE_TIMEOUT);
+       if (timeout < 0)
+               return timeout;
+
+       i915_gem_request_retire_upto(target);
+
+       intel_ring_update_space(ring);
+       GEM_BUG_ON(ring->space < bytes);
+       return 0;
+}
+
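+/*
+ * Reserve space for num_dwords of commands, waiting for space and/or
+ * wrapping past the end of the ring as required, and return the location
+ * at which the caller writes its commands before calling
+ * intel_ring_advance().
+ */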
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+{
+       struct intel_ring *ring = req->ring;
+       int remain_actual = ring->size - ring->tail;
+       int remain_usable = ring->effective_size - ring->tail;
+       int bytes = num_dwords * sizeof(u32);
+       int total_bytes, wait_bytes;
+       bool need_wrap = false;
+       u32 *cs;
+
+       total_bytes = bytes + req->reserved_space;
+
+       if (unlikely(bytes > remain_usable)) {
+               /*
+                * Not enough space for the basic request. So need to flush
+                * out the remainder and then wait for base + reserved.
+                */
+               wait_bytes = remain_actual + total_bytes;
+               need_wrap = true;
+       } else if (unlikely(total_bytes > remain_usable)) {
+               /*
+                * The base request will fit but the reserved space
+                * falls off the end. So we don't need an immediate wrap
+                * and only need to effectively wait for the reserved
+                * size space from the start of ringbuffer.
+                */
+               wait_bytes = remain_actual + req->reserved_space;
+       } else {
+               /* No wrapping required, just waiting. */
+               wait_bytes = total_bytes;
+       }
+
+       if (wait_bytes > ring->space) {
+               int ret = wait_for_space(req, wait_bytes);
+               if (unlikely(ret))
+                       return ERR_PTR(ret);
+       }
+
+       if (unlikely(need_wrap)) {
+               GEM_BUG_ON(remain_actual > ring->space);
+               GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+
+               /* Fill the tail with MI_NOOP */
+               memset(ring->vaddr + ring->tail, 0, remain_actual);
+               ring->tail = 0;
+               ring->space -= remain_actual;
+       }
+
+       GEM_BUG_ON(ring->tail > ring->size - bytes);
+       cs = ring->vaddr + ring->tail;
+       ring->tail += bytes;
+       ring->space -= bytes;
+       GEM_BUG_ON(ring->space < 0);
+
+       return cs;
+}
+
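+/*
+ * Refresh the cached amount of free space, first advancing head to the
+ * most recently retired position if one has been recorded.
+ */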
+void intel_ring_update_space(struct intel_ring *ring)
+{
+       if (ring->last_retired_head != -1) {
+               ring->head = ring->last_retired_head;
+               ring->last_retired_head = -1;
+       }
+
+       ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
+                                        ring->tail, ring->size);
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
+{
+       int num_dwords =
+               (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+       u32 *cs;
+
+       if (num_dwords == 0)
+               return 0;
+
+       num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
+       cs = intel_ring_begin(req, num_dwords);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       while (num_dwords--)
+               *cs++ = MI_NOOP;
+
+       intel_ring_advance(req, cs);
+
+       return 0;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8c17db72489f..a8ff5903bb90 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -39,25 +39,6 @@
  */
 #define LEGACY_REQUEST_SIZE 200
 
-static int __intel_ring_space(int head, int tail, int size)
-{
-       int space = head - tail;
-       if (space <= 0)
-               space += size;
-       return space - I915_RING_FREE_SPACE;
-}
-
-void intel_ring_update_space(struct intel_ring *ring)
-{
-       if (ring->last_retired_head != -1) {
-               ring->head = ring->last_retired_head;
-               ring->last_retired_head = -1;
-       }
-
-       ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
-                                        ring->tail, ring->size);
-}
-
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
@@ -644,41 +625,6 @@ static void reset_ring_common(struct intel_engine_cs *engine,
        }
 }
 
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
-{
-       struct i915_workarounds *w = &req->i915->workarounds;
-       u32 *cs;
-       int ret, i;
-
-       if (w->count == 0)
-               return 0;
-
-       ret = req->engine->emit_flush(req, EMIT_BARRIER);
-       if (ret)
-               return ret;
-
-       cs = intel_ring_begin(req, (w->count * 2 + 2));
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(w->count);
-       for (i = 0; i < w->count; i++) {
-               *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
-               *cs++ = w->reg[i].value;
-       }
-       *cs++ = MI_NOOP;
-
-       intel_ring_advance(req, cs);
-
-       ret = req->engine->emit_flush(req, EMIT_BARRIER);
-       if (ret)
-               return ret;
-
-       DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
-
-       return 0;
-}
-
 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 {
        int ret;
@@ -694,521 +640,6 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
        return 0;
 }
 
-static int wa_add(struct drm_i915_private *dev_priv,
-                 i915_reg_t addr,
-                 const u32 mask, const u32 val)
-{
-       const u32 idx = dev_priv->workarounds.count;
-
-       if (WARN_ON(idx >= I915_MAX_WA_REGS))
-               return -ENOSPC;
-
-       dev_priv->workarounds.reg[idx].addr = addr;
-       dev_priv->workarounds.reg[idx].value = val;
-       dev_priv->workarounds.reg[idx].mask = mask;
-
-       dev_priv->workarounds.count++;
-
-       return 0;
-}
-
-#define WA_REG(addr, mask, val) do { \
-               const int r = wa_add(dev_priv, (addr), (mask), (val)); \
-               if (r) \
-                       return r; \
-       } while (0)
-
-#define WA_SET_BIT_MASKED(addr, mask) \
-       WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
-       WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
-       WA_REG(addr, mask, _MASKED_FIELD(mask, value))
-
-#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
-#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
-
-#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
-                                i915_reg_t reg)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       struct i915_workarounds *wa = &dev_priv->workarounds;
-       const uint32_t index = wa->hw_whitelist_count[engine->id];
-
-       if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
-               return -EINVAL;
-
-       WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
-                i915_mmio_reg_offset(reg));
-       wa->hw_whitelist_count[engine->id]++;
-
-       return 0;
-}
-
-static int gen8_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
-
-       /* WaDisableAsyncFlipPerfMode:bdw,chv */
-       WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
-
-       /* WaDisablePartialInstShootdown:bdw,chv */
-       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
-       /* Use Force Non-Coherent whenever executing a 3D context. This is a
-        * workaround for a possible hang in the unlikely event a TLB
-        * invalidation occurs during a PSD flush.
-        */
-       /* WaForceEnableNonCoherent:bdw,chv */
-       /* WaHdcDisableFetchWhenMasked:bdw,chv */
-       WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                         HDC_DONOT_FETCH_MEM_WHEN_MASKED |
-                         HDC_FORCE_NON_COHERENT);
-
-       /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
-        * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
-        *  polygons in the same 8x4 pixel/sample area to be processed without
-        *  stalling waiting for the earlier ones to write to Hierarchical Z
-        *  buffer."
-        *
-        * This optimization is off by default for BDW and CHV; turn it on.
-        */
-       WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
-
-       /* Wa4x4STCOptimizationDisable:bdw,chv */
-       WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
-
-       /*
-        * BSpec recommends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       WA_SET_FIELD_MASKED(GEN7_GT_MODE,
-                           GEN6_WIZ_HASHING_MASK,
-                           GEN6_WIZ_HASHING_16x4);
-
-       return 0;
-}
-
-static int bdw_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       ret = gen8_init_workarounds(engine);
-       if (ret)
-               return ret;
-
-       /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
-       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
-       /* WaDisableDopClockGating:bdw
-        *
-        * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
-        * to disable EUTC clock gating.
-        */
-       WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
-                         DOP_CLOCK_GATING_DISABLE);
-
-       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-                         GEN8_SAMPLER_POWER_BYPASS_DIS);
-
-       WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                         /* WaForceContextSaveRestoreNonCoherent:bdw */
-                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
-                         /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-                         (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
-       return 0;
-}
-
-static int chv_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       ret = gen8_init_workarounds(engine);
-       if (ret)
-               return ret;
-
-       /* WaDisableThreadStallDopClockGating:chv */
-       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
-       /* Improve HiZ throughput on CHV. */
-       WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
-       return 0;
-}
-
-static int gen9_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
-       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
-
-       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
-       I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-                  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
-
-       /* WaDisableKillLogic:bxt,skl,kbl */
-       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                  ECOCHK_DIS_TLB);
-
-       /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
-       /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
-       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-                         FLOW_CONTROL_ENABLE |
-                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
-       /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
-       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-                         GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
-       /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-               WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
-                                 GEN9_DG_MIRROR_FIX_ENABLE);
-
-       /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
-               WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
-                                 GEN9_RHWO_OPTIMIZATION_DISABLE);
-               /*
-                * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
-                * but we do that in per ctx batchbuffer as there is an issue
-                * with this register not getting restored on ctx restore
-                */
-       }
-
-       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
-       WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-                         GEN9_ENABLE_GPGPU_PREEMPTION);
-
-       /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
-       /* WaDisablePartialResolveInVc:skl,bxt,kbl */
-       WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
-                                        GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
-
-       /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
-       WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
-                         GEN9_CCS_TLB_PREFETCH_ENABLE);
-
-       /* WaDisableMaskBasedCammingInRCC:bxt */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-               WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
-                                 PIXEL_MASK_CAMMING_DISABLE);
-
-       /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
-       WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
-                         HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
-
-       /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
-        * both tied to WaForceContextSaveRestoreNonCoherent
-        * in some hsds for skl. We keep the tie for all gen9. The
-        * documentation is a bit hazy and so we want to get common behaviour,
-        * even though there is no clear evidence we would need both on kbl/bxt.
-        * This area has been source of system hangs so we play it safe
-        * and mimic the skl regardless of what bspec says.
-        *
-        * Use Force Non-Coherent whenever executing a 3D context. This
-        * is a workaround for a possible hang in the unlikely event
-        * a TLB invalidation occurs during a PSD flush.
-        */
-
-       /* WaForceEnableNonCoherent:skl,bxt,kbl */
-       WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                         HDC_FORCE_NON_COHERENT);
-
-       /* WaDisableHDCInvalidation:skl,bxt,kbl */
-       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                  BDW_DISABLE_HDC_INVALIDATION);
-
-       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
-       if (IS_SKYLAKE(dev_priv) ||
-           IS_KABYLAKE(dev_priv) ||
-           IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
-               WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-                                 GEN8_SAMPLER_POWER_BYPASS_DIS);
-
-       /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
-       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
-
-       /* WaOCLCoherentLineFlush:skl,bxt,kbl */
-       I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
-                                   GEN8_LQSC_FLUSH_COHERENT_LINES));
-
-       /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
-       ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
-       if (ret)
-               return ret;
-
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
-       ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-       if (ret)
-               return ret;
-
-       /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
-       ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       u8 vals[3] = { 0, 0, 0 };
-       unsigned int i;
-
-       for (i = 0; i < 3; i++) {
-               u8 ss;
-
-               /*
-                * Only consider slices where one, and only one, subslice has 7
-                * EUs
-                */
-               if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
-                       continue;
-
-               /*
-                * subslice_7eu[i] != 0 (because of the check above) and
-                * ss_max == 4 (maximum number of subslices possible per slice)
-                *
-                * ->    0 <= ss <= 3;
-                */
-               ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
-               vals[i] = 3 - ss;
-       }
-
-       if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
-               return 0;
-
-       /* Tune IZ hashing. See intel_device_info_runtime_init() */
-       WA_SET_FIELD_MASKED(GEN7_GT_MODE,
-                           GEN9_IZ_HASHING_MASK(2) |
-                           GEN9_IZ_HASHING_MASK(1) |
-                           GEN9_IZ_HASHING_MASK(0),
-                           GEN9_IZ_HASHING(2, vals[2]) |
-                           GEN9_IZ_HASHING(1, vals[1]) |
-                           GEN9_IZ_HASHING(0, vals[0]));
-
-       return 0;
-}
-
-static int skl_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       ret = gen9_init_workarounds(engine);
-       if (ret)
-               return ret;
-
-       /*
-        * Actual WA is to disable percontext preemption granularity control
-        * until D0 which is the default case so this is equivalent to
-        * !WaDisablePerCtxtPreemptionGranularityControl:skl
-        */
-       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-
-       /* WaEnableGapsTsvCreditFix:skl */
-       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
-                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
-
-       /* WaDisableGafsUnitClkGating:skl */
-       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
-       /* WaInPlaceDecompressionHang:skl */
-       if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
-                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaDisableLSQCROPERFforOCL:skl */
-       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-       if (ret)
-               return ret;
-
-       return skl_tune_iz_hashing(engine);
-}
-
-static int bxt_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       ret = gen9_init_workarounds(engine);
-       if (ret)
-               return ret;
-
-       /* WaStoreMultiplePTEenable:bxt */
-       /* This is a requirement according to Hardware specification */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-               I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
-
-       /* WaSetClckGatingDisableMedia:bxt */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
-               I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
-                                           ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
-       }
-
-       /* WaDisableThreadStallDopClockGating:bxt */
-       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-                         STALL_DOP_GATING_DISABLE);
-
-       /* WaDisablePooledEuLoadBalancingFix:bxt */
-       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
-               WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
-                                 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
-       }
-
-       /* WaDisableSbeCacheDispatchPortSharing:bxt */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
-               WA_SET_BIT_MASKED(
-                       GEN7_HALF_SLICE_CHICKEN1,
-                       GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-       }
-
-       /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
-       /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
-       /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
-       /* WaDisableLSQCROPERFforOCL:bxt */
-       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
-               ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
-               if (ret)
-                       return ret;
-
-               ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-               if (ret)
-                       return ret;
-       }
-
-       /* WaProgramL3SqcReg1DefaultForPerf:bxt */
-       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-                                          L3_HIGH_PRIO_CREDITS(2));
-
-       /* WaToEnableHwFixForPushConstHWBug:bxt */
-       if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
-               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-       /* WaInPlaceDecompressionHang:bxt */
-       if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
-               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
-                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       return 0;
-}
-
-static int kbl_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       ret = gen9_init_workarounds(engine);
-       if (ret)
-               return ret;
-
-       /* WaEnableGapsTsvCreditFix:kbl */
-       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
-                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
-
-       /* WaDisableDynamicCreditSharing:kbl */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-               WA_SET_BIT(GAMT_CHKN_BIT_REG,
-                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
-
-       /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
-       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FENCE_DEST_SLM_DISABLE);
-
-       /* WaToEnableHwFixForPushConstHWBug:kbl */
-       if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
-               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-       /* WaDisableGafsUnitClkGating:kbl */
-       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
-       /* WaDisableSbeCacheDispatchPortSharing:kbl */
-       WA_SET_BIT_MASKED(
-               GEN7_HALF_SLICE_CHICKEN1,
-               GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-       /* WaInPlaceDecompressionHang:kbl */
-       WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaDisableLSQCROPERFforOCL:kbl */
-       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int glk_init_workarounds(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       int ret;
-
-       ret = gen9_init_workarounds(engine);
-       if (ret)
-               return ret;
-
-       /* WaToEnableHwFixForPushConstHWBug:glk */
-       WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-                         GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-       return 0;
-}
-
-int init_workarounds_ring(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       WARN_ON(engine->id != RCS);
-
-       dev_priv->workarounds.count = 0;
-       dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
-
-       if (IS_BROADWELL(dev_priv))
-               return bdw_init_workarounds(engine);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               return chv_init_workarounds(engine);
-
-       if (IS_SKYLAKE(dev_priv))
-               return skl_init_workarounds(engine);
-
-       if (IS_BROXTON(dev_priv))
-               return bxt_init_workarounds(engine);
-
-       if (IS_KABYLAKE(dev_priv))
-               return kbl_init_workarounds(engine);
-
-       if (IS_GEMINILAKE(dev_priv))
-               return glk_init_workarounds(engine);
-
-       return 0;
-}
-
 static int init_render_ring(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
@@ -1842,142 +1273,6 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
        return 0;
 }
 
-int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
-{
-       unsigned int flags;
-       enum i915_map_type map;
-       struct i915_vma *vma = ring->vma;
-       void *addr;
-       int ret;
-
-       GEM_BUG_ON(ring->vaddr);
-
-       map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
-
-       flags = PIN_GLOBAL;
-       if (offset_bias)
-               flags |= PIN_OFFSET_BIAS | offset_bias;
-       if (vma->obj->stolen)
-               flags |= PIN_MAPPABLE;
-
-       if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-               if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
-                       ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-               else
-                       ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
-       if (unlikely(ret))
-               return ret;
-
-       if (i915_vma_is_map_and_fenceable(vma))
-               addr = (void __force *)i915_vma_pin_iomap(vma);
-       else
-               addr = i915_gem_object_pin_map(vma->obj, map);
-       if (IS_ERR(addr))
-               goto err;
-
-       ring->vaddr = addr;
-       return 0;
-
-err:
-       i915_vma_unpin(vma);
-       return PTR_ERR(addr);
-}
-
-void intel_ring_unpin(struct intel_ring *ring)
-{
-       GEM_BUG_ON(!ring->vma);
-       GEM_BUG_ON(!ring->vaddr);
-
-       if (i915_vma_is_map_and_fenceable(ring->vma))
-               i915_vma_unpin_iomap(ring->vma);
-       else
-               i915_gem_object_unpin_map(ring->vma->obj);
-       ring->vaddr = NULL;
-
-       i915_vma_unpin(ring->vma);
-}
-
-static struct i915_vma *
-intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
-{
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
-
-       obj = i915_gem_object_create_stolen(dev_priv, size);
-       if (!obj)
-               obj = i915_gem_object_create(dev_priv, size);
-       if (IS_ERR(obj))
-               return ERR_CAST(obj);
-
-       /* mark ring buffers as read-only from GPU side by default */
-       obj->gt_ro = 1;
-
-       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
-       if (IS_ERR(vma))
-               goto err;
-
-       return vma;
-
-err:
-       i915_gem_object_put(obj);
-       return vma;
-}
-
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size)
-{
-       struct intel_ring *ring;
-       struct i915_vma *vma;
-
-       GEM_BUG_ON(!is_power_of_2(size));
-       GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-
-       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-       if (!ring)
-               return ERR_PTR(-ENOMEM);
-
-       ring->engine = engine;
-
-       INIT_LIST_HEAD(&ring->request_list);
-
-       ring->size = size;
-       /* Workaround an erratum on the i830 which causes a hang if
-        * the TAIL pointer points to within the last 2 cachelines
-        * of the buffer.
-        */
-       ring->effective_size = size;
-       if (IS_I830(engine->i915) || IS_I845G(engine->i915))
-               ring->effective_size -= 2 * CACHELINE_BYTES;
-
-       ring->last_retired_head = -1;
-       intel_ring_update_space(ring);
-
-       vma = intel_ring_create_vma(engine->i915, size);
-       if (IS_ERR(vma)) {
-               kfree(ring);
-               return ERR_CAST(vma);
-       }
-       ring->vma = vma;
-
-       return ring;
-}
-
-void
-intel_ring_free(struct intel_ring *ring)
-{
-       struct drm_i915_gem_object *obj = ring->vma->obj;
-
-       i915_vma_close(ring->vma);
-       __i915_gem_object_release_unless_active(obj);
-
-       kfree(ring);
-}
-
 static int context_pin(struct i915_gem_context *ctx)
 {
        struct i915_vma *vma = ctx->engine[RCS].state;
@@ -2160,135 +1455,6 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
        return 0;
 }
 
-static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
-{
-       struct intel_ring *ring = req->ring;
-       struct drm_i915_gem_request *target;
-       long timeout;
-
-       lockdep_assert_held(&req->i915->drm.struct_mutex);
-
-       intel_ring_update_space(ring);
-       if (ring->space >= bytes)
-               return 0;
-
-       /*
-        * Space is reserved in the ringbuffer for finalising the request,
-        * as that cannot be allowed to fail. During request finalisation,
-        * reserved_space is set to 0 to stop the overallocation and the
-        * assumption is that then we never need to wait (which has the
-        * risk of failing with EINTR).
-        *
-        * See also i915_gem_request_alloc() and i915_add_request().
-        */
-       GEM_BUG_ON(!req->reserved_space);
-
-       list_for_each_entry(target, &ring->request_list, ring_link) {
-               unsigned space;
-
-               /* Would completion of this request free enough space? */
-               space = __intel_ring_space(target->postfix, ring->tail,
-                                          ring->size);
-               if (space >= bytes)
-                       break;
-       }
-
-       if (WARN_ON(&target->ring_link == &ring->request_list))
-               return -ENOSPC;
-
-       timeout = i915_wait_request(target,
-                                   I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-                                   MAX_SCHEDULE_TIMEOUT);
-       if (timeout < 0)
-               return timeout;
-
-       i915_gem_request_retire_upto(target);
-
-       intel_ring_update_space(ring);
-       GEM_BUG_ON(ring->space < bytes);
-       return 0;
-}
-
-u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
-       struct intel_ring *ring = req->ring;
-       int remain_actual = ring->size - ring->tail;
-       int remain_usable = ring->effective_size - ring->tail;
-       int bytes = num_dwords * sizeof(u32);
-       int total_bytes, wait_bytes;
-       bool need_wrap = false;
-       u32 *cs;
-
-       total_bytes = bytes + req->reserved_space;
-
-       if (unlikely(bytes > remain_usable)) {
-               /*
-                * Not enough space for the basic request. So need to flush
-                * out the remainder and then wait for base + reserved.
-                */
-               wait_bytes = remain_actual + total_bytes;
-               need_wrap = true;
-       } else if (unlikely(total_bytes > remain_usable)) {
-               /*
-                * The base request will fit but the reserved space
-                * falls off the end. So we don't need an immediate wrap
-                * and only need to effectively wait for the reserved
-                * size space from the start of ringbuffer.
-                */
-               wait_bytes = remain_actual + req->reserved_space;
-       } else {
-               /* No wrapping required, just waiting. */
-               wait_bytes = total_bytes;
-       }
-
-       if (wait_bytes > ring->space) {
-               int ret = wait_for_space(req, wait_bytes);
-               if (unlikely(ret))
-                       return ERR_PTR(ret);
-       }
-
-       if (unlikely(need_wrap)) {
-               GEM_BUG_ON(remain_actual > ring->space);
-               GEM_BUG_ON(ring->tail + remain_actual > ring->size);
-
-               /* Fill the tail with MI_NOOP */
-               memset(ring->vaddr + ring->tail, 0, remain_actual);
-               ring->tail = 0;
-               ring->space -= remain_actual;
-       }
-
-       GEM_BUG_ON(ring->tail > ring->size - bytes);
-       cs = ring->vaddr + ring->tail;
-       ring->tail += bytes;
-       ring->space -= bytes;
-       GEM_BUG_ON(ring->space < 0);
-
-       return cs;
-}
-
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
-{
-       int num_dwords =
-               (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-       u32 *cs;
-
-       if (num_dwords == 0)
-               return 0;
-
-       num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-       cs = intel_ring_begin(req, num_dwords);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       while (num_dwords--)
-               *cs++ = MI_NOOP;
-
-       intel_ring_advance(req, cs);
-
-       return 0;
-}
-
 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 {
        struct drm_i915_private *dev_priv = request->i915;
-- 
2.9.3
