[Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/gt: Initialize unused MOCS entries to L3_WB

2021-08-15 Thread Patchwork
== Series Details ==

Series: drm/i915/gt: Initialize unused MOCS entries to L3_WB
URL   : https://patchwork.freedesktop.org/series/93706/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_10485 -> Patchwork_20827


Summary
---

  **SUCCESS**

  No regressions found.

  External URL: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20827/index.html


Changes
---

  No changes found


Participating hosts (37 -> 34)
--

  Missing(3): fi-bdw-samus fi-bsw-cyan bat-jsl-1 


Build changes
-

  * Linux: CI_DRM_10485 -> Patchwork_20827

  CI-20190529: 20190529
  CI_DRM_10485: 029f8ff4156268d7abc8acde2eddd3041c7a94e4 @ 
git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6175: c91f99c74b966f635d7e2eb898bf0f78383d281b @ 
https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_20827: 2140f351a3a35395dead4cdd30382bb163beafd9 @ 
git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

2140f351a3a3 drm/i915/adl: Define MOCS table for Alderlake
2fa1529ce1fd drm/i915/gt: Initialize unused MOCS entries with device specific 
values
fb7209540fa9 drm/i915/gt: Set BLIT_CCTL reg to un-cached
5a156fff1508 drm/i915/gt: Use cmd_cctl override for platforms >= gen12
bd87af7aa75f drm/i915/gt: Add support of mocs propagation

== Logs ==

For more details see: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20827/index.html


[Intel-gfx] ✗ Fi.CI.SPARSE: warning for drm/i915/gt: Initialize unused MOCS entries to L3_WB

2021-08-15 Thread Patchwork
== Series Details ==

Series: drm/i915/gt: Initialize unused MOCS entries to L3_WB
URL   : https://patchwork.freedesktop.org/series/93706/
State : warning

== Summary ==

$ dim sparse --fast origin/drm-tip
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
-
+drivers/gpu/drm/i915/display/intel_display.c:1901:21:expected struct i915_vma *[assigned] vma
+drivers/gpu/drm/i915/display/intel_display.c:1901:21:got void [noderef] __iomem *[assigned] iomem
+drivers/gpu/drm/i915/display/intel_display.c:1901:21: warning: incorrect type in assignment (different address spaces)
+drivers/gpu/drm/i915/gem/i915_gem_context.c:1374:34:expected struct i915_address_space *vm
+drivers/gpu/drm/i915/gem/i915_gem_context.c:1374:34:got struct i915_address_space [noderef] __rcu *vm
+drivers/gpu/drm/i915/gem/i915_gem_context.c:1374:34: warning: incorrect type in argument 1 (different address spaces)
+drivers/gpu/drm/i915/gem/selftests/mock_context.c:43:25:expected struct i915_address_space [noderef] __rcu *vm
+drivers/gpu/drm/i915/gem/selftests/mock_context.c:43:25:got struct i915_address_space *
+drivers/gpu/drm/i915/gem/selftests/mock_context.c:43:25: warning: incorrect type in assignment (different address spaces)
+drivers/gpu/drm/i915/gem/selftests/mock_context.c:60:34:expected struct i915_address_space *vm
+drivers/gpu/drm/i915/gem/selftests/mock_context.c:60:34:got struct i915_address_space [noderef] __rcu *vm
+drivers/gpu/drm/i915/gem/selftests/mock_context.c:60:34: warning: incorrect type in argument 1 (different address spaces)
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:27:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:27:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:27:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:32:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:32:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:49:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:49:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:49:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:56:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:56:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_reset.c:1392:5: warning: context imbalance in 'intel_gt_reset_trylock' - different lock contexts for basic block
+drivers/gpu/drm/i915/gt/intel_ring_submission.c:1268:24: warning: Using plain integer as NULL pointer
+drivers/gpu/drm/i915/i915_perf.c:1442:15: warning: memset with byte count of 16777216
+drivers/gpu/drm/i915/i915_perf.c:1496:15: warning: memset with byte count of 16777216
+./include/asm-generic/bitops/find.h:112:45: warning: shift count is negative (-262080)
+./include/asm-generic/bitops/find.h:32:31: warning: shift count is negative (-262080)
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read64' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_write16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_write32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_write8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read64' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_write16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_write32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_write8' - different lock contexts for basic block

[Intel-gfx] [PATCH V2 0/5] drm/i915/gt: Initialize unused MOCS entries to L3_WB

2021-08-15 Thread Ayaz A Siddiqui
From Gen12 onwards the MOCS table does not have a setting for PTE, so
I915_MOCS_PTE is no longer a valid index and maps to different MOCS
values depending on the platform.

To catch this kind of misprogramming, all unspecified and reserved
MOCS indexes are set to L3 WB.

This series also contains patches that program the BLIT_CCTL and
CMD_CCTL registers to an uncached (UC) MOCS index.

Since it is too late to change the MOCS table for TGL, a new MOCS
table is added for the ADL family.
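
The core of the series is a per-device fallback index used for any MOCS
index a platform table leaves unspecified. A rough standalone sketch of
that lookup, with made-up structure names and control values (not the
driver code):

/*
 * Standalone sketch, not the driver code: a table lookup with a
 * device specific fallback index for unspecified entries.
 */
#include <stdbool.h>
#include <stdio.h>

struct mocs_entry {
	unsigned int control_value;
	bool used;
};

struct mocs_table {
	unsigned int size;
	const struct mocs_entry *entries;
	unsigned int unused_entries_index;  /* e.g. an L3 WB entry */
};

static unsigned int get_control(const struct mocs_table *t, unsigned int index)
{
	if (index < t->size && t->entries[index].used)
		return t->entries[index].control_value;

	/* Unspecified or reserved index: use the device default. */
	return t->entries[t->unused_entries_index].control_value;
}

int main(void)
{
	const struct mocs_entry entries[6] = {
		[1] = { .control_value = 0x1, .used = true },  /* UC    */
		[5] = { .control_value = 0x3, .used = true },  /* L3 WB */
	};
	const struct mocs_table table = {
		.size = 6,
		.entries = entries,
		.unused_entries_index = 5,
	};

	/* Index 3 is not specified, so it resolves to the L3 WB default. */
	printf("index 3 -> control 0x%x\n", get_control(&table, 3));
	return 0;
}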

V2:
  1. Added CMD_CCTL to the GuC regset list so that it can be restored
     after an engine reset.
  2. Checkpatch warning removal.

Apoorva Singh (1):
  drm/i915/gt: Set BLIT_CCTL reg to un-cached

Ayaz A Siddiqui (3):
  drm/i915/gt: Add support of mocs propagation
  drm/i915/gt: Initialize unused MOCS entries with device specific
values
  drm/i915/adl: Define MOCS table for Alderlake

Srinivasan Shanmugam (1):
  drm/i915/gt: Use cmd_cctl override for platforms >= gen12

 drivers/gpu/drm/i915/gt/intel_gt_types.h   |   4 +
 drivers/gpu/drm/i915/gt/intel_mocs.c   | 198 +++--
 drivers/gpu/drm/i915/gt/selftest_mocs.c|  49 +
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c |   1 +
 drivers/gpu/drm/i915/i915_reg.h|  23 +++
 5 files changed, 256 insertions(+), 19 deletions(-)

-- 
2.26.2



[Intel-gfx] [PATCH V2 4/5] drm/i915/gt: Initialize unused MOCS entries with device specific values

2021-08-15 Thread Ayaz A Siddiqui
While creating the MOCS table, the 'used' field of drm_i915_mocs_entry
is checked; if it is 0, the entry at index 1 (I915_MOCS_PTE) is used
instead. As a result, all the unspecified indexes of a xxx_mocs_table[]
will contain the control value and l3cc value of I915_MOCS_PTE, if it
is initialized.

This patch is intended to provide the capability to program a device
specific index whose control value and l3cc value are used for all
the unspecified indexes of the MOCS table.
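
For reference, the lookup helpers below pick the fallback with GNU C's
conditional operator with an omitted middle operand; a minimal
standalone illustration of its semantics, with I915_MOCS_PTE standing
in as index 1 (illustrative program, not driver code):

/*
 * GNU C's "?:": "a ?: b" evaluates to a when a is non-zero and to b
 * otherwise.
 */
#include <stdio.h>

int main(void)
{
	unsigned int unused_entries_index = 0;  /* table set no fallback */
	unsigned int idx;

	idx = unused_entries_index ?: 1;        /* 1 stands in for I915_MOCS_PTE */
	printf("fallback index: %u\n", idx);    /* prints 1 */

	unused_entries_index = 5;               /* e.g. DG1's L3 WB entry */
	idx = unused_entries_index ?: 1;
	printf("fallback index: %u\n", idx);    /* prints 5 */
	return 0;
}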

Signed-off-by: Ayaz A Siddiqui 
---
 drivers/gpu/drm/i915/gt/intel_mocs.c | 38 +++-
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c 
b/drivers/gpu/drm/i915/gt/intel_mocs.c
index df3c5d550c46a..cf00537ba4acc 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -23,6 +23,7 @@ struct drm_i915_mocs_table {
unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
u8 uc_index;
+   u8 unused_entries_index;
 };
 
 struct drm_i915_aux_table {
@@ -99,17 +100,23 @@ struct drm_i915_aux_table {
  * Entries not part of the following tables are undefined as far as
  * userspace is concerned and shouldn't be relied upon.  For Gen < 12
  * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for
- * PTE and will be initialized to an invalid value.
+ * PTE and will be initialized to L3 WB to catch accidental use of reserved and
+ * unused mocs indexes.
  *
  * The last few entries are reserved by the hardware. For ICL+ they
  * should be initialized according to bspec and never used, for older
  * platforms they should never be written to.
  *
- * NOTE: These tables are part of bspec and defined as part of hardware
+ * NOTE1: These tables are part of bspec and defined as part of hardware
  *   interface for ICL+. For older platforms, they are part of kernel
  *   ABI. It is expected that, for specific hardware platform, existing
  *   entries will remain constant and the table will only be updated by
  *   adding new entries, filling unused positions.
+ *
+ * NOTE2: For GEN >= 12, reserved and unspecified MOCS indices have been
+ *set to L3 WB. These reserved entries should never be used; they
+ *may be changed to lower-performance variants with better coherency
+ *in the future if more entries are needed.
  */
 #define GEN9_MOCS_ENTRIES \
MOCS_ENTRY(I915_MOCS_UNCACHED, \
@@ -292,17 +299,9 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = 
{
 };
 
 static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
-   /* Error */
-   MOCS_ENTRY(0, 0, L3_0_DIRECT),
 
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
-
-   /* Reserved */
-   MOCS_ENTRY(2, 0, L3_0_DIRECT),
-   MOCS_ENTRY(3, 0, L3_0_DIRECT),
-   MOCS_ENTRY(4, 0, L3_0_DIRECT),
-
/* WB - L3 */
MOCS_ENTRY(5, 0, L3_3_WB),
/* WB - L3 50% */
@@ -450,6 +449,7 @@ static unsigned int get_mocs_settings(const struct 
drm_i915_private *i915,
table->table = dg1_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->uc_index = 1;
+   table->unused_entries_index = 5;
} else if (GRAPHICS_VER(i915) >= 12) {
table->size  = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
@@ -500,16 +500,17 @@ static unsigned int get_mocs_settings(const struct 
drm_i915_private *i915,
 }
 
 /*
- * Get control_value from MOCS entry taking into account when it's not used:
- * I915_MOCS_PTE's value is returned in this case.
+ * Get control_value from the MOCS entry. If the entry is not used and
+ * unused_entries_index is non-zero, the value at that index is returned,
+ * otherwise I915_MOCS_PTE's value is returned.
  */
 static u32 get_entry_control(const struct drm_i915_mocs_table *table,
 unsigned int index)
 {
if (index < table->size && table->table[index].used)
return table->table[index].control_value;
-
-   return table->table[I915_MOCS_PTE].control_value;
+   index = table->unused_entries_index ? : I915_MOCS_PTE;
+   return table->table[index].control_value;
 }
 
 #define for_each_mocs(mocs, t, i) \
@@ -550,16 +551,17 @@ static void init_mocs_table(struct intel_engine_cs 
*engine,
 }
 
 /*
- * Get l3cc_value from MOCS entry taking into account when it's not used:
- * I915_MOCS_PTE's value is returned in this case.
+ * Get l3cc_value from the MOCS entry. If the entry is not used and
+ * unused_entries_index is not zero, the value at that index is returned,
+ * otherwise I915_MOCS_PTE's value is returned.
  */
 static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
  unsigned int index)
 {
if (index < table->size && table->table[index].used)
return table->table[index].l3cc_value;
-
-   return 

[Intel-gfx] [PATCH V2 2/5] drm/i915/gt: Use cmd_cctl override for platforms >= gen12

2021-08-15 Thread Ayaz A Siddiqui
From: Srinivasan Shanmugam 

Program CMD_CCTL to use a MOCS entry for uncached access.
This register controls memory accesses by the CS as it reads
instructions from the ring and batch buffers.

v2: Added CMD_CCTL in guc_mmio_regset_init(), so that this
register can be restored after an engine reset.

Signed-off-by: Srinivasan Shanmugam 
Signed-off-by: Ayaz A Siddiqui 
Cc: Chris Wilson 
Cc: Matt Roper 
---
 drivers/gpu/drm/i915/gt/intel_mocs.c   | 96 ++
 drivers/gpu/drm/i915/gt/selftest_mocs.c| 49 +++
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c |  1 +
 drivers/gpu/drm/i915/i915_reg.h| 16 
 4 files changed, 162 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c 
b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 10cc508c1a4f6..92141cf6f9a79 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -25,6 +25,15 @@ struct drm_i915_mocs_table {
u8 uc_index;
 };
 
+struct drm_i915_aux_table {
+   const char *name;
+   i915_reg_t offset;
+   u32 value;
+   u32 readmask;
+   bool skip_check;
+   struct drm_i915_aux_table *next;
+};
+
 /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
 #define _LE_CACHEABILITY(value)((value) << 0)
 #define _LE_TGT_CACHE(value)   ((value) << 2)
@@ -336,6 +345,86 @@ static bool has_mocs(const struct drm_i915_private *i915)
return !IS_DGFX(i915);
 }
 
+static struct drm_i915_aux_table *
+add_aux_reg(struct drm_i915_aux_table *aux,
+   const char *name,
+   i915_reg_t offset,
+   u32 value,
+   u32 read,
+   bool skip_check)
+
+{
+   struct drm_i915_aux_table *x;
+
+   x = kmalloc(sizeof(*x), GFP_ATOMIC);
+   if (!x) {
+   DRM_ERROR("Failed to allocate aux reg '%s'\n", name);
+   return aux;
+   }
+
+   x->name = name;
+   x->offset = offset;
+   x->value = value;
+   x->readmask = read;
+   x->skip_check = skip_check;
+
+   x->next = aux;
+   return x;
+}
+
+static struct drm_i915_aux_table *
+add_cmd_cctl_override(struct drm_i915_aux_table *aux, u8 idx)
+{
+   return add_aux_reg(aux,
+  "CMD_CCTL",
+  RING_CMD_CCTL(0),
+  CMD_CCTL_MOCS_OVERRIDE(idx, idx),
+  CMD_CCTL_WRITE_OVERRIDE_MASK | 
CMD_CCTL_READ_OVERRIDE_MASK,
+  false);
+}
+
+static const struct drm_i915_aux_table *
+build_aux_regs(const struct intel_engine_cs *engine,
+  const struct drm_i915_mocs_table *mocs)
+{
+   struct drm_i915_aux_table *aux = NULL;
+
+   if (GRAPHICS_VER(engine->i915) >= 12 &&
+   !drm_WARN_ONCE(&engine->i915->drm, !mocs->uc_index,
+   "Platform that should have UC index defined and does not\n")) {
+   /*
+* Index-0 does not operate as an uncached value as believed,
+* but causes invalid write cycles. Steer CMD_CCTL to another
+* uncached index.
+*/
+   aux = add_cmd_cctl_override(aux, mocs->uc_index);
+   }
+
+   return aux;
+}
+
+static void
+free_aux_regs(const struct drm_i915_aux_table *aux)
+{
+   while (aux) {
+   struct drm_i915_aux_table *next = aux->next;
+
+   kfree(aux);
+   aux = next;
+   }
+}
+
+static void apply_aux_regs(struct intel_engine_cs *engine,
+  const struct drm_i915_aux_table *aux)
+{
+   while (aux) {
+   intel_uncore_write_fw(engine->uncore,
+ _MMIO(engine->mmio_base + 
i915_mmio_reg_offset(aux->offset)),
+ aux->value);
+   aux = aux->next;
+   }
+}
+
 static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
  struct drm_i915_mocs_table *table)
 {
@@ -347,10 +436,12 @@ static unsigned int get_mocs_settings(const struct 
drm_i915_private *i915,
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+   table->uc_index = 1;
} else if (GRAPHICS_VER(i915) >= 12) {
table->size  = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+   table->uc_index = 3;
} else if (GRAPHICS_VER(i915) == 11) {
table->size  = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
@@ -484,6 +575,7 @@ static void init_l3cc_table(struct intel_engine_cs *engine,
 
 void intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
+   const struct drm_i915_aux_table *aux;
struct drm_i915_mocs_table table;
unsigned int flags;
 
@@ -500,6 +592,10 @@ void intel_mocs_init_engine(struct intel_engine_cs 

[Intel-gfx] [PATCH V2 5/5] drm/i915/adl: Define MOCS table for Alderlake

2021-08-15 Thread Ayaz A Siddiqui
In order to program unused and reserved MOCS entries to L3_WB,
we need to create a separate MOCS table for Alderlake.

This patch also covers wa_1608975824.

Cc: Chris P Wilson 
Cc: Lucas De Marchi 

Signed-off-by: Ayaz A Siddiqui 
---
 drivers/gpu/drm/i915/gt/intel_mocs.c | 41 +++-
 1 file changed, 40 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c 
b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cf00537ba4acc..f76e2a2b3ea82 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -323,6 +323,39 @@ static const struct drm_i915_mocs_entry dg1_mocs_table[] = 
{
MOCS_ENTRY(63, 0, L3_1_UC),
 };
 
+static const struct drm_i915_mocs_entry adl_mocs_table[] = {
+   /* wa_1608975824 */
+   MOCS_ENTRY(0,
+  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+  L3_3_WB),
+
+   GEN11_MOCS_ENTRIES,
+   /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+   MOCS_ENTRY(48,
+  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+  L3_3_WB),
+   /* Implicitly enable L1 - HDC:L1 + L3 */
+   MOCS_ENTRY(49,
+  LE_1_UC | LE_TC_1_LLC,
+  L3_3_WB),
+   /* Implicitly enable L1 - HDC:L1 + LLC */
+   MOCS_ENTRY(50,
+  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+  L3_1_UC),
+   /* Implicitly enable L1 - HDC:L1 */
+   MOCS_ENTRY(51,
+  LE_1_UC | LE_TC_1_LLC,
+  L3_1_UC),
+   /* HW Special Case (CCS) */
+   MOCS_ENTRY(60,
+  LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+  L3_1_UC),
+   /* HW Special Case (Displayable) */
+   MOCS_ENTRY(61,
+  LE_1_UC | LE_TC_1_LLC,
+  L3_3_WB),
+};
+
 enum {
HAS_GLOBAL_MOCS = BIT(0),
HAS_ENGINE_MOCS = BIT(1),
@@ -444,7 +477,13 @@ static unsigned int get_mocs_settings(const struct 
drm_i915_private *i915,
 
memset(table, 0, sizeof(struct drm_i915_mocs_table));
 
-   if (IS_DG1(i915)) {
+   if (IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
+   table->size = ARRAY_SIZE(adl_mocs_table);
+   table->table = adl_mocs_table;
+   table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+   table->uc_index = 3;
+   table->unused_entries_index = 2;
+   } else if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-- 
2.26.2



[Intel-gfx] [PATCH V2 3/5] drm/i915/gt: Set BLIT_CCTL reg to un-cached

2021-08-15 Thread Ayaz A Siddiqui
From: Apoorva Singh 

Blitter commands which do not have MOCS fields rely on the
cacheability of the BlitterCacheControlRegister (BLIT_CCTL), which
was mapped to index 0 by default. Once the MOCS value of index 0 was
changed to L3 WB, tests like gem_linear_blits started failing due to
the change in cacheability from UC to WB.

Program the BlitterCacheControlRegister to an uncached index and
place it in build_aux_regs().
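
The register packs a destination MOCS index in bits 14:8 and a source
MOCS index in bits 6:0; a standalone sketch of the value the
BLIT_CCTL_MOCS() macro below produces for the gen12 uncached index 3
(illustrative program, not driver code):

/*
 * Standalone illustration of the bit packing done by BLIT_CCTL_MOCS():
 * destination MOCS index in bits 14:8, source MOCS index in bits 6:0,
 * each index shifted left by one inside its field.
 */
#include <stdio.h>

#define BLIT_CCTL_DST_MOCS_SHIFT 8

static unsigned int blit_cctl_mocs(unsigned int dst, unsigned int src)
{
	return (((dst << 1) << BLIT_CCTL_DST_MOCS_SHIFT) | (src << 1));
}

int main(void)
{
	/* uc_index is 3 on gen12, so both fields carry 3 << 1 = 6. */
	printf("BLIT_CCTL for MOCS index 3: 0x%x\n", blit_cctl_mocs(3, 3));
	/* Prints 0x606: value 0x6 in bits 6:0 and 0x6 in bits 14:8. */
	return 0;
}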

Signed-off-by: Apoorva Singh 
Signed-off-by: Ayaz A Siddiqui 
---
 drivers/gpu/drm/i915/gt/intel_mocs.c | 13 +
 drivers/gpu/drm/i915/i915_reg.h  |  7 +++
 2 files changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c 
b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 92141cf6f9a79..df3c5d550c46a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -372,6 +372,17 @@ add_aux_reg(struct drm_i915_aux_table *aux,
return x;
 }
 
+static struct drm_i915_aux_table *
+add_blit_cctl_override(struct drm_i915_aux_table *aux, u8 idx)
+{
+   return add_aux_reg(aux,
+  "BLIT_CCTL",
+  BLIT_CCTL(0),
+  BLIT_CCTL_MOCS(idx, idx),
+  BLIT_CCTL_DST_MOCS_MASK | BLIT_CCTL_SRC_MOCS_MASK,
+  true);
+}
+
 static struct drm_i915_aux_table *
 add_cmd_cctl_override(struct drm_i915_aux_table *aux, u8 idx)
 {
@@ -398,6 +409,8 @@ build_aux_regs(const struct intel_engine_cs *engine,
 * uncached index.
 */
aux = add_cmd_cctl_override(aux, mocs->uc_index);
+   if (engine->class == COPY_ENGINE_CLASS && mocs->uc_index)
+   aux = add_blit_cctl_override(aux, mocs->uc_index);
}
 
return aux;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c8e2ca1b20796..de3cc9d66ffaa 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2567,6 +2567,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 
1) | \
  REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1))
 
+#define BLIT_CCTL(base)_MMIO((base) + 0x204)
+#define   BLIT_CCTL_DST_MOCS_MASK  REG_GENMASK(14, 8)
+#define   BLIT_CCTL_SRC_MOCS_MASK  REG_GENMASK(6, 0)
+#define   BLIT_CCTL_DST_MOCS_SHIFT 8
+#define   BLIT_CCTL_MOCS(dst, src) \
+   ((((dst) << 1) << BLIT_CCTL_DST_MOCS_SHIFT) | ((src) << 1))
+
 #define RING_RESET_CTL(base)   _MMIO((base) + 0xd0)
 #define   RESET_CTL_CAT_ERROR REG_BIT(2)
 #define   RESET_CTL_READY_TO_RESET REG_BIT(1)
-- 
2.26.2



[Intel-gfx] [PATCH V2 1/5] drm/i915/gt: Add support of mocs propagation

2021-08-15 Thread Ayaz A Siddiqui
There are now lots of commands and registers that require MOCS index
programming, so propagate the uncached MOCS index from the MOCS code
to the gt so that it can be used directly, without platform-specific
checks.
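
As a hypothetical before/after sketch of what the propagation buys its
users (toy types and functions, not the driver code; the uc_index
values 1 for DG1 and 3 for gen12 are the ones set up later in this
series):

/* Illustrative only: consumers read the propagated index from the gt. */
#include <stdio.h>

struct toy_gt {
	struct {
		unsigned char uc_index;
	} mocs;
};

/* Before: every user repeats a platform check. */
static unsigned char uc_index_by_platform(int graphics_ver, int is_dg1)
{
	if (is_dg1)
		return 1;
	if (graphics_ver >= 12)
		return 3;
	return 0;
}

/* After: users read what intel_mocs_init() propagated to the gt. */
static unsigned char uc_index_from_gt(const struct toy_gt *gt)
{
	return gt->mocs.uc_index;
}

int main(void)
{
	struct toy_gt gt = { .mocs = { .uc_index = 3 } };

	printf("platform check: %u, propagated: %u\n",
	       uc_index_by_platform(12, 0), uc_index_from_gt(&gt));
	return 0;
}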

Signed-off-by: Ayaz A Siddiqui 
Reviewed-by: CQ Tang
---
 drivers/gpu/drm/i915/gt/intel_gt_types.h |  4 
 drivers/gpu/drm/i915/gt/intel_mocs.c | 10 ++
 2 files changed, 14 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h 
b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index a81e21bf1bd1a..88601a2d2c229 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -192,6 +192,10 @@ struct intel_gt {
 
unsigned long mslice_mask;
} info;
+
+   struct i915_mocs_index_gt {
+   u8 uc_index;
+   } mocs;
 };
 
 enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c 
b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 582c4423b95d6..10cc508c1a4f6 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -22,6 +22,7 @@ struct drm_i915_mocs_table {
unsigned int size;
unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
+   u8 uc_index;
 };
 
 /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
@@ -340,6 +341,8 @@ static unsigned int get_mocs_settings(const struct 
drm_i915_private *i915,
 {
unsigned int flags;
 
+   memset(table, 0, sizeof(struct drm_i915_mocs_table));
+
if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
@@ -504,6 +507,12 @@ static u32 global_mocs_offset(void)
return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
 }
 
+static void set_mocs_index(struct intel_gt *gt,
+  struct drm_i915_mocs_table *table)
+{
+   gt->mocs.uc_index = table->uc_index;
+}
+
 void intel_mocs_init(struct intel_gt *gt)
 {
struct drm_i915_mocs_table table;
@@ -515,6 +524,7 @@ void intel_mocs_init(struct intel_gt *gt)
flags = get_mocs_settings(gt->i915, &table);
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt->uncore, &table, global_mocs_offset());
+   set_mocs_index(gt, &table);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-- 
2.26.2



Re: [Intel-gfx] i915 timeouts delaying boot under GVT

2021-08-15 Thread Zhenyu Wang
On 2021.08.13 08:31:28 +0200, Christoph Hellwig wrote:
> Hi all,
> 
> when booting a current 4.14-rc tree in a VM using GVT-g (with the host
> also running a current 4.14-rc tree), I see a bunch of long timeouts
> followed by i915 errors:
> 
> [4.252066] i915 :00:03.0: [drm] VGT balloon successfully
> [5.095190] i915 :00:03.0: [drm] *ERROR* Failed to disable SAGV (-110)
> [   15.334559] [drm:drm_atomic_helper_wait_for_flip_done] *ERROR* 
> [CRTC:51:pipe
> A] flip_done timed out
> [   15.346934] [drm] Initialized i915 1.6.0 20201103 for :00:03.0 on minor
> 0
> 
> I did a hackjob to track them down and just if out the offending code,
> which speeds up the boot by ~11 seconds but is probably dangerous as hell:

Hi, Christoph, what platform is this? And what's your guest i915 config?

I'll try to reproduce on our side. Thanks for reporting this.

> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c 
> b/drivers/gpu/drm/i915/display/intel_display.c
> index 2d5d21740c25..ee82fd67f386 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -10696,7 +10696,7 @@ static void intel_atomic_commit_tail(struct 
> intel_atomic_state *state)
>* - switch over to the vblank wait helper in the core after that since
>*   we don't need out special handling any more.
>*/
> - drm_atomic_helper_wait_for_flip_done(dev, &state->base);
> +//   drm_atomic_helper_wait_for_flip_done(dev, &state->base);
>  
>   for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
>   if (new_crtc_state->uapi.async_flip)
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 45fefa0ed160..f03ce729cc4b 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3753,7 +3753,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
>   if (!intel_has_sagv(dev_priv))
>   return 0;
>  
> - if (dev_priv->sagv_status == I915_SAGV_DISABLED)
> + if (1 || dev_priv->sagv_status == I915_SAGV_DISABLED)
>   return 0;
>  
>   drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");


signature.asc
Description: PGP signature


Re: [Intel-gfx] ✗ Fi.CI.BAT: failure for Clean up GuC CI failures, simplify locking, and kernel DOC

2021-08-15 Thread Matthew Brost
On Sun, Aug 15, 2021 at 09:15:31PM +, Patchwork wrote:
> Patch Details
> 
> Series:  Clean up GuC CI failures, simplify locking, and kernel DOC
> URL: https://patchwork.freedesktop.org/series/93704/
> State:   failure
> Details: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/index.html
> 
> CI Bug Log - changes from CI_DRM_10484 -> Patchwork_20826
> 
> Summary
> 
> FAILURE
> 
> Serious unknown changes coming with Patchwork_20826 absolutely need to be
> verified manually.
> 
> If you think the reported changes have nothing to do with the changes
> introduced in Patchwork_20826, please notify your bug team to allow them
> to document this new failure mode, which will reduce false positives in CI.
> 
> External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/
> index.html
> 
> Possible new issues
> 
> Here are the unknown changes that may have been introduced in Patchwork_20826:
> 
> IGT changes
> 
> Possible regressions
> 
>   • igt@i915_selftest@live@requests:
> 
>   □ fi-cfl-guc: PASS -> DMESG-FAIL
> 

New selftest, __cancel_reset, is exposing a bug (or at minimum different
behavior) in the execlists implementation for canceling requests. Will
fix in a new rev.

Matt

>   □ fi-kbl-soraka: PASS -> DMESG-FAIL
> 
>   □ fi-bxt-dsi: PASS -> DMESG-FAIL
> 
>   □ fi-tgl-1115g4: PASS -> DMESG-FAIL
> 
>   □ fi-cml-u2: PASS -> DMESG-FAIL
> 
>   □ fi-kbl-8809g: PASS -> DMESG-FAIL
> 
>   □ fi-cfl-8700k: PASS -> DMESG-FAIL
> 
>   □ fi-cfl-8109u: PASS -> DMESG-FAIL
> 
>   □ fi-icl-u2: PASS -> DMESG-FAIL
> 
>   □ fi-kbl-7500u: PASS -> DMESG-FAIL
> 
>   □ fi-bsw-nick: PASS -> DMESG-FAIL
> 
>   □ fi-icl-y: PASS -> DMESG-FAIL
> 
>   □ fi-kbl-guc: PASS -> DMESG-FAIL
> 
>   □ fi-kbl-7567u: PASS -> DMESG-FAIL
> 
>   □ fi-skl-guc: PASS -> DMESG-FAIL
> 
>   □ fi-bdw-5557u: PASS -> DMESG-FAIL
> 
>   □ fi-glk-dsi: PASS -> DMESG-FAIL
> 
>   □ fi-bsw-kefka: PASS -> DMESG-FAIL
> 
>   □ fi-skl-6700k2: PASS -> DMESG-FAIL
> 
> Warnings
> 
>   • igt@i915_selftest@live@workarounds:
>   □ fi-rkl-guc: DMESG-FAIL (i915#3928) -> INCOMPLETE
> 
> Suppressed
> 
> The following results come from untrusted machines, tests, or statuses.
> They do not affect the overall result.
> 
>   • igt@i915_selftest@live@requests:
> 
>   □ {fi-tgl-dsi}: PASS -> DMESG-FAIL
> 
>   □ {fi-jsl-1}: PASS -> DMESG-FAIL
> 
>   □ {fi-ehl-2}: PASS -> DMESG-FAIL
> 
> New tests
> 
> New tests have been introduced between CI_DRM_10484 and Patchwork_20826:
> 
> New IGT tests (1)
> 
>   • igt@i915_selftest@live@guc:
>   □ Statuses : 30 pass(s)
>   □ Exec time: [0.42, 5.06] s
> 
> Known issues
> 
> Here are the changes found in Patchwork_20826 that come from known issues:
> 
> IGT changes
> 
> Issues hit
> 
>   • igt@i915_module_load@reload:
> 
>   □ fi-kbl-soraka: PASS -> DMESG-WARN (i915#1982)
>   • igt@i915_selftest@live@execlists:
> 
>   □ fi-icl-y: PASS -> DMESG-FAIL (i915#1993)
> 
> Possible fixes
> 
>   • igt@i915_module_load@reload:
>   □ {fi-tgl-dsi}: DMESG-WARN (i915#1982 / k.org#205379) -> PASS
> 
> {name}: This element is suppressed. This means it is ignored when computing
> the status of the difference (SUCCESS, WARNING, or FAILURE).
> 
> Participating hosts (37 -> 34)
> 
> Missing (3): fi-bdw-samus fi-bsw-cyan bat-jsl-1
> 
> Build changes
> 
>   • Linux: CI_DRM_10484 -> Patchwork_20826
> 
> CI-20190529: 20190529
> CI_DRM_10484: 7de02d5cb1f35bd3f068237444063844dea47ddc @ git://
> anongit.freedesktop.org/gfx-ci/linux
> IGT_6175: c91f99c74b966f635d7e2eb898bf0f78383d281b @ https://
> gitlab.freedesktop.org/drm/igt-gpu-tools.git
> Patchwork_20826: f7ff315bfe3a76713c1f0a16cd92b0908d28e4c6 @ git://
> anongit.freedesktop.org/gfx-ci/linux
> 
> == Linux commits ==
> 
> f7ff315bfe3a drm/i915/guc: Add GuC kernel doc
> af14e3698d19 drm/i915/guc: Move GuC priority fields in context under 
> guc_active
> eb8a352e7c1f drm/i915/guc: Drop pin count check trick between sched_disable 
> and
> re-pin
> 7057a0daff8c drm/i915/guc: Proper xarray usage for contexts_lookup
> d97ab34c8bac drm/i915/guc: Rework and simplify locking
> 4c980575f7af drm/i915/guc: Move guc_blocked fence to struct guc_state
> 1c2b4c0ac62a drm/i915/guc: Release submit fence from an IRQ
> 14dc302536e1 drm/i915/guc: Flush G2H work queue during reset
> c0ad63d810e6 drm/i915: Allocate error capture in atomic context
> 6c1c488a3654 drm/i915/guc: Reset LRC descriptor if register returns -ENODEV
> 025e88fa74d3 drm/i915/guc: Don't touch guc_state.sched_state without a lock
> b929abcf3b59 drm/i915/guc: Take context ref when cancelling request
> b5e8c08dff35 drm/i915/selftests: Add initial GuC selftest for scrubbing lost
> G2H
> cddf94c9bda0 drm/i915/selftests: Fix memory corruption in live_lrc_isolation
> c18da32e671c drm/i915/guc: Don't enable scheduling on a banned context, guc_id
> invalid, not registered
> 0c0928ba1ba8 drm/i915/selftests: Add a cancel request selftest that 

[Intel-gfx] ✗ Fi.CI.BAT: failure for Clean up GuC CI failures, simplify locking, and kernel DOC

2021-08-15 Thread Patchwork
== Series Details ==

Series: Clean up GuC CI failures, simplify locking, and kernel DOC
URL   : https://patchwork.freedesktop.org/series/93704/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_10484 -> Patchwork_20826


Summary
---

  **FAILURE**

  Serious unknown changes coming with Patchwork_20826 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_20826, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/index.html

Possible new issues
---

  Here are the unknown changes that may have been introduced in Patchwork_20826:

### IGT changes ###

 Possible regressions 

  * igt@i915_selftest@live@requests:
- fi-cfl-guc: [PASS][1] -> [DMESG-FAIL][2]
   [1]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-cfl-guc/igt@i915_selftest@l...@requests.html
   [2]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-cfl-guc/igt@i915_selftest@l...@requests.html
- fi-kbl-soraka:  [PASS][3] -> [DMESG-FAIL][4]
   [3]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-kbl-soraka/igt@i915_selftest@l...@requests.html
   [4]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-kbl-soraka/igt@i915_selftest@l...@requests.html
- fi-bxt-dsi: [PASS][5] -> [DMESG-FAIL][6]
   [5]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-bxt-dsi/igt@i915_selftest@l...@requests.html
   [6]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-bxt-dsi/igt@i915_selftest@l...@requests.html
- fi-tgl-1115g4:  [PASS][7] -> [DMESG-FAIL][8]
   [7]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-tgl-1115g4/igt@i915_selftest@l...@requests.html
   [8]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-tgl-1115g4/igt@i915_selftest@l...@requests.html
- fi-cml-u2:  [PASS][9] -> [DMESG-FAIL][10]
   [9]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-cml-u2/igt@i915_selftest@l...@requests.html
   [10]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-cml-u2/igt@i915_selftest@l...@requests.html
- fi-kbl-8809g:   [PASS][11] -> [DMESG-FAIL][12]
   [11]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-kbl-8809g/igt@i915_selftest@l...@requests.html
   [12]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-kbl-8809g/igt@i915_selftest@l...@requests.html
- fi-cfl-8700k:   [PASS][13] -> [DMESG-FAIL][14]
   [13]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-cfl-8700k/igt@i915_selftest@l...@requests.html
   [14]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-cfl-8700k/igt@i915_selftest@l...@requests.html
- fi-cfl-8109u:   [PASS][15] -> [DMESG-FAIL][16]
   [15]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-cfl-8109u/igt@i915_selftest@l...@requests.html
   [16]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-cfl-8109u/igt@i915_selftest@l...@requests.html
- fi-icl-u2:  [PASS][17] -> [DMESG-FAIL][18]
   [17]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-icl-u2/igt@i915_selftest@l...@requests.html
   [18]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-icl-u2/igt@i915_selftest@l...@requests.html
- fi-kbl-7500u:   [PASS][19] -> [DMESG-FAIL][20]
   [19]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-kbl-7500u/igt@i915_selftest@l...@requests.html
   [20]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-kbl-7500u/igt@i915_selftest@l...@requests.html
- fi-bsw-nick:[PASS][21] -> [DMESG-FAIL][22]
   [21]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-bsw-nick/igt@i915_selftest@l...@requests.html
   [22]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-bsw-nick/igt@i915_selftest@l...@requests.html
- fi-icl-y:   [PASS][23] -> [DMESG-FAIL][24]
   [23]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-icl-y/igt@i915_selftest@l...@requests.html
   [24]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-icl-y/igt@i915_selftest@l...@requests.html
- fi-kbl-guc: [PASS][25] -> [DMESG-FAIL][26]
   [25]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-kbl-guc/igt@i915_selftest@l...@requests.html
   [26]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-kbl-guc/igt@i915_selftest@l...@requests.html
- fi-kbl-7567u:   [PASS][27] -> [DMESG-FAIL][28]
   [27]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10484/fi-kbl-7567u/igt@i915_selftest@l...@requests.html
   [28]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_20826/fi-kbl-7567u/igt@i915_selftest@l...@requests.html
- fi-skl-guc: [PASS][29] -> [DMESG-FAIL][30]
   [29]: 

[Intel-gfx] ✗ Fi.CI.SPARSE: warning for Clean up GuC CI failures, simplify locking, and kernel DOC

2021-08-15 Thread Patchwork
== Series Details ==

Series: Clean up GuC CI failures, simplify locking, and kernel DOC
URL   : https://patchwork.freedesktop.org/series/93704/
State : warning

== Summary ==

$ dim sparse --fast origin/drm-tip
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
+drivers/gpu/drm/i915/selftests/i915_syncmap.c:80:54: warning: dubious: x | !y




[Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for Clean up GuC CI failures, simplify locking, and kernel DOC

2021-08-15 Thread Patchwork
== Series Details ==

Series: Clean up GuC CI failures, simplify locking, and kernel DOC
URL   : https://patchwork.freedesktop.org/series/93704/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
c9f3859e5dce drm/i915/guc: Fix blocked context accounting
7fe738f56abc drm/i915/guc: outstanding G2H accounting
1d414637d838 drm/i915/guc: Unwind context requests in reverse order
2739b8f8966f drm/i915/guc: Don't drop ce->guc_active.lock when unwinding context
8ec967ce47da drm/i915/guc: Workaround reset G2H is received after schedule done 
G2H
-:7: WARNING:TYPO_SPELLING: 'cancelation' may be misspelled - perhaps 
'cancellation'?
#7: 
If the context is reset as a result of the request cancelation the
   ^^^

-:10: WARNING:TYPO_SPELLING: 'cancelation' may be misspelled - perhaps 
'cancellation'?
#10: 
waiting request cancelation code which resubmits the context. This races
^^^

-:12: WARNING:TYPO_SPELLING: 'cancelation' may be misspelled - perhaps 
'cancellation'?
#12: 
in this case it really should be a NOP as request cancelation code owns
  ^^^

-:43: WARNING:TYPO_SPELLING: 'cancelation' may be misspelled - perhaps 
'cancellation'?
#43: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:840:
+* XXX: If the context is reset as a result of the request cancelation
   ^^^

-:45: WARNING:TYPO_SPELLING: 'cancelation' may be misspelled - perhaps 
'cancellation'?
#45: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:842:
+* likely wrong as this creates a race between the request cancelation
   ^^^

-:49: WARNING:TYPO_SPELLING: 'cancelation' may be misspelled - perhaps 
'cancellation'?
#49: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:846:
+* enable is in flight as this indicates that a request cancelation has
^^^

-:50: WARNING:TYPO_SPELLING: 'occured' may be misspelled - perhaps 'occurred'?
#50: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:847:
+* occured.
   ^^^

-:92: WARNING:TYPO_SPELLING: 'cancelation' may be misspelled - perhaps 
'cancellation'?
#92: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:2748:
+* XXX: Racey if request cancelation has occurred, see comment in
 ^^^

total: 0 errors, 8 warnings, 0 checks, 72 lines checked
0c0928ba1ba8 drm/i915/selftests: Add a cancel request selftest that triggers a 
reset
c18da32e671c drm/i915/guc: Don't enable scheduling on a banned context, guc_id 
invalid, not registered
cddf94c9bda0 drm/i915/selftests: Fix memory corruption in live_lrc_isolation
b5e8c08dff35 drm/i915/selftests: Add initial GuC selftest for scrubbing lost G2H
-:101: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does 
MAINTAINERS need updating?
#101: 
new file mode 100644

-:161: ERROR:SPACING: space required before the open parenthesis '('
#161: FILE: drivers/gpu/drm/i915/gt/uc/selftest_guc.c:56:
+   switch(i) {

total: 1 errors, 1 warnings, 0 checks, 232 lines checked
b929abcf3b59 drm/i915/guc: Take context ref when cancelling request
025e88fa74d3 drm/i915/guc: Don't touch guc_state.sched_state without a lock
6c1c488a3654 drm/i915/guc: Reset LRC descriptor if register returns -ENODEV
c0ad63d810e6 drm/i915: Allocate error capture in atomic context
14dc302536e1 drm/i915/guc: Flush G2H work queue during reset
1c2b4c0ac62a drm/i915/guc: Release submit fence from an IRQ
4c980575f7af drm/i915/guc: Move guc_blocked fence to struct guc_state
d97ab34c8bac drm/i915/guc: Rework and simplify locking
7057a0daff8c drm/i915/guc: Proper xarray usage for contexts_lookup
-:27: WARNING:LINE_SPACING: Missing a blank line after declarations
#27: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:610:
+   bool do_put = kref_get_unless_zero(&ce->ref);
+   xa_unlock(&guc->context_lookup);

total: 0 errors, 1 warnings, 0 checks, 187 lines checked
eb8a352e7c1f drm/i915/guc: Drop pin count check trick between sched_disable and 
re-pin
af14e3698d19 drm/i915/guc: Move GuC priority fields in context under guc_active
f7ff315bfe3a drm/i915/guc: Add GuC kernel doc




[Intel-gfx] [PATCH 03/21] drm/i915/guc: Unwind context requests in reverse order

2021-08-15 Thread Matthew Brost
When unwinding requests on a reset context, if other requests in the
context are already in the priority list, the unwound requests could be
resubmitted out of seqno order. Traverse the list of active requests in
reverse and insert at the head of the priority list to fix this.
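
A toy standalone illustration (not the driver code) of why the reverse
walk plus head insertion preserves seqno order, ahead of whatever is
already queued:

/*
 * Toy model: unwind seqnos 1..3 onto a priority list already holding
 * 10 and 11, by walking the active requests newest-first and pushing
 * each one at the head.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int seqno;
	struct node *next;
};

static struct node *push_head(struct node *head, int seqno)
{
	struct node *n = malloc(sizeof(*n));

	n->seqno = seqno;
	n->next = head;
	return n;
}

int main(void)
{
	/* Priority list already holds requests 10 and 11. */
	struct node *pl = push_head(push_head(NULL, 11), 10);
	/* The context's active requests, oldest first. */
	int active[] = { 1, 2, 3 };
	int i;

	/* Reverse walk + head insertion. */
	for (i = 2; i >= 0; i--)
		pl = push_head(pl, active[i]);

	/* Prints: 1 2 3 10 11 */
	for (struct node *n = pl; n; n = n->next)
		printf("%d ", n->seqno);
	printf("\n");
	return 0;
}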

Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
Signed-off-by: Matthew Brost 
Cc: 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b5d3972ae164..bc51caba50d0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -799,9 +799,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
 
spin_lock_irqsave(&sched_engine->lock, flags);
spin_lock(&ce->guc_active.lock);
-   list_for_each_entry_safe(rq, rn,
-    &ce->guc_active.requests,
-    sched.link) {
+   list_for_each_entry_safe_reverse(rq, rn,
+     &ce->guc_active.requests,
+     sched.link) {
if (i915_request_completed(rq))
continue;
 
@@ -818,7 +818,7 @@ __unwind_incomplete_requests(struct intel_context *ce)
}
GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
 
-   list_add_tail(&rq->sched.link, pl);
+   list_add(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
spin_lock(&ce->guc_active.lock);
-- 
2.32.0



[Intel-gfx] [PATCH 09/21] drm/i915/selftests: Add initial GuC selftest for scrubbing lost G2H

2021-08-15 Thread Matthew Brost
While debugging an issue with full GT resets I went down a rabbit hole
thinking the scrubbing of lost G2H wasn't working correctly. That proved
to be wrong, as the scrubbing was working just fine, but the chase
inspired me to write a selftest to prove it. This simple selftest
injects errors by dropping various G2H messages and then issues a full
GT reset, proving that the scrubbing of those G2H doesn't blow up.

v2:
 (Daniel Vetter)
  - Use ifdef instead of macros for selftests

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |  18 +++
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  25 
 drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 126 ++
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 .../i915/selftests/intel_scheduler_helpers.c  |  12 ++
 .../i915/selftests/intel_scheduler_helpers.h  |   2 +
 6 files changed, 184 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gt/uc/selftest_guc.c

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h 
b/drivers/gpu/drm/i915/gt/intel_context_types.h
index e54351a170e2..3a73f3117873 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -198,6 +198,24 @@ struct intel_context {
 */
u8 guc_prio;
u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+   /**
+* @drop_schedule_enable: Force drop of schedule enable G2H for selftest
+*/
+   bool drop_schedule_enable;
+
+   /**
+* @drop_schedule_disable: Force drop of schedule disable G2H for
+* selftest
+*/
+   bool drop_schedule_disable;
+
+   /**
+* @drop_deregister: Force drop of deregister G2H for selftest
+*/
+   bool drop_deregister;
+#endif
 };
 
 #endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index be50cb226e33..76c32fc24e0a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2633,6 +2633,13 @@ int intel_guc_deregister_done_process_msg(struct 
intel_guc *guc,
 
trace_intel_context_deregister_done(ce);
 
+#ifdef CONFIG_DRM_I915_SELFTEST
+   if (unlikely(ce->drop_deregister)) {
+   ce->drop_deregister = false;
+   return 0;
+   }
+#endif
+
if (context_wait_for_deregister_to_register(ce)) {
struct intel_runtime_pm *runtime_pm =
&ce->engine->gt->i915->runtime_pm;
@@ -2687,10 +2694,24 @@ int intel_guc_sched_done_process_msg(struct intel_guc 
*guc,
trace_intel_context_sched_done(ce);
 
if (context_pending_enable(ce)) {
+#ifdef CONFIG_DRM_I915_SELFTEST
+   if (unlikely(ce->drop_schedule_enable)) {
+   ce->drop_schedule_enable = false;
+   return 0;
+   }
+#endif
+
clr_context_pending_enable(ce);
} else if (context_pending_disable(ce)) {
bool banned;
 
+#ifdef CONFIG_DRM_I915_SELFTEST
+   if (unlikely(ce->drop_schedule_disable)) {
+   ce->drop_schedule_disable = false;
+   return 0;
+   }
+#endif
+
/*
 * Unpin must be done before __guc_signal_context_fence,
 * otherwise a race exists between the requests getting
@@ -3067,3 +3088,7 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct 
intel_engine_cs *ve)
 
return false;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_guc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c 
b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
new file mode 100644
index ..46ca6554f65d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "selftests/intel_scheduler_helpers.h"
+
+static struct i915_request *nop_user_request(struct intel_context *ce,
+struct i915_request *from)
+{
+   struct i915_request *rq;
+   int ret;
+
+   rq = intel_context_create_request(ce);
+   if (IS_ERR(rq))
+   return rq;
+
+   if (from) {
+   ret = i915_sw_fence_await_dma_fence(&rq->submit,
+   &from->fence, 0,
+   I915_FENCE_GFP);
+   if (ret < 0) {
+   i915_request_put(rq);
+   return ERR_PTR(ret);
+   }
+   }
+
+   i915_request_get(rq);
+   i915_request_add(rq);
+
+   return rq;
+}
+
+static int intel_guc_scrub_ctbs(void *arg)
+{
+   struct intel_gt *gt = arg;
+   int ret = 0;
+   int i;
+   struct i915_request *last[3] = {NULL, NULL, NULL}, 

[Intel-gfx] [PATCH 07/21] drm/i915/guc: Don't enable scheduling on a banned context, guc_id invalid, not registered

2021-08-15 Thread Matthew Brost
When unblocking a context, do not enable scheduling if the context is
banned, its guc_id is invalid, or it is not registered.

Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
Signed-off-by: Matthew Brost 
Cc: 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1e6b876bab34..be50cb226e33 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1578,6 +1578,9 @@ static void guc_context_unblock(struct intel_context *ce)
spin_lock_irqsave(&ce->guc_state.lock, flags);
 
if (unlikely(submission_disabled(guc) ||
+intel_context_is_banned(ce) ||
+context_guc_id_invalid(ce) ||
+!lrc_desc_registered(guc, ce->guc_id) ||
 !intel_context_is_pinned(ce) ||
 context_pending_disable(ce) ||
 context_blocked(ce) > 1)) {
-- 
2.32.0



[Intel-gfx] [PATCH 18/21] drm/i915/guc: Proper xarray usage for contexts_lookup

2021-08-15 Thread Matthew Brost
Lock the xarray and take a reference to the context if needed.

Signed-off-by: Matthew Brost 
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 83 ---
 1 file changed, 72 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d3f1d8ca0396..4120d1a3c065 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -599,8 +599,17 @@ static void scrub_guc_desc_for_outstanding_g2h(struct 
intel_guc *guc)
unsigned long index, flags;
bool pending_disable, pending_enable, deregister, destroyed, banned;
 
+   xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
-   spin_lock_irqsave(&ce->guc_state.lock, flags);
+   /*
+* Corner case where the ref count on the object is zero but a
+* deregister G2H was lost. In this case we don't touch the ref
+* count and finish the destroy of the context.
+*/
+   bool do_put = kref_get_unless_zero(&ce->ref);
+   xa_unlock(&guc->context_lookup);
+
+   spin_lock(&ce->guc_state.lock);
 
/*
 * Once we are at this point submission_disabled() is guaranteed
@@ -616,7 +625,9 @@ static void scrub_guc_desc_for_outstanding_g2h(struct 
intel_guc *guc)
banned = context_banned(ce);
init_sched_state(ce);
 
-   spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+   spin_unlock(&ce->guc_state.lock);
+
+   GEM_BUG_ON(!do_put && !destroyed);
 
if (pending_enable || destroyed || deregister) {
atomic_dec(&guc->outstanding_submission_g2h);
@@ -645,7 +656,12 @@ static void scrub_guc_desc_for_outstanding_g2h(struct 
intel_guc *guc)
 
intel_context_put(ce);
}
+
+   if (do_put)
+   intel_context_put(ce);
+   xa_lock(&guc->context_lookup);
}
+   xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
 
 static inline bool
@@ -865,16 +881,26 @@ void intel_guc_submission_reset(struct intel_guc *guc, 
bool stalled)
 {
struct intel_context *ce;
unsigned long index;
+   unsigned long flags;
 
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
}
 
-   xa_for_each(&guc->context_lookup, index, ce)
+   xa_lock_irqsave(&guc->context_lookup, flags);
+   xa_for_each(&guc->context_lookup, index, ce) {
+   intel_context_get(ce);
+   xa_unlock(&guc->context_lookup);
+
if (intel_context_is_pinned(ce))
__guc_reset_context(ce, stalled);
 
+   intel_context_put(ce);
+   xa_lock(&guc->context_lookup);
+   }
+   xa_unlock_irqrestore(&guc->context_lookup, flags);
+
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
 }
@@ -949,11 +975,21 @@ void intel_guc_submission_cancel_requests(struct 
intel_guc *guc)
 {
struct intel_context *ce;
unsigned long index;
+   unsigned long flags;
+
+   xa_lock_irqsave(&guc->context_lookup, flags);
+   xa_for_each(&guc->context_lookup, index, ce) {
+   intel_context_get(ce);
+   xa_unlock(&guc->context_lookup);
 
-   xa_for_each(&guc->context_lookup, index, ce)
if (intel_context_is_pinned(ce))
guc_cancel_context_requests(ce);
 
+   intel_context_put(ce);
+   xa_lock(&guc->context_lookup);
+   }
+   xa_unlock_irqrestore(&guc->context_lookup, flags);
+
guc_cancel_sched_engine_requests(guc->sched_engine);
 
/* GuC is blown away, drop all references to contexts */
@@ -2847,21 +2883,26 @@ void intel_guc_find_hung_context(struct intel_engine_cs 
*engine)
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
+   unsigned long flags;
 
/* Reset called during driver load? GuC not yet initialised! */
if (unlikely(!guc_submission_initialized(guc)))
return;
 
+   xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
+   intel_context_get(ce);
+   xa_unlock(&guc->context_lookup);
+
if (!intel_context_is_pinned(ce))
-   continue;
+   goto next;
 
if (intel_engine_is_virtual(ce->engine)) {
if (!(ce->engine->mask & engine->mask))
-   continue;
+   goto next;
} else {
if (ce->engine != engine)
-   continue;
+   goto next;

[Intel-gfx] [PATCH 19/21] drm/i915/guc: Drop pin count check trick between sched_disable and re-pin

2021-08-15 Thread Matthew Brost
Drop the pin count check trick between a sched_disable and re-pin; now
rely on the lock and a counter of the number of committed requests to
determine if scheduling should be disabled on the context.
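
A standalone sketch of the idea, with toy types and a pthread mutex in
place of the spinlock (illustrative only, not the driver code): requests
committed to the context are counted under the lock, and schedule
disable only proceeds when that count has dropped to zero.

/* Illustrative only: decide sched-disable from a committed-request count. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_context {
	pthread_mutex_t lock;            /* stands in for guc_state.lock */
	int number_committed_requests;   /* protected by lock */
	bool sched_enabled;
};

static void request_committed(struct toy_context *ce)
{
	pthread_mutex_lock(&ce->lock);
	ce->number_committed_requests++;
	pthread_mutex_unlock(&ce->lock);
}

static void request_retired(struct toy_context *ce)
{
	pthread_mutex_lock(&ce->lock);
	ce->number_committed_requests--;
	pthread_mutex_unlock(&ce->lock);
}

/* Returns true if scheduling was disabled, false if requests slipped in. */
static bool try_sched_disable(struct toy_context *ce)
{
	bool disabled = false;

	pthread_mutex_lock(&ce->lock);
	if (!ce->number_committed_requests) {
		ce->sched_enabled = false;
		disabled = true;
	}
	pthread_mutex_unlock(&ce->lock);
	return disabled;
}

int main(void)
{
	struct toy_context ce = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.sched_enabled = true,
	};

	request_committed(&ce);
	printf("disable with a request pending: %d\n", try_sched_disable(&ce));
	request_retired(&ce);
	printf("disable once idle: %d\n", try_sched_disable(&ce));
	return 0;
}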

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |  2 +
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 49 ---
 2 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h 
b/drivers/gpu/drm/i915/gt/intel_context_types.h
index d5d643b04d54..524a35a78bf4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -169,6 +169,8 @@ struct intel_context {
struct list_head fences;
/* GuC context blocked fence */
struct i915_sw_fence blocked_fence;
+   /* GuC committed requests */
+   int number_committed_requests;
} guc_state;
 
struct {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 4120d1a3c065..e55c7c36ff56 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -247,6 +247,25 @@ static inline void decr_context_blocked(struct 
intel_context *ce)
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
 }
 
+static inline bool context_has_committed_requests(struct intel_context *ce)
+{
+   return !!ce->guc_state.number_committed_requests;
+}
+
+static inline void incr_context_committed_requests(struct intel_context *ce)
+{
+   lockdep_assert_held(>guc_state.lock);
+   ++ce->guc_state.number_committed_requests;
+   GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static inline void decr_context_committed_requests(struct intel_context *ce)
+{
+   lockdep_assert_held(>guc_state.lock);
+   --ce->guc_state.number_committed_requests;
+   GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
 static inline bool context_guc_id_invalid(struct intel_context *ce)
 {
return ce->guc_id == GUC_INVALID_LRC_ID;
@@ -1734,14 +1753,11 @@ static void guc_context_sched_disable(struct 
intel_context *ce)
spin_lock_irqsave(&ce->guc_state.lock, flags);
 
/*
-* We have to check if the context has been disabled by another thread.
-* We also have to check if the context has been pinned again as another
-* pin operation is allowed to pass this function. Checking the pin
-* count, within ce->guc_state.lock, synchronizes this function with
-* guc_request_alloc ensuring a request doesn't slip through the
-* 'context_pending_disable' fence. Checking within the spin lock (can't
-* sleep) ensures another process doesn't pin this context and generate
-* a request before we set the 'context_pending_disable' flag here.
+* We have to check if the context has been disabled by another thread,
+* check if submission has been disabled to seal a race with reset and
+* finally check if any more requests have been committed to the
+* context ensuring that a request doesn't slip through the
+* 'context_pending_disable' fence.
 */
enabled = context_enabled(ce);
if (unlikely(!enabled || submission_disabled(guc))) {
@@ -1750,7 +1766,8 @@ static void guc_context_sched_disable(struct 
intel_context *ce)
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
goto unpin;
}
-   if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
+   if (unlikely(context_has_committed_requests(ce))) {
+   intel_context_sched_disable_unpin(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return;
}
@@ -1783,6 +1800,7 @@ static void __guc_context_destroy(struct intel_context 
*ce)
   ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
   ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
   ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+   GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
lrc_fini(ce);
intel_context_fini(ce);
@@ -2013,6 +2031,10 @@ static void remove_from_context(struct i915_request *rq)
 
spin_unlock_irq(&ce->guc_active.lock);
 
+   spin_lock_irq(&ce->guc_state.lock);
+   decr_context_committed_requests(ce);
+   spin_unlock_irq(&ce->guc_state.lock);
+
atomic_dec(&ce->guc_id_ref);
i915_request_notify_execute_cb_imm(rq);
 }
@@ -2160,15 +2182,7 @@ static int guc_request_alloc(struct i915_request *rq)
 * schedule enable or context registration if either G2H is pending
 * respectfully. Once a G2H returns, the fence is released that is
 * blocking these requests (see guc_signal_context_fence).
-*
-* We can safely check the below fields outside of the lock as it isn't
-* 

[Intel-gfx] [PATCH 20/21] drm/i915/guc: Move GuC priority fields in context under guc_active

2021-08-15 Thread Matthew Brost
Move the GuC priority management fields in the context under the
guc_active struct, as this is where the lock that protects these fields
lives. Also only set the guc_prio field once, during context init.
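
A minimal standalone sketch of the rule being applied here (keep fields
in the same structure as the lock that protects them), using a pthread
mutex in place of the spinlock and made-up names:

/* Illustrative only: data lives next to the lock guarding it. */
#include <pthread.h>
#include <stdio.h>

struct guc_active_like {
	pthread_mutex_t lock;   /* protects everything below */
	unsigned char prio;
	unsigned int prio_count[4];
};

static void set_prio(struct guc_active_like *a, unsigned char prio)
{
	/* The fields are only ever touched under a->lock. */
	pthread_mutex_lock(&a->lock);
	a->prio = prio;
	a->prio_count[prio]++;
	pthread_mutex_unlock(&a->lock);
}

int main(void)
{
	struct guc_active_like a = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	set_prio(&a, 2);
	printf("prio %u, count %u\n", a.prio, a.prio_count[2]);
	return 0;
}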

Fixes: ee242ca704d3 ("drm/i915/guc: Implement GuC priority management")
Signed-off-by: Matthew Brost 
Cc: 
---
 drivers/gpu/drm/i915/gt/intel_context_types.h | 12 ++--
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 68 +++
 drivers/gpu/drm/i915/i915_trace.h |  2 +-
 3 files changed, 45 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h 
b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 524a35a78bf4..f6989e6807f7 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -112,6 +112,7 @@ struct intel_context {
 #define CONTEXT_FORCE_SINGLE_SUBMISSION7
 #define CONTEXT_NOPREEMPT  8
 #define CONTEXT_LRCA_DIRTY 9
+#define CONTEXT_GUC_INIT   10
 
struct {
u64 timeout_us;
@@ -178,6 +179,11 @@ struct intel_context {
spinlock_t lock;
/** requests: active requests on this context */
struct list_head requests;
+   /*
+* GuC priority management
+*/
+   u8 guc_prio;
+   u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
} guc_active;
 
/* GuC LRC descriptor ID */
@@ -191,12 +197,6 @@ struct intel_context {
 */
struct list_head guc_id_link;
 
-   /*
-* GuC priority management
-*/
-   u8 guc_prio;
-   u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
-
 #ifdef CONFIG_DRM_I915_SELFTEST
/**
 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index e55c7c36ff56..be5d798065a8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1352,8 +1352,6 @@ static void guc_context_policy_init(struct 
intel_engine_cs *engine,
desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
 }
 
-static inline u8 map_i915_prio_to_guc_prio(int prio);
-
 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 {
struct intel_engine_cs *engine = ce->engine;
@@ -1361,8 +1359,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, 
bool loop)
struct intel_guc *guc = &engine->gt->uc.guc;
u32 desc_idx = ce->guc_id;
struct guc_lrc_desc *desc;
-   const struct i915_gem_context *ctx;
-   int prio = I915_CONTEXT_DEFAULT_PRIORITY;
bool context_registered;
intel_wakeref_t wakeref;
int ret = 0;
@@ -1379,12 +1375,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, 
bool loop)
 
context_registered = lrc_desc_registered(guc, desc_idx);
 
-   rcu_read_lock();
-   ctx = rcu_dereference(ce->gem_context);
-   if (ctx)
-   prio = ctx->sched.priority;
-   rcu_read_unlock();
-
reset_lrc_desc(guc, desc_idx);
set_lrc_desc_registered(guc, desc_idx, ce);
 
@@ -1393,8 +1383,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, 
bool loop)
desc->engine_submit_mask = adjust_engine_mask(engine->class,
  engine->mask);
desc->hw_context_desc = ce->lrc.lrca;
-   ce->guc_prio = map_i915_prio_to_guc_prio(prio);
-   desc->priority = ce->guc_prio;
+   desc->priority = ce->guc_active.guc_prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init(engine, desc);
 
@@ -1796,10 +1785,10 @@ static inline void guc_lrc_desc_unpin(struct 
intel_context *ce)
 
 static void __guc_context_destroy(struct intel_context *ce)
 {
-   GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
-  ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
-  ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
-  ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+   GEM_BUG_ON(ce->guc_active.guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+  ce->guc_active.guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+  ce->guc_active.guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+  ce->guc_active.guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
lrc_fini(ce);
@@ -1909,14 +1898,17 @@ static void guc_context_set_prio(struct intel_guc *guc,
 
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
   prio > GUC_CLIENT_PRIORITY_NORMAL);
+   lockdep_assert_held(&ce->guc_active.lock);
 
-   if (ce->guc_prio == prio || submission_disabled(guc) ||
-   !context_registered(ce))
+   if (ce->guc_active.guc_prio == 

[Intel-gfx] [PATCH 17/21] drm/i915/guc: Rework and simplify locking

2021-08-15 Thread Matthew Brost
Rework and simplify the locking with GuC submission. Drop
sched_state_no_lock, move all fields under guc_state.sched_state, and
protect all of these fields with guc_state.lock. This requires changing the
locking hierarchy from guc_state.lock -> sched_engine.lock to
sched_engine.lock -> guc_state.lock.
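
For readers following the series, a minimal sketch of the resulting lock
nesting (illustrative only, not taken from the diff; 'sched_engine', 'ce' and
'flags' stand for the usual scheduler pointer, context pointer and saved IRQ
flags):

	/* Outer lock first, then the per-context GuC state lock. */
	spin_lock_irqsave(&sched_engine->lock, flags);
	spin_lock(&ce->guc_state.lock);
	/* ... update ce->guc_state.sched_state here ... */
	spin_unlock(&ce->guc_state.lock);
	spin_unlock_irqrestore(&sched_engine->lock, flags);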

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |   5 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 186 --
 drivers/gpu/drm/i915/i915_trace.h |   6 +-
 3 files changed, 89 insertions(+), 108 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h 
b/drivers/gpu/drm/i915/gt/intel_context_types.h
index c06171ee8792..d5d643b04d54 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -161,7 +161,7 @@ struct intel_context {
 * sched_state: scheduling state of this context using GuC
 * submission
 */
-   u16 sched_state;
+   u32 sched_state;
/*
 * fences: maintains of list of requests that have a submit
 * fence related to GuC submission
@@ -178,9 +178,6 @@ struct intel_context {
struct list_head requests;
} guc_active;
 
-   /* GuC scheduling state flags that do not require a lock. */
-   atomic_t guc_sched_state_no_lock;
-
/* GuC LRC descriptor ID */
u16 guc_id;
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 64b12b91656a..d3f1d8ca0396 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -72,86 +72,23 @@ guc_create_virtual(struct intel_engine_cs **siblings, 
unsigned int count);
 
 #define GUC_REQUEST_SIZE 64 /* bytes */
 
-/*
- * Below is a set of functions which control the GuC scheduling state which do
- * not require a lock as all state transitions are mutually exclusive. i.e. It
- * is not possible for the context pinning code and submission, for the same
- * context, to be executing simultaneously. We still need an atomic as it is
- * possible for some of the bits to changing at the same time though.
- */
-#define SCHED_STATE_NO_LOCK_ENABLEDBIT(0)
-#define SCHED_STATE_NO_LOCK_PENDING_ENABLE BIT(1)
-#define SCHED_STATE_NO_LOCK_REGISTERED BIT(2)
-static inline bool context_enabled(struct intel_context *ce)
-{
-   return (atomic_read(&ce->guc_sched_state_no_lock) &
-   SCHED_STATE_NO_LOCK_ENABLED);
-}
-
-static inline void set_context_enabled(struct intel_context *ce)
-{
-   atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_enabled(struct intel_context *ce)
-{
-   atomic_and((u32)~SCHED_STATE_NO_LOCK_ENABLED,
-  &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_pending_enable(struct intel_context *ce)
-{
-   return (atomic_read(&ce->guc_sched_state_no_lock) &
-   SCHED_STATE_NO_LOCK_PENDING_ENABLE);
-}
-
-static inline void set_context_pending_enable(struct intel_context *ce)
-{
-   atomic_or(SCHED_STATE_NO_LOCK_PENDING_ENABLE,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_pending_enable(struct intel_context *ce)
-{
-   atomic_and((u32)~SCHED_STATE_NO_LOCK_PENDING_ENABLE,
-  &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_registered(struct intel_context *ce)
-{
-   return (atomic_read(&ce->guc_sched_state_no_lock) &
-   SCHED_STATE_NO_LOCK_REGISTERED);
-}
-
-static inline void set_context_registered(struct intel_context *ce)
-{
-   atomic_or(SCHED_STATE_NO_LOCK_REGISTERED,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_registered(struct intel_context *ce)
-{
-   atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED,
-  &ce->guc_sched_state_no_lock);
-}
-
 /*
  * Below is a set of functions which control the GuC scheduling state which
- * require a lock, aside from the special case where the functions are called
- * from guc_lrc_desc_pin(). In that case it isn't possible for any other code
- * path to be executing on the context.
+ * require a lock.
  */
 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTERBIT(0)
 #define SCHED_STATE_DESTROYED  BIT(1)
 #define SCHED_STATE_PENDING_DISABLEBIT(2)
 #define SCHED_STATE_BANNED BIT(3)
-#define SCHED_STATE_BLOCKED_SHIFT  4
+#define SCHED_STATE_ENABLEDBIT(4)
+#define SCHED_STATE_PENDING_ENABLE BIT(5)
+#define SCHED_STATE_REGISTERED BIT(6)
+#define SCHED_STATE_BLOCKED_SHIFT  7
 #define SCHED_STATE_BLOCKEDBIT(SCHED_STATE_BLOCKED_SHIFT)
 

[Intel-gfx] [PATCH 05/21] drm/i915/guc: Workaround reset G2H is received after schedule done G2H

2021-08-15 Thread Matthew Brost
If the context is reset as a result of request cancelation, the context
reset G2H is received after the schedule disable done G2H, which is likely
the wrong order. The schedule disable done G2H releases the waiting request
cancelation code, which resubmits the context. This races with the context
reset G2H, which also wants to resubmit the context, but in this case it
really should be a NOP as the request cancelation code owns the resubmit.
Check the context state to seal this race until if / when the GuC firmware
is fixed.

Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
Signed-off-by: Matthew Brost 
Cc: 
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 42 ---
 1 file changed, 36 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3cd2da6f5c03..1e6b876bab34 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -826,17 +826,34 @@ __unwind_incomplete_requests(struct intel_context *ce)
 static void __guc_reset_context(struct intel_context *ce, bool stalled)
 {
struct i915_request *rq;
+   unsigned long flags;
u32 head;
+   bool skip = false;
 
intel_context_get(ce);
 
/*
-* GuC will implicitly mark the context as non-schedulable
-* when it sends the reset notification. Make sure our state
-* reflects this change. The context will be marked enabled
-* on resubmission.
+* GuC will implicitly mark the context as non-schedulable when it sends
+* the reset notification. Make sure our state reflects this change. The
+* context will be marked enabled on resubmission.
+*
+* XXX: If the context is reset as a result of the request cancelation
+* this G2H is received after the schedule disable complete G2H which is
+* likely wrong as this creates a race between the request cancelation
+* code re-submitting the context and this G2H handler. This likely
+* should be fixed in the GuC but until if / when that gets fixed we
+* need to work around this. Convert this function to a NOP if a pending
+* enable is in flight as this indicates that a request cancelation has
+* occurred.
 */
-   clr_context_enabled(ce);
+   spin_lock_irqsave(&ce->guc_state.lock, flags);
+   if (unlikely(!context_pending_enable(ce))) {
+   clr_context_enabled(ce);
+   skip = true;
+   }
+   spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+   if (unlikely(skip))
+   goto out_put;
 
rq = intel_context_find_active_request(ce);
if (!rq) {
@@ -855,6 +872,7 @@ static void __guc_reset_context(struct intel_context *ce, 
bool stalled)
 out_replay:
guc_reset_state(ce, head, stalled);
__unwind_incomplete_requests(ce);
+out_put:
intel_context_put(ce);
 }
 
@@ -1599,6 +1617,13 @@ static void guc_context_cancel_request(struct 
intel_context *ce,
guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
true);
}
+
+   /*
+* XXX: Racey if context is reset, see comment in
+* __guc_reset_context().
+*/
+   flush_work(&ce_to_guc(ce)->ct.requests.worker);
+
guc_context_unblock(ce);
}
 }
@@ -2719,7 +2744,12 @@ static void guc_handle_context_reset(struct intel_guc 
*guc,
 {
trace_intel_context_reset(ce);
 
-   if (likely(!intel_context_is_banned(ce))) {
+   /*
+* XXX: Racey if request cancelation has occurred, see comment in
+* __guc_reset_context().
+*/
+   if (likely(!intel_context_is_banned(ce) &&
+  !context_blocked(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
}
-- 
2.32.0



[Intel-gfx] [PATCH 14/21] drm/i915/guc: Flush G2H work queue during reset

2021-08-15 Thread Matthew Brost
It isn't safe to scrub for missing G2H or continue with the reset until
all G2H processing is complete. Flush the G2H work queue during reset to
ensure it is done running.
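
The replacement below leans on the workqueue guarantee that flush_work()
only returns once any queued instance of the handler has finished; condensed
from the diff that follows:

	/* Wait for the G2H handler work to finish before scrubbing. */
	flush_work(&guc->ct.requests.worker);

	scrub_guc_desc_for_outstanding_g2h(guc);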

Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
Signed-off-by: Matthew Brost 
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c  | 18 ++
 1 file changed, 2 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a8ac446f32f3..54c0914b21f3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -707,8 +707,6 @@ static void guc_flush_submissions(struct intel_guc *guc)
 
 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
 {
-   int i;
-
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
@@ -724,20 +722,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc 
*guc)
 
guc_flush_submissions(guc);
 
-   /*
-* Handle any outstanding G2Hs before reset. Call IRQ handler directly
-* each pass as interrupt have been disabled. We always scrub for
-* outstanding G2H as it is possible for outstanding_submission_g2h to
-* be incremented after the context state update.
-*/
-   for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) {
-   intel_guc_to_host_event_handler(guc);
-#define wait_for_reset(guc, wait_var) \
-   intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20))
-   do {
-   wait_for_reset(guc, &guc->outstanding_submission_g2h);
-   } while (!list_empty(&guc->ct.requests.incoming));
-   }
+   flush_work(&guc->ct.requests.worker);
+
scrub_guc_desc_for_outstanding_g2h(guc);
 }
 
-- 
2.32.0



[Intel-gfx] [PATCH 16/21] drm/i915/guc: Move guc_blocked fence to struct guc_state

2021-08-15 Thread Matthew Brost
Move guc_blocked fence to struct guc_state as the lock which protects
the fence lives there.

s/ce->guc_blocked/ce->guc_state.blocked_fence/g

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/intel_context.c|  5 +++--
 drivers/gpu/drm/i915/gt/intel_context_types.h  |  5 ++---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c  | 18 +-
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c 
b/drivers/gpu/drm/i915/gt/intel_context.c
index 745e84c72c90..0e48939ec85f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -405,8 +405,9 @@ intel_context_init(struct intel_context *ce, struct 
intel_engine_cs *engine)
 * Initialize fence to be complete as this is expected to be complete
 * unless there is a pending schedule disable outstanding.
 */
-   i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
-   i915_sw_fence_commit(&ce->guc_blocked);
+   i915_sw_fence_init(&ce->guc_state.blocked_fence,
+  sw_fence_dummy_notify);
+   i915_sw_fence_commit(&ce->guc_state.blocked_fence);

i915_active_init(&ce->active,
 __intel_context_active, __intel_context_retire, 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h 
b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 3a73f3117873..c06171ee8792 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -167,6 +167,8 @@ struct intel_context {
 * fence related to GuC submission
 */
struct list_head fences;
+   /* GuC context blocked fence */
+   struct i915_sw_fence blocked_fence;
} guc_state;
 
struct {
@@ -190,9 +192,6 @@ struct intel_context {
 */
struct list_head guc_id_link;
 
-   /* GuC context blocked fence */
-   struct i915_sw_fence guc_blocked;
-
/*
 * GuC priority management
 */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b09be9b88cb3..64b12b91656a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1481,24 +1481,24 @@ static void guc_blocked_fence_complete(struct 
intel_context *ce)
 {
lockdep_assert_held(&ce->guc_state.lock);

-   if (!i915_sw_fence_done(&ce->guc_blocked))
-   i915_sw_fence_complete(&ce->guc_blocked);
+   if (!i915_sw_fence_done(&ce->guc_state.blocked_fence))
+   i915_sw_fence_complete(&ce->guc_state.blocked_fence);
 }
 
 static void guc_blocked_fence_reinit(struct intel_context *ce)
 {
lockdep_assert_held(&ce->guc_state.lock);
-   GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked));
+   GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked_fence));

/*
* This fence is always complete unless a pending schedule disable is
* outstanding. We arm the fence here and complete it when we receive
* the pending schedule disable complete message.
*/
-   i915_sw_fence_fini(&ce->guc_blocked);
-   i915_sw_fence_reinit(&ce->guc_blocked);
-   i915_sw_fence_await(&ce->guc_blocked);
-   i915_sw_fence_commit(&ce->guc_blocked);
+   i915_sw_fence_fini(&ce->guc_state.blocked_fence);
+   i915_sw_fence_reinit(&ce->guc_state.blocked_fence);
+   i915_sw_fence_await(&ce->guc_state.blocked_fence);
+   i915_sw_fence_commit(&ce->guc_state.blocked_fence);
 }
 
 static u16 prep_context_pending_disable(struct intel_context *ce)
@@ -1538,7 +1538,7 @@ static struct i915_sw_fence *guc_context_block(struct 
intel_context *ce)
if (enabled)
clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
-   return &ce->guc_blocked;
+   return &ce->guc_state.blocked_fence;
}
 
/*
@@ -1554,7 +1554,7 @@ static struct i915_sw_fence *guc_context_block(struct 
intel_context *ce)
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_sched_disable(guc, ce, guc_id);
 
-   return &ce->guc_blocked;
+   return &ce->guc_state.blocked_fence;
 }
 
 static void guc_context_unblock(struct intel_context *ce)
-- 
2.32.0



[Intel-gfx] [PATCH 02/21] drm/i915/guc: outstanding G2H accounting

2021-08-15 Thread Matthew Brost
Fix a small race that could result in incorrect accounting of the number
of outstanding G2H. Prior to this patch we did not increment the number of
outstanding G2H if we encountered a GT reset while sending an H2G. This was
incorrect, as the context state had already been updated to anticipate a G2H
response, so the counter should have been incremented.

Fixes: f4eb1f3fe946 ("drm/i915/guc: Ensure G2H response has space in buffer")
Signed-off-by: Matthew Brost 
Cc: 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 69faa39da178..b5d3972ae164 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -360,11 +360,13 @@ static int guc_submission_send_busy_loop(struct intel_guc 
*guc,
 {
int err;
 
-   err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
-
-   if (!err && g2h_len_dw)
+   if (g2h_len_dw)
atomic_inc(&guc->outstanding_submission_g2h);

+   err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+   if (err == -EBUSY && g2h_len_dw)
+   atomic_dec(&guc->outstanding_submission_g2h);
+
return err;
 }
 
-- 
2.32.0



[Intel-gfx] [PATCH 21/21] drm/i915/guc: Add GuC kernel doc

2021-08-15 Thread Matthew Brost
Add GuC kernel doc for all structures added thus far for GuC submission
and update the main GuC submission section with the new interface
details.

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |  42 +---
 drivers/gpu/drm/i915/gt/uc/intel_guc.h|  19 +++-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 101 ++
 drivers/gpu/drm/i915/i915_request.h   |  18 ++--
 4 files changed, 131 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h 
b/drivers/gpu/drm/i915/gt/intel_context_types.h
index f6989e6807f7..75d609a1bc33 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -156,44 +156,56 @@ struct intel_context {
u8 wa_bb_page; /* if set, page num reserved for context workarounds */
 
struct {
-   /** lock: protects everything in guc_state */
+   /** @lock: protects everything in guc_state */
spinlock_t lock;
/**
-* sched_state: scheduling state of this context using GuC
+* @sched_state: scheduling state of this context using GuC
 * submission
 */
u32 sched_state;
/*
-* fences: maintains of list of requests that have a submit
-* fence related to GuC submission
+* @fences: maintains a list of requests that are currently being
+* fenced until a GuC operation completes
 */
struct list_head fences;
-   /* GuC context blocked fence */
+   /**
+* @blocked_fence: fence used to signal when the blocking of a
+* context's submissions is complete.
+*/
struct i915_sw_fence blocked_fence;
-   /* GuC committed requests */
+   /** @number_committed_requests: number of committed requests */
int number_committed_requests;
} guc_state;
 
struct {
-   /** lock: protects everything in guc_active */
+   /** @lock: protects everything in guc_active */
spinlock_t lock;
-   /** requests: active requests on this context */
+   /** @requests: list of active requests on this context */
struct list_head requests;
-   /*
-* GuC priority management
-*/
+   /** @guc_prio: the context's current guc priority */
u8 guc_prio;
+   /**
+* @guc_prio_count: a counter of the number of requests in flight in
+* each priority bucket
+*/
u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
} guc_active;
 
-   /* GuC LRC descriptor ID */
+   /**
+* @guc_id: unique handle which is used to communicate information with
+* the GuC about this context, protected by guc->contexts_lock
+*/
u16 guc_id;
 
-   /* GuC LRC descriptor reference count */
+   /**
+* @guc_id_ref: the number of references to the guc_id, when
+* transitioning in and out of zero protected by guc->contexts_lock
+*/
atomic_t guc_id_ref;
 
-   /*
-* GuC ID link - in list when unpinned but guc_id still valid in GuC
+   /**
+* @guc_id_link: in guc->guc_id_list when the guc_id has no refs but is
+* still valid, protected by guc->contexts_lock
 */
struct list_head guc_id_link;
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h 
b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2e27fe59786b..c0b3fdb601f0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -41,6 +41,10 @@ struct intel_guc {
spinlock_t irq_lock;
unsigned int msg_enabled_mask;
 
+   /**
+* @outstanding_submission_g2h: number of outstanding G2H related to GuC
+* submission, used to determine if the GT is idle
+*/
atomic_t outstanding_submission_g2h;
 
struct {
@@ -49,12 +53,16 @@ struct intel_guc {
void (*disable)(struct intel_guc *guc);
} interrupts;
 
-   /*
-* contexts_lock protects the pool of free guc ids and a linked list of
-* guc ids available to be stolen
+   /**
+* @contexts_lock: protects guc_ids, guc_id_list, ce->guc_id, and
+* ce->guc_id_ref when transitioning in and out of zero
 */
spinlock_t contexts_lock;
+   /** @guc_ids: used to allocate new guc_ids */
struct ida guc_ids;
+   /**
+* @guc_id_list: list of intel_context with valid guc_ids but no refs
+*/
struct list_head guc_id_list;
 
bool submission_supported;
@@ -70,7 +78,10 @@ struct intel_guc {
struct i915_vma 

[Intel-gfx] [PATCH 12/21] drm/i915/guc: Reset LRC descriptor if register returns -ENODEV

2021-08-15 Thread Matthew Brost
Reset the LRC descriptor if registering a context returns -ENODEV, as this
means we are mid-reset.

Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 02652631e3f7..a8ac446f32f3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1398,10 +1398,12 @@ static int guc_lrc_desc_pin(struct intel_context *ce, 
bool loop)
} else {
with_intel_runtime_pm(runtime_pm, wakeref)
ret = register_context(ce, loop);
-   if (unlikely(ret == -EBUSY))
+   if (unlikely(ret == -EBUSY)) {
+   reset_lrc_desc(guc, desc_idx);
+   } else if (unlikely(ret == -ENODEV)) {
reset_lrc_desc(guc, desc_idx);
-   else if (unlikely(ret == -ENODEV))
ret = 0;/* Will get registered later */
+   }
}
 
return ret;
-- 
2.32.0



[Intel-gfx] [PATCH 13/21] drm/i915: Allocate error capture in atomic context

2021-08-15 Thread Matthew Brost
Error captures can now be done in a work queue that processes G2H messages.
The reset path must wait for that processing to finish to avoid races in the
missing-G2H cleanup, which creates a dependency on memory allocations and
dma fences (i915_requests). Requests in turn depend on resets, so we now
have a circular dependency. To work around this, allocate the error capture
in an atomic context.
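
For context, the constraint is that nothing on this path may sleep or recurse
into reclaim; a minimal sketch of the flag choice, reusing the ATOMIC_MAYFAIL
definition already present in i915_gpu_error.c (the buffer name is illustrative):

	#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

	buf = kmalloc(size, ATOMIC_MAYFAIL);	/* never sleeps, may fail */
	if (!buf)
		return -ENOMEM;			/* degrade the capture gracefully */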

Fixes: dc0dad365c5e ("Fix for error capture after full GPU reset with GuC")
Fixes: 573ba126aef3 ("Capture error state on context reset")
Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/i915_gpu_error.c | 37 +--
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c 
b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0f08bcfbe964..453376aa6d9f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -49,7 +49,6 @@
 #include "i915_memcpy.h"
 #include "i915_scatterlist.h"
 
-#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
 
 static void __sg_set_buf(struct scatterlist *sg,
@@ -79,7 +78,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf 
*e, size_t len)
if (e->cur == e->end) {
struct scatterlist *sgl;
 
-   sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
+   sgl = (typeof(sgl))__get_free_page(ATOMIC_MAYFAIL);
if (!sgl) {
e->err = -ENOMEM;
return false;
@@ -99,10 +98,10 @@ static bool __i915_error_grow(struct 
drm_i915_error_state_buf *e, size_t len)
}
 
e->size = ALIGN(len + 1, SZ_64K);
-   e->buf = kmalloc(e->size, ALLOW_FAIL);
+   e->buf = kmalloc(e->size, ATOMIC_MAYFAIL);
if (!e->buf) {
e->size = PAGE_ALIGN(len + 1);
-   e->buf = kmalloc(e->size, GFP_KERNEL);
+   e->buf = kmalloc(e->size, ATOMIC_MAYFAIL);
}
if (!e->buf) {
e->err = -ENOMEM;
@@ -243,12 +242,12 @@ static bool compress_init(struct i915_vma_compress *c)
 {
struct z_stream_s *zstream = &c->zstream;

-   if (pool_init(&c->pool, ALLOW_FAIL))
+   if (pool_init(&c->pool, ATOMIC_MAYFAIL))
return false;
 
zstream->workspace =
kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
-   ALLOW_FAIL);
+   ATOMIC_MAYFAIL);
if (!zstream->workspace) {
pool_fini(&c->pool);
return false;
@@ -256,7 +255,7 @@ static bool compress_init(struct i915_vma_compress *c)
 
c->tmp = NULL;
if (i915_has_memcpy_from_wc())
-   c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
+   c->tmp = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
 
return true;
 }
@@ -280,7 +279,7 @@ static void *compress_next_page(struct i915_vma_compress *c,
if (dst->page_count >= dst->num_pages)
return ERR_PTR(-ENOSPC);
 
-   page = pool_alloc(&c->pool, ALLOW_FAIL);
+   page = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
if (!page)
return ERR_PTR(-ENOMEM);
 
@@ -376,7 +375,7 @@ struct i915_vma_compress {
 
 static bool compress_init(struct i915_vma_compress *c)
 {
-   return pool_init(&c->pool, ALLOW_FAIL) == 0;
+   return pool_init(&c->pool, ATOMIC_MAYFAIL) == 0;
 }
 
 static bool compress_start(struct i915_vma_compress *c)
@@ -391,7 +390,7 @@ static int compress_page(struct i915_vma_compress *c,
 {
void *ptr;
 
-   ptr = pool_alloc(&c->pool, ALLOW_FAIL);
+   ptr = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
if (!ptr)
return -ENOMEM;
 
@@ -997,7 +996,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
-   dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
+   dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ATOMIC_MAYFAIL);
if (!dst)
return NULL;
 
@@ -1433,7 +1432,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
 
-   ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+   ee = intel_engine_coredump_alloc(engine, ATOMIC_MAYFAIL);
if (!ee)
return NULL;
 
@@ -1481,7 +1480,7 @@ gt_record_engines(struct intel_gt_coredump *gt,
struct intel_engine_coredump *ee;
 
/* Refill our page pool before entering atomic section */
-   pool_refill(&compress->pool, ALLOW_FAIL);
+   pool_refill(&compress->pool, ATOMIC_MAYFAIL);
 
ee = capture_engine(engine, compress);
if (!ee)
@@ -1507,7 +1506,7 @@ gt_record_uc(struct intel_gt_coredump *gt,
const struct intel_uc *uc = &gt->_gt->uc;
   

[Intel-gfx] [PATCH 15/21] drm/i915/guc: Release submit fence from an IRQ

2021-08-15 Thread Matthew Brost
A subsequent patch will flip the locking hierarchy from
ce->guc_state.lock -> sched_engine->lock to sched_engine->lock ->
ce->guc_state.lock. As such we need to release the submit fence for a
request from an IRQ to break a lock inversion - i.e. the fence must be
released while holding ce->guc_state.lock, and releasing it can acquire
sched_engine->lock.
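
Condensed from the diff below, the shape of the workaround is to defer the
fence completion to an irq_work callback, which runs outside the
ce->guc_state.lock critical section and is therefore free to take
sched_engine->lock:

	static void submit_work_cb(struct irq_work *wrk)
	{
		struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);

		/* Runs outside ce->guc_state.lock. */
		i915_sw_fence_complete(&rq->submit);
	}

	/* From inside the ce->guc_state.lock critical section: */
	irq_work_queue(&rq->submit_work);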

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 15 ++-
 drivers/gpu/drm/i915/i915_request.h   |  5 +
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 54c0914b21f3..b09be9b88cb3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2016,6 +2016,14 @@ static const struct intel_context_ops guc_context_ops = {
.create_virtual = guc_create_virtual,
 };
 
+static void submit_work_cb(struct irq_work *wrk)
+{
+   struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+   might_lock(&rq->engine->sched_engine->lock);
+   i915_sw_fence_complete(&rq->submit);
+}
+
 static void __guc_signal_context_fence(struct intel_context *ce)
 {
struct i915_request *rq;
@@ -2025,8 +2033,12 @@ static void __guc_signal_context_fence(struct 
intel_context *ce)
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
 
+   /*
+* Use an IRQ to ensure locking order of sched_engine->lock ->
+* ce->guc_state.lock is preserved.
+*/
list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-   i915_sw_fence_complete(&rq->submit);
+   irq_work_queue(&rq->submit_work);

INIT_LIST_HEAD(&ce->guc_state.fences);
 }
@@ -2136,6 +2148,7 @@ static int guc_request_alloc(struct i915_request *rq)
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
+   init_irq_work(&rq->submit_work, submit_work_cb);
i915_sw_fence_await(&rq->submit);

list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
diff --git a/drivers/gpu/drm/i915/i915_request.h 
b/drivers/gpu/drm/i915/i915_request.h
index 1bc1349ba3c2..d818cfbfc41d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -218,6 +218,11 @@ struct i915_request {
};
struct llist_head execute_cb;
struct i915_sw_fence semaphore;
+   /**
+* @submit_work: complete submit fence from an IRQ if needed for
+* locking hierarchy reasons.
+*/
+   struct irq_work submit_work;
 
/*
 * A list of everyone we wait upon, and everyone who waits upon us.
-- 
2.32.0



[Intel-gfx] [PATCH 11/21] drm/i915/guc: Don't touch guc_state.sched_state without a lock

2021-08-15 Thread Matthew Brost
Previously we used some clever tricks to avoid taking a lock when touching
guc_state.sched_state in certain cases. Don't do that; enforce the use
of the lock.

Part of this is removing a dead code path from guc_lrc_desc_pin where a
context could be deregistered when the aforementioned function was
called from the submission path. Remove this dead code and add a
GEM_BUG_ON in case this path is ever attempted again.
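
Condensed from the diff below, the pattern is simply to replace the "only
called from X" comments with a lockdep assertion so the rule is enforced at
runtime rather than merely documented:

	static inline void
	set_context_wait_for_deregister_to_register(struct intel_context *ce)
	{
		lockdep_assert_held(&ce->guc_state.lock);
		ce->guc_state.sched_state |=
			SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
	}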

Signed-off-by: Matthew Brost 
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++-
 1 file changed, 31 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index edaaa386d189..02652631e3f7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -150,11 +150,22 @@ static inline void clr_context_registered(struct 
intel_context *ce)
 #define SCHED_STATE_BLOCKED_MASK   (0xfff << SCHED_STATE_BLOCKED_SHIFT)
 static inline void init_sched_state(struct intel_context *ce)
 {
-   /* Only should be called from guc_lrc_desc_pin() */
+   lockdep_assert_held(&ce->guc_state.lock);
atomic_set(&ce->guc_sched_state_no_lock, 0);
ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
 }
 
+static inline bool sched_state_is_init(struct intel_context *ce)
+{
+   /*
+* XXX: Kernel contexts can have SCHED_STATE_NO_LOCK_REGISTERED after
+* suspend.
+*/
+   return !(atomic_read(&ce->guc_sched_state_no_lock) &
+~SCHED_STATE_NO_LOCK_REGISTERED) &&
+   !(ce->guc_state.sched_state &= ~SCHED_STATE_BLOCKED_MASK);
+}
+
 static inline bool
 context_wait_for_deregister_to_register(struct intel_context *ce)
 {
@@ -165,7 +176,7 @@ context_wait_for_deregister_to_register(struct 
intel_context *ce)
 static inline void
 set_context_wait_for_deregister_to_register(struct intel_context *ce)
 {
-   /* Only should be called from guc_lrc_desc_pin() without lock */
+   lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |=
SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
 }
@@ -599,9 +610,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct 
intel_guc *guc)
bool pending_disable, pending_enable, deregister, destroyed, banned;
 
xa_for_each(&guc->context_lookup, index, ce) {
-   /* Flush context */
spin_lock_irqsave(&ce->guc_state.lock, flags);
-   spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
/*
 * Once we are at this point submission_disabled() is guaranteed
@@ -617,6 +626,8 @@ static void scrub_guc_desc_for_outstanding_g2h(struct 
intel_guc *guc)
banned = context_banned(ce);
init_sched_state(ce);
 
+   spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
if (pending_enable || destroyed || deregister) {
atomic_dec(&guc->outstanding_submission_g2h);
if (deregister)
@@ -1317,6 +1328,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, 
bool loop)
int ret = 0;
 
GEM_BUG_ON(!engine->mask);
+   GEM_BUG_ON(!sched_state_is_init(ce));
 
/*
 * Ensure LRC + CT vmas are is same region as write barrier is done
@@ -1345,7 +1357,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, 
bool loop)
desc->priority = ce->guc_prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init(engine, desc);
-   init_sched_state(ce);
 
/*
 * The context_lookup xarray is used to determine if the hardware
@@ -1356,26 +1367,23 @@ static int guc_lrc_desc_pin(struct intel_context *ce, 
bool loop)
 * registering this context.
 */
if (context_registered) {
+   bool disabled;
+   unsigned long flags;
+
trace_intel_context_steal_guc_id(ce);
-   if (!loop) {
+   GEM_BUG_ON(!loop);
+
+   /* Seal race with Reset */
+   spin_lock_irqsave(&ce->guc_state.lock, flags);
+   disabled = submission_disabled(guc);
+   if (likely(!disabled)) {
set_context_wait_for_deregister_to_register(ce);
intel_context_get(ce);
-   } else {
-   bool disabled;
-   unsigned long flags;
-
-   /* Seal race with Reset */
-   spin_lock_irqsave(&ce->guc_state.lock, flags);
-   disabled = submission_disabled(guc);
-   if (likely(!disabled)) {
-   set_context_wait_for_deregister_to_register(ce);
-   intel_context_get(ce);
-   }
-   spin_unlock_irqrestore(&ce->guc_state.lock, flags);
-   if (unlikely(disabled)) {
-   

[Intel-gfx] [PATCH 04/21] drm/i915/guc: Don't drop ce->guc_active.lock when unwinding context

2021-08-15 Thread Matthew Brost
Don't drop ce->guc_active.lock when unwinding a context after reset.
At one point we had to drop this because of a lock inversion but that is
no longer the case. It is much safer to hold the lock so let's do that.

Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
Signed-off-by: Matthew Brost 
Cc: 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4 
 1 file changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index bc51caba50d0..3cd2da6f5c03 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -806,8 +806,6 @@ __unwind_incomplete_requests(struct intel_context *ce)
continue;
 
list_del_init(&rq->sched.link);
-   spin_unlock(&ce->guc_active.lock);
-
__i915_request_unsubmit(rq);

/* Push the request back into the queue for later resubmission. */
@@ -820,8 +818,6 @@ __unwind_incomplete_requests(struct intel_context *ce)

list_add(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
-   spin_lock(&ce->guc_active.lock);
}
spin_unlock(&ce->guc_active.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags);
-- 
2.32.0



[Intel-gfx] [PATCH 10/21] drm/i915/guc: Take context ref when cancelling request

2021-08-15 Thread Matthew Brost
A context can get destroyed after cancelling a request, so take a
reference to the context when cancelling a request.
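
Condensed from the diff below, the reference now brackets the whole
block/unblock window so the context cannot disappear underneath it:

	intel_context_get(ce);
	fence = guc_context_block(ce);
	i915_sw_fence_wait(fence);
	/* ... skip the request if it hasn't completed ... */
	guc_context_unblock(ce);
	intel_context_put(ce);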

Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 76c32fc24e0a..edaaa386d189 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1612,8 +1612,10 @@ static void guc_context_cancel_request(struct 
intel_context *ce,
   struct i915_request *rq)
 {
if (i915_sw_fence_signaled(&rq->submit)) {
-   struct i915_sw_fence *fence = guc_context_block(ce);
+   struct i915_sw_fence *fence;
 
+   intel_context_get(ce);
+   fence = guc_context_block(ce);
i915_sw_fence_wait(fence);
if (!i915_request_completed(rq)) {
__i915_request_skip(rq);
@@ -1628,6 +1630,7 @@ static void guc_context_cancel_request(struct 
intel_context *ce,
flush_work(&ce_to_guc(ce)->ct.requests.worker);
 
guc_context_unblock(ce);
+   intel_context_put(ce);
}
 }
 
-- 
2.32.0



[Intel-gfx] [PATCH 06/21] drm/i915/selftests: Add a cancel request selftest that triggers a reset

2021-08-15 Thread Matthew Brost
Add a cancel request selftest that results in an engine reset to cancel
the request, as it is non-preemptable. Also insert a NOP request after
the cancelled request and confirm that it completes successfully.

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/selftests/i915_request.c | 94 +++
 1 file changed, 94 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c 
b/drivers/gpu/drm/i915/selftests/i915_request.c
index d67710d10615..bf1cf7b5af76 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -772,6 +772,98 @@ static int __cancel_completed(struct intel_engine_cs 
*engine)
return err;
 }
 
+static int __cancel_reset(struct intel_engine_cs *engine)
+{
+   struct intel_context *ce;
+   struct igt_spinner spin;
+   struct i915_request *rq, *nop;
+   unsigned long preempt_timeout_ms;
+   int err = 0;
+
+   preempt_timeout_ms = engine->props.preempt_timeout_ms;
+   engine->props.preempt_timeout_ms = 100;
+
+   if (igt_spinner_init(&spin, engine->gt))
+   goto out_restore;
+
+   ce = intel_context_create(engine);
+   if (IS_ERR(ce)) {
+   err = PTR_ERR(ce);
+   goto out_spin;
+   }
+
+   rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+   if (IS_ERR(rq)) {
+   err = PTR_ERR(rq);
+   goto out_ce;
+   }
+
+   pr_debug("%s: Cancelling active request\n", engine->name);
+   i915_request_get(rq);
+   i915_request_add(rq);
+   if (!igt_wait_for_spinner(&spin, rq)) {
+   struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+   pr_err("Failed to start spinner on %s\n", engine->name);
+   intel_engine_dump(engine, &p, "%s\n", engine->name);
+   err = -ETIME;
+   goto out_rq;
+   }
+
+   nop = intel_context_create_request(ce);
+   if (IS_ERR(nop)) {
+   err = PTR_ERR(nop);
+   goto out_rq;
+   }
+   i915_request_get(nop);
+   i915_request_add(nop);
+
+   i915_request_cancel(rq, -EINTR);
+
+   if (i915_request_wait(rq, 0, HZ) < 0) {
+   struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+   pr_err("%s: Failed to cancel hung request\n", engine->name);
+   intel_engine_dump(engine, &p, "%s\n", engine->name);
+   err = -ETIME;
+   goto out_nop;
+   }
+
+   if (rq->fence.error != -EINTR) {
+   pr_err("%s: fence not cancelled (%u)\n",
+  engine->name, rq->fence.error);
+   err = -EINVAL;
+   goto out_nop;
+   }
+
+   if (i915_request_wait(nop, 0, HZ) < 0) {
+   struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+   pr_err("%s: Failed to complete nop request\n", engine->name);
+   intel_engine_dump(engine, &p, "%s\n", engine->name);
+   err = -ETIME;
+   goto out_nop;
+   }
+
+   if (nop->fence.error != 0) {
+   pr_err("%s: Nop request errored (%u)\n",
+  engine->name, nop->fence.error);
+   err = -EINVAL;
+   }
+
+out_nop:
+   i915_request_put(nop);
+out_rq:
+   i915_request_put(rq);
+out_ce:
+   intel_context_put(ce);
+out_spin:
+   igt_spinner_fini(&spin);
+out_restore:
+   engine->props.preempt_timeout_ms = preempt_timeout_ms;
+   if (err)
+   pr_err("%s: %s error %d\n", __func__, engine->name, err);
+   return err;
+}
+
 static int live_cancel_request(void *arg)
 {
struct drm_i915_private *i915 = arg;
@@ -798,6 +890,8 @@ static int live_cancel_request(void *arg)
err = __cancel_active(engine);
if (err == 0)
err = __cancel_completed(engine);
+   if (err == 0)
+   err = __cancel_reset(engine);
 
err2 = igt_live_test_end();
if (err)
-- 
2.32.0



[Intel-gfx] [PATCH 08/21] drm/i915/selftests: Fix memory corruption in live_lrc_isolation

2021-08-15 Thread Matthew Brost
GuC submission has exposed an existing memory corruption in
live_lrc_isolation. We believe that some writes to the watchdog offsets
in the LRC (0x178 & 0x17c) can result in trashing of portions of the
address space. With GuC submission there are additional objects which
can move the context redzone into the space that is trashed. To
work around this, avoid poisoning the watchdog.

v2:
 (Daniel Vetter)
  - Add VLK ref in code to workaround

Signed-off-by: Matthew Brost 
---
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 29 +-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c 
b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b0977a3b699b..cdc6ae48a1e1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1074,6 +1074,32 @@ record_registers(struct intel_context *ce,
goto err_after;
 }
 
+static u32 safe_offset(u32 offset, u32 reg)
+{
+   /* XXX skip testing of watchdog - VLK-22772 */
+   if (offset == 0x178 || offset == 0x17c)
+   reg = 0;
+
+   return reg;
+}
+
+static int get_offset_mask(struct intel_engine_cs *engine)
+{
+   if (GRAPHICS_VER(engine->i915) < 12)
+   return 0xfff;
+
+   switch (engine->class) {
+   default:
+   case RENDER_CLASS:
+   return 0x07ff;
+   case COPY_ENGINE_CLASS:
+   return 0x0fff;
+   case VIDEO_DECODE_CLASS:
+   case VIDEO_ENHANCEMENT_CLASS:
+   return 0x3fff;
+   }
+}
+
 static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 {
struct i915_vma *batch;
@@ -1117,7 +1143,8 @@ static struct i915_vma *load_context(struct intel_context 
*ce, u32 poison)
len = (len + 1) / 2;
*cs++ = MI_LOAD_REGISTER_IMM(len);
while (len--) {
-   *cs++ = hw[dw];
+   *cs++ = safe_offset(hw[dw] & get_offset_mask(ce->engine),
+   hw[dw]);
*cs++ = poison;
dw += 2;
}
-- 
2.32.0



[Intel-gfx] [PATCH 01/21] drm/i915/guc: Fix blocked context accounting

2021-08-15 Thread Matthew Brost
Prior to this patch the blocked context counter was cleared on
init_sched_state (used during registering a context & resets) which is
incorrect. This state needs to be persistent or the counter can read an
incorrect value, resulting in scheduling never getting enabled again.

Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
Signed-off-by: Matthew Brost 
Reviewed-by: Daniel Vetter 
Cc: 
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 87d8dc8f51b9..69faa39da178 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -152,7 +152,7 @@ static inline void init_sched_state(struct intel_context 
*ce)
 {
/* Only should be called from guc_lrc_desc_pin() */
atomic_set(>guc_sched_state_no_lock, 0);
-   ce->guc_state.sched_state = 0;
+   ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
 }
 
 static inline bool
-- 
2.32.0



[Intel-gfx] [PATCH 00/21] Clean up GuC CI failures, simplify locking, and kernel DOC

2021-08-15 Thread Matthew Brost
Daniel Vetter pointed out that locking in the GuC submission code was
overly complicated; let's clean this up a bit before introducing more
features in the GuC submission backend.

Also fix some CI failures, port fixes from our internal tree, and add a
few more selftests for coverage.

Lastly, add some kernel DOC explaining how the GuC submission backend
works.

Signed-off-by: Matthew Brost 

Matthew Brost (21):
  drm/i915/guc: Fix blocked context accounting
  drm/i915/guc: outstanding G2H accounting
  drm/i915/guc: Unwind context requests in reverse order
  drm/i915/guc: Don't drop ce->guc_active.lock when unwinding context
  drm/i915/guc: Workaround reset G2H is received after schedule done G2H
  drm/i915/selftests: Add a cancel request selftest that triggers a
reset
  drm/i915/guc: Don't enable scheduling on a banned context, guc_id
invalid, not registered
  drm/i915/selftests: Fix memory corruption in live_lrc_isolation
  drm/i915/selftests: Add initial GuC selftest for scrubbing lost G2H
  drm/i915/guc: Take context ref when cancelling request
  drm/i915/guc: Don't touch guc_state.sched_state without a lock
  drm/i915/guc: Reset LRC descriptor if register returns -ENODEV
  drm/i915: Allocate error capture in atomic context
  drm/i915/guc: Flush G2H work queue during reset
  drm/i915/guc: Release submit fence from an IRQ
  drm/i915/guc: Move guc_blocked fence to struct guc_state
  drm/i915/guc: Rework and simplify locking
  drm/i915/guc: Proper xarray usage for contexts_lookup
  drm/i915/guc: Drop pin count check trick between sched_disable and
re-pin
  drm/i915/guc: Move GuC priority fields in context under guc_active
  drm/i915/guc: Add GuC kernel doc

 drivers/gpu/drm/i915/gt/intel_context.c   |   5 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h |  68 +-
 drivers/gpu/drm/i915/gt/selftest_lrc.c|  29 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc.h|  19 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 688 +++---
 drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 126 
 drivers/gpu/drm/i915/i915_gpu_error.c |  37 +-
 drivers/gpu/drm/i915/i915_request.h   |  23 +-
 drivers/gpu/drm/i915/i915_trace.h |   8 +-
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 drivers/gpu/drm/i915/selftests/i915_request.c |  94 +++
 .../i915/selftests/intel_scheduler_helpers.c  |  12 +
 .../i915/selftests/intel_scheduler_helpers.h  |   2 +
 13 files changed, 805 insertions(+), 307 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/uc/selftest_guc.c

-- 
2.32.0



Re: [Intel-gfx] [PATCH v2] drm: avoid races with modesetting rights

2021-08-15 Thread kernel test robot
Hi Desmond,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on next-20210813]
[also build test WARNING on v5.14-rc5]
[cannot apply to linus/master v5.14-rc5 v5.14-rc4 v5.14-rc3]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Desmond-Cheong-Zhi-Xi/drm-avoid-races-with-modesetting-rights/20210815-234145
base:4b358aabb93a2c654cd1dcab1a25a589f6e2b153
config: arc-randconfig-r031-20210815 (attached as .config)
compiler: arceb-elf-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
chmod +x ~/bin/make.cross
# 
https://github.com/0day-ci/linux/commit/cf6d8354b7d7953cd866fad004cbb189adfa074f
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review 
Desmond-Cheong-Zhi-Xi/drm-avoid-races-with-modesetting-rights/20210815-234145
git checkout cf6d8354b7d7953cd866fad004cbb189adfa074f
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross 
ARCH=arc 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All warnings (new ones prefixed by >>):

>> drivers/gpu/drm/drm_auth.c:483:6: warning: no previous prototype for 
>> 'master_flush' [-Wmissing-prototypes]
 483 | void master_flush(struct callback_head *cb)
 |  ^~~~


vim +/master_flush +483 drivers/gpu/drm/drm_auth.c

   479  
   480  /* After flushing, all readers that might have seen old master/lease
   481   * permissions are guaranteed to have completed.
   482   */
 > 483  void master_flush(struct callback_head *cb)
   484  {
   485  struct drm_device *dev = container_of(cb, struct drm_device,
   486master_flush_work);
   487  
   488  down_write(&dev->master_rwsem);
   489  up_write(&dev->master_rwsem);
   490  }
   491  
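
One conventional way to address a -Wmissing-prototypes warning like this (an
assumption about intent, not necessarily the fix the author will choose) is
to give the function internal linkage if it is only used within drm_auth.c;
otherwise, declare it in a shared header:

	static void master_flush(struct callback_head *cb)
	{
		struct drm_device *dev = container_of(cb, struct drm_device,
						      master_flush_work);

		down_write(&dev->master_rwsem);
		up_write(&dev->master_rwsem);
	}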

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org


.config.gz
Description: application/gzip


Re: [Intel-gfx] [PATCH v2] drm: avoid races with modesetting rights

2021-08-15 Thread kernel test robot
Hi Desmond,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on next-20210813]
[also build test ERROR on v5.14-rc5]
[cannot apply to linus/master v5.14-rc5 v5.14-rc4 v5.14-rc3]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Desmond-Cheong-Zhi-Xi/drm-avoid-races-with-modesetting-rights/20210815-234145
base:4b358aabb93a2c654cd1dcab1a25a589f6e2b153
config: i386-randconfig-a004-20210815 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
# 
https://github.com/0day-ci/linux/commit/cf6d8354b7d7953cd866fad004cbb189adfa074f
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review 
Desmond-Cheong-Zhi-Xi/drm-avoid-races-with-modesetting-rights/20210815-234145
git checkout cf6d8354b7d7953cd866fad004cbb189adfa074f
# save the attached .config to linux build tree
make W=1 ARCH=i386 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All errors (new ones prefixed by >>, old ones prefixed by <<):

>> ERROR: modpost: "task_work_add" [drivers/gpu/drm/drm.ko] undefined!

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org


.config.gz
Description: application/gzip


[Intel-gfx] ✗ Fi.CI.BUILD: failure for Patch "drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg" has been added to the 5.10-stable tree

2021-08-15 Thread Patchwork
== Series Details ==

Series: Patch "drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg" has 
been added to the 5.10-stable tree
URL   : https://patchwork.freedesktop.org/series/93699/
State : failure

== Summary ==

Patch is empty.
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".




[Intel-gfx] Patch "drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg" has been added to the 5.13-stable tree

2021-08-15 Thread gregkh


This is a note to let you know that I've just added the patch titled

drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg

to the 5.13-stable tree which can be found at:

http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
 drm-i915-display-fix-the-12-bpc-bits-for-pipe_misc-reg.patch
and it can be found in the queue-5.13 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let  know about it.


>From abd9d66a055722393d33685214c08386694871d7 Mon Sep 17 00:00:00 2001
From: Ankit Nautiyal 
Date: Wed, 11 Aug 2021 10:48:57 +0530
Subject: drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Ankit Nautiyal 

commit abd9d66a055722393d33685214c08386694871d7 upstream.

Till DISPLAY12 the PIPE_MISC bits 5-7 are used to set the
Dithering BPC, with valid values of 6, 8, 10 BPC.
For ADLP+ these bits are used to set the PORT OUTPUT BPC, with valid
values of: 6, 8, 10, 12 BPC, and need to be programmed whether
dithering is enabled or not.

This patch:
-corrects the bits 5-7 for PIPE MISC register for 12 BPC.
-renames the bits and mask to have generic names for these bits for
dithering bpc and port output bpc.

v3: Added a note for MIPI DSI which uses the PIPE_MISC for readout
for pipe_bpp. (Uma Shankar)

v2: Added 'display' to the subject and fixes tag. (Uma Shankar)

Fixes: 756f85cffef2 ("drm/i915/bdw: Broadwell has PIPEMISC")
Cc: Paulo Zanoni  (v1)
Cc: Ville Syrjälä 
Cc: Daniel Vetter 
Cc: Jani Nikula 
Cc: Joonas Lahtinen 
Cc: Rodrigo Vivi 
Cc: intel-gfx@lists.freedesktop.org
Cc:  # v3.13+

Signed-off-by: Ankit Nautiyal 
Reviewed-by: Uma Shankar 
Signed-off-by: Uma Shankar 
Link: 
https://patchwork.freedesktop.org/patch/msgid/20210811051857.109723-1-ankit.k.nauti...@intel.com
(cherry picked from commit 70418a68713c13da3f36c388087d0220b456a430)
Signed-off-by: Rodrigo Vivi 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/gpu/drm/i915/display/intel_display.c |   34 +++
 drivers/gpu/drm/i915/i915_reg.h  |   16 
 2 files changed, 35 insertions(+), 15 deletions(-)

--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -5424,16 +5424,18 @@ static void bdw_set_pipemisc(const struc
 
switch (crtc_state->pipe_bpp) {
case 18:
-   val |= PIPEMISC_DITHER_6_BPC;
+   val |= PIPEMISC_6_BPC;
break;
case 24:
-   val |= PIPEMISC_DITHER_8_BPC;
+   val |= PIPEMISC_8_BPC;
break;
case 30:
-   val |= PIPEMISC_DITHER_10_BPC;
+   val |= PIPEMISC_10_BPC;
break;
case 36:
-   val |= PIPEMISC_DITHER_12_BPC;
+   /* Port output 12BPC defined for ADLP+ */
+   if (DISPLAY_VER(dev_priv) > 12)
+   val |= PIPEMISC_12_BPC_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@@ -5469,15 +5471,27 @@ int bdw_get_pipemisc_bpp(struct intel_cr
 
tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
-   switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-   case PIPEMISC_DITHER_6_BPC:
+   switch (tmp & PIPEMISC_BPC_MASK) {
+   case PIPEMISC_6_BPC:
return 18;
-   case PIPEMISC_DITHER_8_BPC:
+   case PIPEMISC_8_BPC:
return 24;
-   case PIPEMISC_DITHER_10_BPC:
+   case PIPEMISC_10_BPC:
return 30;
-   case PIPEMISC_DITHER_12_BPC:
-   return 36;
+   /*
+* PORT OUTPUT 12 BPC defined for ADLP+.
+*
+* TODO:
+* For previous platforms with DSI interface, bits 5:7
+* are used for storing pipe_bpp irrespective of dithering.
+* Since the value of 12 BPC is not defined for these bits
+* on older platforms, need to find a workaround for 12 BPC
+* MIPI DSI HW readout.
+*/
+   case PIPEMISC_12_BPC_ADLP:
+   if (DISPLAY_VER(dev_priv) > 12)
+   return 36;
+   fallthrough;
default:
MISSING_CASE(tmp);
return 0;
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6134,11 +6134,17 @@ enum {
 #define   PIPEMISC_HDR_MODE_PRECISION  (1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
 #define   PIPEMISC_PIXEL_ROUNDING_TRUNCREG_BIT(8) /* tgl+ */
-#define   PIPEMISC_DITHER_BPC_MASK (7 << 5)
-#define   PIPEMISC_DITHER_8_BPC(0 << 5)
-#define   PIPEMISC_DITHER_10_BPC   (1 << 5)
-#define   PIPEMISC_DITHER_6_BPC(2 << 5)
-#define   PIPEMISC_DITHER_12_BPC   (3 << 5)
+/*
+ * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of: 6, 8, 10 BPC.
+ 

[Intel-gfx] Patch "drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg" has been added to the 5.10-stable tree

2021-08-15 Thread gregkh


This is a note to let you know that I've just added the patch titled

drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg

to the 5.10-stable tree which can be found at:

http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
 drm-i915-display-fix-the-12-bpc-bits-for-pipe_misc-reg.patch
and it can be found in the queue-5.10 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let  know about it.


From abd9d66a055722393d33685214c08386694871d7 Mon Sep 17 00:00:00 2001
From: Ankit Nautiyal 
Date: Wed, 11 Aug 2021 10:48:57 +0530
Subject: drm/i915/display: Fix the 12 BPC bits for PIPE_MISC reg
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Ankit Nautiyal 

commit abd9d66a055722393d33685214c08386694871d7 upstream.

Up to DISPLAY12, the PIPE_MISC bits 5-7 are used to set the
dithering BPC, with valid values of 6, 8 and 10 BPC.
For ADLP+ these bits set the PORT OUTPUT BPC, with valid values of
6, 8, 10 and 12 BPC, and must be programmed whether or not dithering
is enabled.
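
In code terms, this amounts to a per-platform validity rule for the value
programmed into bits 5-7; a minimal hedged sketch (not an i915 helper, names
invented for illustration) could look like:

#include <stdbool.h>

/* Which BPC values can be represented in PIPE_MISC bits 5-7?
 * display < 13:  dither BPC, 6/8/10 only.
 * display >= 13: port output BPC, 6/8/10/12, programmed whether or
 * not dithering is enabled. */
bool pipemisc_bpc_valid(int bpc, int display_ver)
{
	switch (bpc) {
	case 6:
	case 8:
	case 10:
		return true;
	case 12:
		return display_ver >= 13;
	default:
		return false;
	}
}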

This patch:
-corrects bits 5-7 of the PIPE_MISC register for 12 BPC.
-renames the bits and mask to generic names that cover both dithering
bpc and port output bpc.

v3: Added a note about MIPI DSI, which uses PIPE_MISC for the pipe_bpp
readout. (Uma Shankar)

v2: Added 'display' to the subject and fixes tag. (Uma Shankar)

Fixes: 756f85cffef2 ("drm/i915/bdw: Broadwell has PIPEMISC")
Cc: Paulo Zanoni  (v1)
Cc: Ville Syrjälä 
Cc: Daniel Vetter 
Cc: Jani Nikula 
Cc: Joonas Lahtinen 
Cc: Rodrigo Vivi 
Cc: intel-gfx@lists.freedesktop.org
Cc:  # v3.13+

Signed-off-by: Ankit Nautiyal 
Reviewed-by: Uma Shankar 
Signed-off-by: Uma Shankar 
Link: 
https://patchwork.freedesktop.org/patch/msgid/20210811051857.109723-1-ankit.k.nauti...@intel.com
(cherry picked from commit 70418a68713c13da3f36c388087d0220b456a430)
Signed-off-by: Rodrigo Vivi 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/gpu/drm/i915/display/intel_display.c |   34 +++
 drivers/gpu/drm/i915/i915_reg.h  |   16 
 2 files changed, 35 insertions(+), 15 deletions(-)

--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -10173,16 +10173,18 @@ static void bdw_set_pipemisc(const struc
 
switch (crtc_state->pipe_bpp) {
case 18:
-   val |= PIPEMISC_DITHER_6_BPC;
+   val |= PIPEMISC_6_BPC;
break;
case 24:
-   val |= PIPEMISC_DITHER_8_BPC;
+   val |= PIPEMISC_8_BPC;
break;
case 30:
-   val |= PIPEMISC_DITHER_10_BPC;
+   val |= PIPEMISC_10_BPC;
break;
case 36:
-   val |= PIPEMISC_DITHER_12_BPC;
+   /* Port output 12BPC defined for ADLP+ */
+   if (DISPLAY_VER(dev_priv) > 12)
+   val |= PIPEMISC_12_BPC_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@@ -10218,15 +10220,27 @@ int bdw_get_pipemisc_bpp(struct intel_cr
 
tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
-   switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-   case PIPEMISC_DITHER_6_BPC:
+   switch (tmp & PIPEMISC_BPC_MASK) {
+   case PIPEMISC_6_BPC:
return 18;
-   case PIPEMISC_DITHER_8_BPC:
+   case PIPEMISC_8_BPC:
return 24;
-   case PIPEMISC_DITHER_10_BPC:
+   case PIPEMISC_10_BPC:
return 30;
-   case PIPEMISC_DITHER_12_BPC:
-   return 36;
+   /*
+* PORT OUTPUT 12 BPC defined for ADLP+.
+*
+* TODO:
+* For previous platforms with DSI interface, bits 5:7
+* are used for storing pipe_bpp irrespective of dithering.
+* Since the value of 12 BPC is not defined for these bits
+* on older platforms, need to find a workaround for 12 BPC
+* MIPI DSI HW readout.
+*/
+   case PIPEMISC_12_BPC_ADLP:
+   if (DISPLAY_VER(dev_priv) > 12)
+   return 36;
+   fallthrough;
default:
MISSING_CASE(tmp);
return 0;
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6011,11 +6011,17 @@ enum {
 #define   PIPEMISC_HDR_MODE_PRECISION		(1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV	(1 << 11)
 #define   PIPEMISC_PIXEL_ROUNDING_TRUNC	REG_BIT(8) /* tgl+ */
-#define   PIPEMISC_DITHER_BPC_MASK		(7 << 5)
-#define   PIPEMISC_DITHER_8_BPC		(0 << 5)
-#define   PIPEMISC_DITHER_10_BPC		(1 << 5)
-#define   PIPEMISC_DITHER_6_BPC		(2 << 5)
-#define   PIPEMISC_DITHER_12_BPC		(3 << 5)
+/*
+ * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of: 6, 8, 10
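
The archived hunk is cut off above. Based on the DITHER defines it removes
and the PIPEMISC_*_BPC / PIPEMISC_12_BPC_ADLP names used in the
intel_display.c hunk, the replacement block plausibly ends along the
following lines; the exact bit encoding of PIPEMISC_12_BPC_ADLP is an
assumption of this sketch, not taken from the truncated patch text.

/*
 * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
 * valid values of: 6, 8, 10 BPC.
 * For ADLP+, the same bits represent PORT OUTPUT BPC with valid values
 * of: 6, 8, 10, 12 BPC.
 */
#define   PIPEMISC_BPC_MASK		(7 << 5)
#define   PIPEMISC_8_BPC		(0 << 5)
#define   PIPEMISC_10_BPC		(1 << 5)
#define   PIPEMISC_6_BPC		(2 << 5)
#define   PIPEMISC_12_BPC_ADLP		(4 << 5) /* adlp+, encoding assumed in this sketch */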