Re: [Intel-gfx] [PATCH 1/1] drm/i915/guc: Enable compute scheduling on DG2

2022-09-22 Thread Umesh Nerlige Ramappa

On Thu, Sep 22, 2022 at 01:12:09PM -0700, john.c.harri...@intel.com wrote:

From: John Harrison 

DG2 has issues. To work around one of these, the GuC must schedule
apps in an exclusive manner across both RCS and CCS. That is, if a
context from app X is running on RCS then all CCS engines must sit
idle even if there are contexts from apps Y, Z, ... waiting to run. A
certain OS favours RCS to the total starvation of CCS. Linux does not.
Hence the GuC now has a scheduling policy setting to control this
arbitration.

Signed-off-by: John Harrison 


lgtm,

Reviewed-by: Umesh Nerlige Ramappa 

Regards,
Umesh

[PATCH 1/1] drm/i915/guc: Enable compute scheduling on DG2

2022-09-22 Thread John . C . Harrison
From: John Harrison 

DG2 has issues. To work around one of these, the GuC must schedule
apps in an exclusive manner across both RCS and CCS. That is, if a
context from app X is running on RCS then all CCS engines must sit
idle even if there are contexts from apps Y, Z, ... waiting to run. A
certain OS favours RCS to the total starvation of CCS. Linux does not.
Hence the GuC now has a scheduling policy setting to control this
arbitration.

Signed-off-by: John Harrison 
---
 .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h  |  1 +
 drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h |  9 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h   | 22 +
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 93 +++
 4 files changed, 124 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 29ef8afc8c2e4..f359bef046e0b 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -117,6 +117,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+   INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
INTEL_GUC_ACTION_SCHED_CONTEXT = 0x1000,
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 4a59478c3b5c4..58012edd4eb0e 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -81,10 +81,17 @@
 #define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY  0x0907
 #define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN  1u
 
+/*
+ * Global scheduling policy update keys.
+ */
+enum {
+   GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD = 0x1001,
+};
+
 /*
  * Per context scheduling policy update keys.
  */
-enum  {
+enum {
	GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM		= 0x2001,
	GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT		= 0x2002,
	GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY		= 0x2003,
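
The new global key uses the same key/length/value encoding as the other keys
in this header. As a minimal sketch (assuming the GUC_KLV_0_KEY/GUC_KLV_0_LEN
masks defined in this file and FIELD_PREP() from <linux/bitfield.h>; this is
not code from the patch), the KLV header dword for it would be built as:

	/*
	 * Sketch only: pack the render/compute yield key into a KLV header
	 * dword. With key 0x1001 and a two-dword value this packs to 0x10010002.
	 */
	u32 klv_hdr = FIELD_PREP(GUC_KLV_0_KEY,
				 GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD) |
		      FIELD_PREP(GUC_KLV_0_LEN, 2); /* two value dwords follow */
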
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 323b055e5db97..e7a7fb450f442 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -290,6 +290,25 @@ struct guc_update_context_policy {
struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
 } __packed;
 
+/* Format of the UPDATE_SCHEDULING_POLICIES H2G data packet */
+struct guc_update_scheduling_policy_header {
+   u32 action;
+} __packed;
+
+/*
+ * Can't dynamically allocate memory for the scheduling policy KLV because
+ * it will be sent from within the reset path. Need a fixed size lump on
+ * the stack instead :(.
+ *
+ * Currently, there is only one KLV defined, which has 1 word of KL +
+ * 2 words of V.
+ */
+#define MAX_SCHEDULING_POLICY_SIZE 3
+
+struct guc_update_scheduling_policy {
+   struct guc_update_scheduling_policy_header header;
+   u32 data[MAX_SCHEDULING_POLICY_SIZE];
+} __packed;
+
 #define GUC_POWER_UNSPECIFIED  0
 #define GUC_POWER_D0   1
 #define GUC_POWER_D1   2
@@ -298,6 +317,9 @@ struct guc_update_context_policy {
 
 /* Scheduling policy settings */
 
+#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION   100 /* in ms */
+#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO  50  /* in percent */
+
 #define GLOBAL_POLICY_MAX_NUM_WI 15
 
 /* Don't reset an engine upon preemption failure */
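
Put together with the struct above, the complete UPDATE_SCHEDULING_POLICIES_KLV
payload with these defaults comes to four dwords. A sketch of the resulting
dword stream (the header dword value assumes the 16-bit key / 16-bit length
KLV layout from guc_klvs_abi.h; this array is illustrative, not from the patch):

	/* Sketch only: full H2G payload for the render/compute yield policy */
	static const u32 rc_yield_policy_example[] = {
		INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV,	/* 0x509 */
		(GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD << 16) | 2,
								/* 0x10010002 */
		GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION,	/* 100 ms */
		GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO,		/* 50 percent */
	};
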
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 22ba66e48a9b0..f09f530198f4d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -4177,6 +4177,98 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
return 0;
 }
 
+struct scheduling_policy {
+   /* internal data */
+   u32 max_words, num_words;
+   u32 count;
+   /* API data */
+   struct guc_update_scheduling_policy h2g;
+};
+
+static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy)
+{
+   u32 *start = (void *)&policy->h2g;
+   u32 *end = policy->h2g.data + policy->num_words;
+   size_t delta = end - start;
+
+   return delta;
+}
+
+static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy)
+{
+	policy->h2g.header.action = INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
+   policy->max_words = ARRAY_SIZE(policy->h2g.data);
+   policy->num_words = 0;
+   policy->count = 0;
+
+   return policy;
+}
+
+static void __guc_scheduling_policy_add_klv(struct scheduling_p
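
The archived diff is cut off above. As orientation only, and not the remainder
of the patch: assuming a hypothetical __guc_scheduling_policy_add_klv(policy,
key, data, len) helper that appends one KLV to the builder, and the existing
intel_guc_send_busy_loop() helper from intel_guc.h, these helpers would be
chained roughly as follows to build and send the yield policy:

	/*
	 * Sketch only: build the single render/compute yield KLV and send it
	 * as one H2G action. The add_klv() helper named here is hypothetical.
	 */
	static int sketch_send_rc_yield_policy(struct intel_guc *guc)
	{
		struct scheduling_policy policy;
		u32 yield[] = {
			GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION,
			GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO,
		};

		__guc_scheduling_policy_start_klv(&policy);
		__guc_scheduling_policy_add_klv(&policy,
						GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD,
						yield, ARRAY_SIZE(yield));

		return intel_guc_send_busy_loop(guc, (u32 *)&policy.h2g,
						__guc_scheduling_policy_action_size(&policy),
						0, true);
	}
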