On 2/18/2022 1:33 PM, john.c.harri...@intel.com wrote:
From: John Harrison <john.c.harri...@intel.com>

GuC converts the pre-emption timeout and timeslice quantum values into
clock ticks internally. That significantly lowers the point at which
32-bit overflow occurs. On current platforms, the worst case scenario is
approximately 110 seconds. Rather than allowing the user to set higher
values and then get confused by early timeouts, add limits when setting
these values.
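
(For a rough sense of where the ~110 s figure comes from, illustration only since the exact GuC tick rate is platform dependent and not given in this thread: with a hypothetical 38.4 MHz tick rate, a 32-bit tick counter overflows at

	0xffffffff ticks / 38,400,000 ticks/s ~= 111.8 s

so capping at 100 s leaves a margin below the earliest overflow on such a platform.)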

Signed-off-by: John Harrison <john.c.harri...@intel.com>
---
  drivers/gpu/drm/i915/gt/intel_engine_cs.c   | 15 +++++++++++++++
  drivers/gpu/drm/i915/gt/sysfs_engines.c     | 14 ++++++++++++++
  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h |  9 +++++++++
  3 files changed, 38 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index e53008b4dd05..2a1e9f36e6f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -389,6 +389,21 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
        if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
                engine->props.preempt_timeout_ms = 0;
+       /* Cap timeouts to prevent overflow inside GuC */
+       if (intel_guc_submission_is_wanted(&gt->uc.guc)) {
+               if (engine->props.timeslice_duration_ms > GUC_POLICY_MAX_EXEC_QUANTUM_MS) {
+                       drm_info(&engine->i915->drm, "Warning, clamping timeslice duration to %d to prevent possibly overflow\n",

I'd drop the word "possibly"

+                                GUC_POLICY_MAX_EXEC_QUANTUM_MS);
+                       engine->props.timeslice_duration_ms = GUC_POLICY_MAX_EXEC_QUANTUM_MS;
+               }
+
+               if (engine->props.preempt_timeout_ms > GUC_POLICY_MAX_PREEMPT_TIMEOUT_MS) {
+                       drm_info(&engine->i915->drm, "Warning, clamping pre-emption timeout to %d to prevent possibly overflow\n",
+                                GUC_POLICY_MAX_PREEMPT_TIMEOUT_MS);
+                       engine->props.preempt_timeout_ms = GUC_POLICY_MAX_PREEMPT_TIMEOUT_MS;
+               }
+       }
+
        engine->defaults = engine->props; /* never to change again */
        engine->context_size = intel_engine_context_size(gt, engine->class);
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index 967031056202..f57efe026474 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -221,6 +221,13 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
        if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;
+       if (intel_uc_uses_guc_submission(&engine->gt->uc) &&
+           duration > GUC_POLICY_MAX_EXEC_QUANTUM_MS) {
+               duration = GUC_POLICY_MAX_EXEC_QUANTUM_MS;
+               drm_info(&engine->i915->drm, "Warning, clamping timeslice duration to %lld to prevent possibly overflow\n",
+                        duration);
+       }
+
        WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
        if (execlists_active(&engine->execlists))
@@ -325,6 +332,13 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
        if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;
+       if (intel_uc_uses_guc_submission(&engine->gt->uc) &&
+           timeout > GUC_POLICY_MAX_PREEMPT_TIMEOUT_MS) {
+               timeout = GUC_POLICY_MAX_PREEMPT_TIMEOUT_MS;
+               drm_info(&engine->i915->drm, "Warning, clamping pre-emption timeout to %lld to prevent possibly overflow\n",
+                        timeout);
+       }
+
        WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
        if (READ_ONCE(engine->execlists.pending[0]))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 6a4612a852e2..ad131092f8df 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -248,6 +248,15 @@ struct guc_lrc_desc {
  #define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000

+/*
+ * GuC converts the timeout to clock ticks internally. Different platforms have
+ * different GuC clocks. Thus, the maximum value before overflow is platform
+ * dependent. Current worst case scenario is about 110s. So, limit to 100s to be
+ * safe.
+ */
+#define GUC_POLICY_MAX_EXEC_QUANTUM_MS         (100 * 1000)
+#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_MS      (100 * 1000)

Those values don't seem to be defined in the GuC interface. If I'm correct, IMO we need to ask the GuC team to add them in, because it shouldn't be our responsibility to convert from ms to GuC clocks, considering that the interface is in ms. Not a blocker for this patch.
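
As a purely illustrative sketch of that alternative (not code from this series, and assuming the GuC tick rate matches the GT clock i915 already tracks in gt->clock_frequency, which this thread does not confirm), a driver-side helper could derive the largest safe millisecond value instead of hard-coding 100s:

	/* Hypothetical sketch: largest ms value whose tick conversion still
	 * fits in a u32 on this platform. ticks = ms * hz / 1000, so
	 * ms_max = U32_MAX * 1000 / hz. Needs <linux/math64.h>.
	 */
	static u64 guc_policy_max_ms(struct intel_gt *gt)
	{
		return div_u64(mul_u32_u32(U32_MAX, 1000), gt->clock_frequency);
	}

Having the firmware interface define the limits, as suggested above, would avoid the driver needing to know the tick rate at all.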

Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospu...@intel.com>

Daniele

+
  struct guc_policies {
        u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
        /* In micro seconds. How much time to allow before DPC processing is
