GVT requires single submission and does not allow requests to be merged. So
when the GuC submits a GVT request, the next request must not be submitted
to the GuC until the previous one has completed. This matches the existing
behaviour of execlist mode submission.
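
For illustration only (not part of the patch), below is a minimal
user-space sketch of the decision the dequeue paths make with this change;
mock_context and the helper names in it are simplified stand-ins for
i915_gem_context, i915_gem_context_can_merge() and
intel_gvt_context_single_port_submit():

/*
 * Stand-alone sketch (not kernel code): a simplified model of the
 * dequeue decision.  mock_context stands in for i915_gem_context.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_context {
	int id;
	bool force_single_submission;	/* set for GVT contexts */
};

/* Mirrors i915_gem_context_can_merge(): same context, not single-submission. */
static bool can_merge(const struct mock_context *prev,
		      const struct mock_context *next)
{
	if (prev != next)
		return false;
	if (prev->force_single_submission)
		return false;
	return true;
}

/* Mirrors intel_gvt_context_single_port_submit() when GVT is enabled. */
static bool single_port_submit(const struct mock_context *ctx)
{
	return ctx->force_single_submission;
}

int main(void)
{
	struct mock_context normal = { .id = 1, .force_single_submission = false };
	struct mock_context gvt    = { .id = 2, .force_single_submission = true  };

	/* Two requests from the same ordinary context: may share a port. */
	printf("normal+normal merge: %d\n", can_merge(&normal, &normal));

	/* Two requests from the same GVT context: never merged ... */
	printf("gvt+gvt merge: %d\n", can_merge(&gvt, &gvt));

	/* ... and the second port must also stay empty, so dequeue stops. */
	if (single_port_submit(&gvt))
		printf("GVT request: stop dequeue until it completes\n");

	return 0;
}

Running it shows that two requests from the same ordinary context may be
merged into one port, while a force-single-submission (GVT) context stops
the dequeue until its request has completed.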

v2: make force-single-submission specific to gvt (Chris)
v3: keep the original code implementation (Chris)

Cc: ch...@chris-wilson.co.uk
Signed-off-by: Chuanxiao Dong <chuanxiao.d...@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.h    | 13 +++++++++++++
 drivers/gpu/drm/i915/i915_guc_submission.c |  6 +++++-
 drivers/gpu/drm/i915/intel_gvt.h           | 11 +++++++++++
 drivers/gpu/drm/i915/intel_lrc.c           | 25 ++++---------------------
 4 files changed, 33 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 4af2ab94..2c3afec 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -246,6 +246,19 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
        return !ctx->file_priv;
 }
 
+static inline bool
+i915_gem_context_can_merge(const struct i915_gem_context *prev,
+               const struct i915_gem_context *next)
+{
+       if (prev != next)
+               return false;
+
+       if (i915_gem_context_force_single_submission(prev))
+               return false;
+
+       return true;
+}
+
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
 void i915_gem_context_lost(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 1642fff..862f4fd 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -668,10 +668,14 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
                struct drm_i915_gem_request *rq =
                        rb_entry(rb, typeof(*rq), priotree.node);
 
-               if (last && rq->ctx != last->ctx) {
+               if (last && !i915_gem_context_can_merge(last->ctx, rq->ctx)) {
                        if (port != engine->execlist_port)
                                break;
 
+                       if (intel_gvt_context_single_port_submit(last->ctx) ||
+                               intel_gvt_context_single_port_submit(rq->ctx))
+                               break;
+
                        i915_gem_request_assign(&port->request, last);
                        nested_enable_signaling(last);
                        port++;
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 25df2d6..c0dcd66 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -32,6 +32,12 @@ void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
 int intel_gvt_init_device(struct drm_i915_private *dev_priv);
 void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
 int intel_gvt_init_host(void);
+
+static inline bool
+intel_gvt_context_single_port_submit(const struct i915_gem_context *ctx)
+{
+       return i915_gem_context_force_single_submission(ctx);
+}
 #else
 static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
 {
@@ -40,6 +46,11 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
 static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
 {
 }
+static inline bool
+intel_gvt_context_single_port_submit(const struct i915_gem_context *ctx)
+{
+       return false;
+}
 #endif
 
 #endif /* _INTEL_GVT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0dc1cc4..61291e9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -377,24 +377,6 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
        writel(lower_32_bits(desc[0]), elsp);
 }
 
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
-{
-       return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-               i915_gem_context_force_single_submission(ctx));
-}
-
-static bool can_merge_ctx(const struct i915_gem_context *prev,
-                         const struct i915_gem_context *next)
-{
-       if (prev != next)
-               return false;
-
-       if (ctx_single_port_submission(prev))
-               return false;
-
-       return true;
-}
-
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *last;
@@ -450,7 +432,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                 * request, and so we never need to tell the hardware about
                 * the first.
                 */
-               if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
+               if (last &&
+                       !i915_gem_context_can_merge(last->ctx, cursor->ctx)) {
                        /* If we are on the second port and cannot combine
                         * this request with the last, then we are done.
                         */
@@ -463,8 +446,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                         * context (even though a different request) to
                         * the second port.
                         */
-                       if (ctx_single_port_submission(last->ctx) ||
-                           ctx_single_port_submission(cursor->ctx))
+                       if (intel_gvt_context_single_port_submit(last->ctx) ||
+                           intel_gvt_context_single_port_submit(cursor->ctx))
                                break;
 
                        GEM_BUG_ON(last->ctx == cursor->ctx);
-- 
2.7.4
