---
 drivers/gpu/drm/i915/i915_drv.h               |  1 +
 drivers/gpu/drm/i915/i915_gem.c               | 13 ++--
 drivers/gpu/drm/i915/i915_gem_context.c       | 66 +------------------
 drivers/gpu/drm/i915/i915_gem_context.h       |  3 +-
 drivers/gpu/drm/i915/i915_gem_evict.c         |  2 +-
 drivers/gpu/drm/i915/i915_request.c           |  1 +
 drivers/gpu/drm/i915/intel_engine_cs.c        |  5 ++
 .../gpu/drm/i915/selftests/i915_gem_context.c |  3 +-
 .../gpu/drm/i915/selftests/igt_flush_test.c   |  2 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  4 ++
 10 files changed, 28 insertions(+), 72 deletions(-)
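
The change replaces the per-timeline kernel-context barrier tracking
(engine_has_kernel_context_barrier and the timeline sync point) with a
single gt.active_engines bitmask: request emission sets the engine's
bit, parking clears the mask, and i915_gem_switch_to_kernel_context()
now takes the mask of engines to flush. As a standalone userspace
sketch of that lifecycle (all names here are illustrative stand-ins,
not the driver code):

    /* Sketch of the active_engines lifecycle: bits accumulate as
     * engines emit requests, the mask is handed to the kernel-context
     * switch, and parking clears it.  Illustrative only.
     */
    #include <stdio.h>

    static unsigned long active_engines;

    static void request_add(unsigned int engine_id)
    {
            active_engines |= 1UL << engine_id;     /* request->engine->mask */
    }

    static void switch_to_kernel_context(unsigned long mask)
    {
            for (unsigned int id = 0; id < 64; id++)
                    if (mask & (1UL << id))
                            printf("switch engine %u to kernel context\n", id);
    }

    static void engines_park(void)
    {
            active_engines = 0;                     /* intel_engines_park() */
    }

    int main(void)
    {
            request_add(0);                         /* e.g. rcs0 busy */
            request_add(2);                         /* e.g. vcs0 busy */
            switch_to_kernel_context(active_engines);  /* idle/suspend path */
            switch_to_kernel_context(-1UL);         /* resume path: everything */
            engines_park();
            return 0;
    }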

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8a72dad9471f..e554691304dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1980,6 +1980,7 @@ struct drm_i915_private {
 
                struct list_head active_rings;
                struct list_head closed_vma;
+               unsigned long active_engines;
                u32 active_requests;
 
                /**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 690a111f3c58..cb3232a16394 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2947,9 +2947,10 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
        }
 }
 
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+                                         unsigned long mask)
 {
-       if (i915_gem_switch_to_kernel_context(i915))
+       if (i915_gem_switch_to_kernel_context(i915, mask))
                return false;
 
        if (i915_gem_wait_for_idle(i915,
@@ -2964,7 +2965,8 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
 
 static bool load_power_context(struct drm_i915_private *i915)
 {
-       if (!switch_to_kernel_context_sync(i915))
+       /* Force loading the kernel context on all engines */
+       if (!switch_to_kernel_context_sync(i915, -1))
                return false;
 
        /*
@@ -3013,7 +3015,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
        if (!gt->active_requests && !work_pending(&gt->idle_work.work)) {
                ++gt->active_requests; /* don't requeue idle */
 
-               if (!switch_to_kernel_context_sync(i915)) {
+               if (!switch_to_kernel_context_sync(i915,
+                                                  i915->gt.active_engines)) {
                        dev_err(i915->drm.dev,
                                "Failed to idle engines, declaring wedged!\n");
                        GEM_TRACE_DUMP();
@@ -4487,7 +4490,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
-       if (!switch_to_kernel_context_sync(i915)) {
+       if (!switch_to_kernel_context_sync(i915, i915->gt.active_engines)) {
                /* Forcibly cancel outstanding work and leave the gpu quiet. */
                i915_gem_set_wedged(i915);
        }
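
A note on the two call patterns above: load_power_context() passes -1
to force the switch on every engine, while the idle and suspend paths
pass gt.active_engines so only engines that have emitted work since
the last park are flushed. Passed as an unsigned long, -1 has every
bit set, so it subsumes any per-engine test. A minimal standalone
check of that property (illustrative only):

    #include <assert.h>

    int main(void)
    {
            unsigned long all = -1;         /* the "-1" mask above */

            for (unsigned int id = 0; id < 8 * sizeof(all); id++)
                    assert(all & (1UL << id));
            return 0;
    }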
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 1bdb067845f2..582a7015e6a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -682,63 +682,10 @@ last_request_on_engine(struct i915_timeline *timeline,
        return NULL;
 }
 
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *i915 = engine->i915;
-       const struct intel_context * const ce =
-               to_intel_context(i915->kernel_context, engine);
-       struct i915_timeline *barrier = ce->ring->timeline;
-       struct intel_ring *ring;
-       bool any_active = false;
-
-       lockdep_assert_held(&i915->drm.struct_mutex);
-       list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
-               struct i915_request *rq;
-
-               rq = last_request_on_engine(ring->timeline, engine);
-               if (!rq)
-                       continue;
-
-               any_active = true;
-
-               if (rq->hw_context == ce)
-                       continue;
-
-               /*
-                * Was this request submitted after the previous
-                * switch-to-kernel-context?
-                */
-               if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
-                       GEM_TRACE("%s needs barrier for %llx:%lld\n",
-                                 ring->timeline->name,
-                                 rq->fence.context,
-                                 rq->fence.seqno);
-                       return false;
-               }
-
-               GEM_TRACE("%s has barrier after %llx:%lld\n",
-                         ring->timeline->name,
-                         rq->fence.context,
-                         rq->fence.seqno);
-       }
-
-       /*
-        * If any other timeline was still active and behind the last barrier,
-        * then our last switch-to-kernel-context must still be queued and
-        * will run last (leaving the engine in the kernel context when it
-        * eventually idles).
-        */
-       if (any_active)
-               return true;
-
-       /* The engine is idle; check that it is idling in the kernel context. */
-       return engine->last_retired_context == ce;
-}
-
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+                                     unsigned long mask)
 {
        struct intel_engine_cs *engine;
-       enum intel_engine_id id;
 
        GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
 
@@ -749,17 +696,11 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
        if (i915_terminally_wedged(&i915->gpu_error))
                return 0;
 
-       i915_retire_requests(i915);
-
-       for_each_engine(engine, i915, id) {
+       for_each_engine_masked(engine, i915, mask, mask) {
                struct intel_ring *ring;
                struct i915_request *rq;
 
                GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-               if (engine_has_kernel_context_barrier(engine))
-                       continue;
-
-               GEM_TRACE("emit barrier on %s\n", engine->name);
 
                rq = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(rq))
@@ -783,7 +724,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
                        i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                         &prev->submit,
                                                         I915_FENCE_GFP);
-                       i915_timeline_sync_set(rq->timeline, &prev->fence);
                }
 
                i915_request_add(rq);
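
The for_each_engine_masked() loop above visits only the engines whose
bits are set in the mask; the same variable is passed for both the
mask and the scratch-copy argument, so the mask is consumed as the
loop advances. A standalone sketch of the underlying bit-walking
idiom, with next_bit() as an illustrative stand-in for the kernel's
helper (uses the gcc/clang ctz builtin):

    #include <stdio.h>

    /* Return the lowest set bit's index and clear it from *mask. */
    static unsigned int next_bit(unsigned long *mask)
    {
            unsigned int bit = __builtin_ctzl(*mask);

            *mask &= *mask - 1;             /* clear the bit just reported */
            return bit;
    }

    int main(void)
    {
            unsigned long mask = 0x0b;      /* e.g. engines 0, 1 and 3 active */

            while (mask)
                    printf("switch engine %u to kernel context\n",
                           next_bit(&mask));
            return 0;
    }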
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ca150a764c24..651f2e4badb6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -354,7 +354,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 void i915_gem_context_close(struct drm_file *file);
 
 int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+                                     unsigned long engine_mask);
 
 void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 68d74c50ac39..7d8e90dfca84 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -62,7 +62,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
         * the hopes that we can then remove contexts and the like only
         * bound by their active reference.
         */
-       err = i915_gem_switch_to_kernel_context(i915);
+       err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
        if (err)
                return err;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8321f2d8a301..3aaf9d32768d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1057,6 +1057,7 @@ void i915_request_add(struct i915_request *request)
                GEM_TRACE("marking %s as active\n", ring->timeline->name);
                list_add(&ring->active_link, &request->i915->gt.active_rings);
        }
+       request->i915->gt.active_engines |= request->engine->mask;
        request->emitted_jiffies = jiffies;
 
        /*
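
Since engine->mask is the engine's own bit (BIT(engine->id) in this
tree), the OR in i915_request_add() is idempotent: any number of
requests on one engine leaves exactly one bit set, so no reference
counting is needed before intel_engines_park() clears the lot. A tiny
standalone demonstration (names illustrative):

    #include <assert.h>

    int main(void)
    {
            unsigned long active = 0;
            unsigned long rcs0_mask = 1UL << 0;     /* stand-in for engine->mask */

            active |= rcs0_mask;
            active |= rcs0_mask;                    /* second request, same engine */
            assert(active == rcs0_mask);            /* still exactly one bit */
            return 0;
    }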
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 45e38877ab17..0f2f8a10149d 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1114,6 +1114,9 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 
        lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
+       if (!engine->context_size)
+               return true;
+
        /*
         * Check the last context seen by the engine. If active, it will be
         * the last request that remains in the timeline. When idle, it is
@@ -1213,6 +1216,8 @@ void intel_engines_park(struct drm_i915_private *i915)
                i915_gem_batch_pool_fini(&engine->batch_pool);
                engine->execlists.no_priolist = false;
        }
+
+       i915->gt.active_engines = 0;
 }
 
 /**
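
The new !engine->context_size early-out treats an engine that carries
no logical context image as trivially being in the kernel context,
since there is no per-context state it could be holding. A standalone
sketch of the predicate, using a stripped-down stand-in type rather
than the driver's struct:

    #include <stdbool.h>

    /* Stand-in for intel_engine_cs; only the field of interest. */
    struct engine_sketch {
            unsigned int context_size;      /* 0: no logical context image */
    };

    static bool trivially_has_kernel_context(const struct engine_sketch *e)
    {
            /* Nothing to save or restore, so no user state can linger. */
            return e->context_size == 0;
    }

    int main(void)
    {
            struct engine_sketch e = { .context_size = 0 };

            return trivially_has_kernel_context(&e) ? 0 : 1;
    }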
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index f8931086eb70..08a4dd697e25 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -1512,7 +1512,8 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
                        }
                }
 
-               err = i915_gem_switch_to_kernel_context(i915);
+               err = i915_gem_switch_to_kernel_context(i915,
+                                                       i915->gt.active_engines);
                if (err)
                        return err;
 
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..e6a3395459f1 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
        cond_resched();
 
        if (flags & I915_WAIT_LOCKED &&
-           i915_gem_switch_to_kernel_context(i915)) {
+           i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
                pr_err("Failed to switch back to kernel context; declaring wedged\n");
                i915_gem_set_wedged(i915);
        }
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 5a98caba6d69..d00679e21415 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -112,6 +112,10 @@ static void mock_retire_work_handler(struct work_struct *work)
 
 static void mock_idle_work_handler(struct work_struct *work)
 {
+       struct drm_i915_private *i915 =
+               container_of(work, typeof(*i915), gt.idle_work.work);
+
+       i915->gt.active_engines = 0;
 }
 
 static int pm_domain_resume(struct device *dev)
-- 
2.20.1
