If the GT is suspended, do not allow submission of new TLB invalidation
requests and cancel all pending ones. The TLB entries will be
invalidated either during GuC reload or on system resume.

Signed-off-by: Fei Yang <fei.y...@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cav...@intel.com>
CC: John Harrison <john.c.harri...@intel.com>
Reviewed-by: Andi Shyti <andi.sh...@linux.intel.com>
Acked-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Acked-by: Nirmoy Das <nirmoy....@intel.com>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc.h           |  1 +
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c    | 13 +++++++++++++
 drivers/gpu/drm/i915/gt/uc/intel_uc.c            | 16 ++++++++--------
 3 files changed, 22 insertions(+), 8 deletions(-)
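
Note (not part of the commit message): below is a minimal, kernel-style
sketch of the wake-all-waiters pattern that this patch factors out into
wake_up_all_tlb_invalidate(). The tlb_wait structure and the
cancel_all_tlb_waiters() name are illustrative placeholders only; they
stand in for i915's intel_guc_tlb_wait and the guc->tlb_lookup xarray
and are not the driver's actual API.

#include <linux/wait.h>
#include <linux/xarray.h>

/* Illustrative stand-in for i915's intel_guc_tlb_wait. */
struct tlb_wait {
	wait_queue_head_t wq;	/* waiter sleeps here until done or cancelled */
};

/* Wake every waiter registered in @lookup so none stays blocked. */
static void cancel_all_tlb_waiters(struct xarray *lookup)
{
	struct tlb_wait *wait;
	unsigned long i;

	/* The irq-safe lock serialises against the G2H completion path. */
	xa_lock_irq(lookup);
	xa_for_each(lookup, i, wait)
		wake_up(&wait->wq);
	xa_unlock_irq(lookup);
}

In the patch itself, both __uc_init_hw() and intel_uc_suspend() call the
real helper so that no waiter is left sleeping across a GuC reload or a
suspend.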

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 0949628d69f8b..2b6dfe62c8f2a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -537,4 +537,5 @@ int intel_guc_invalidate_tlb_engines(struct intel_guc *guc);
 int intel_guc_invalidate_tlb_guc(struct intel_guc *guc);
 int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
                                    const u32 *payload, u32 len);
+void wake_up_all_tlb_invalidate(struct intel_guc *guc);
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 8a304b4c85462..1b9fa2bafaad6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1796,6 +1796,19 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
        intel_context_put(parent);
 }
 
+void wake_up_all_tlb_invalidate(struct intel_guc *guc)
+{
+       struct intel_guc_tlb_wait *wait;
+       unsigned long i;
+
+       if (intel_guc_tlb_invalidation_is_available(guc)) {
+               xa_lock_irq(&guc->tlb_lookup);
+               xa_for_each(&guc->tlb_lookup, i, wait)
+                       wake_up(&wait->wq);
+               xa_unlock_irq(&guc->tlb_lookup);
+       }
+}
+
 void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
 {
        struct intel_context *ce;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 83ffdf19fd3fc..7ad560149a189 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -464,8 +464,6 @@ static int __uc_init_hw(struct intel_uc *uc)
        struct drm_i915_private *i915 = gt->i915;
        struct intel_guc *guc = &uc->guc;
        struct intel_huc *huc = &uc->huc;
-       struct intel_guc_tlb_wait *wait;
-       long unsigned int i;
        int ret, attempts;
        bool pl1en = false;
 
@@ -566,12 +564,7 @@ static int __uc_init_hw(struct intel_uc *uc)
         * The full GT reset will have cleared the TLB caches and flushed the
         * G2H message queue; we can release all the blocked waiters.
         */
-       if (intel_guc_tlb_invalidation_is_available(guc)) {
-               xa_lock_irq(&guc->tlb_lookup);
-               xa_for_each(&guc->tlb_lookup, i, wait)
-                       wake_up(&wait->wq);
-               xa_unlock_irq(&guc->tlb_lookup);
-       }
+       wake_up_all_tlb_invalidate(guc);
 
        return 0;
 
@@ -701,6 +694,8 @@ void intel_uc_suspend(struct intel_uc *uc)
        /* flush the GSC worker */
        intel_gsc_uc_flush_work(&uc->gsc);
 
+       wake_up_all_tlb_invalidate(guc);
+
        if (!intel_guc_is_ready(guc)) {
                guc->interrupts.enabled = false;
                return;
@@ -749,6 +744,11 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
 
        intel_gsc_uc_resume(&uc->gsc);
 
+       if (intel_guc_tlb_invalidation_is_available(guc)) {
+               intel_guc_invalidate_tlb_engines(guc);
+               intel_guc_invalidate_tlb_guc(guc);
+       }
+
        return 0;
 }
 
-- 
2.25.1
