From: Honglei Huang <[email protected]>

Add the top-level MMU notifier callback and checkpoint timestamp capture:
- amdgpu_svm_capture_checkpoint_ts: capture the IV timestamp at the IH
  ring write pointer, used to filter out stale retry faults
- amdgpu_svm_range_invalidate: top-level drm_gpusvm_ops.invalidate
  callback that dispatches per-range notifier begin/end with batched
  TLB flushing, skips MMU_NOTIFY_RELEASE events, and bails out early
  when svm->exiting is set

These complete the invalidation path: attribute changes trigger
invalidate_interval, while MMU notifier events flow through this
callback to perform PTE zap, TLB flush, and GC queueing.

Signed-off-by: Honglei Huang <[email protected]>
---
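amdgpu_svm_range_invalidate() takes the driver-side struct amdgpu_svm
rather than the struct drm_gpusvm that drm_gpusvm_ops.invalidate passes,
so a thin adapter is expected to do the hookup. A minimal sketch of that
glue, assuming amdgpu_svm embeds its drm_gpusvm as the gpusvm field (as
the trace message suggests); the adapter and ops names below are
illustrative only and not part of this patch:

static void amdgpu_svm_gpusvm_invalidate(struct drm_gpusvm *gpusvm,
					 struct drm_gpusvm_notifier *notifier,
					 const struct mmu_notifier_range *mmu_range)
{
	/* Recover the driver wrapper embedding the drm_gpusvm core. */
	struct amdgpu_svm *svm = container_of(gpusvm, struct amdgpu_svm, gpusvm);

	amdgpu_svm_range_invalidate(svm, notifier, mmu_range);
}

static const struct drm_gpusvm_ops amdgpu_svm_gpusvm_ops = {
	/* MMU notifier invalidations dispatch through this hook. */
	.invalidate = amdgpu_svm_gpusvm_invalidate,
};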
 drivers/gpu/drm/amd/amdgpu/amdgpu_svm_range.c | 71 +++++++++++++++++++
 1 file changed, 71 insertions(+)
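
For reference, a sketch of the consumer side of the checkpoint: a retry
fault handler can drop faults whose IV timestamp predates the captured
checkpoint, roughly mirroring the existing KFD SVM stale-fault
filtering. amdgpu_svm_fault_is_stale() and its fault_ts parameter are
illustrative names, not part of this patch:

static bool amdgpu_svm_fault_is_stale(struct amdgpu_svm *svm, uint64_t fault_ts)
{
	/*
	 * checkpoint_ts is the newest IV timestamp seen at unmap time;
	 * a fault older than it was already in flight and is stale.
	 */
	return amdgpu_ih_ts_after(fault_ts, READ_ONCE(svm->checkpoint_ts));
}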

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_range.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_range.c
index 49240c704..fe543a16b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_range.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_svm_range.c
@@ -714,3 +714,74 @@ amdgpu_svm_range_put_if_dequeued(struct amdgpu_svm *svm,
        if (dequeue)
                drm_gpusvm_range_put(&range->base);
 }
+
+void amdgpu_svm_capture_checkpoint_ts(struct amdgpu_svm *svm)
+{
+       struct amdgpu_device *adev = svm->adev;
+       struct amdgpu_ih_ring *ih;
+       uint32_t checkpoint_wptr;
+
+       if (!adev->irq.retry_cam_enabled && adev->irq.ih1.ring_size) {
+               ih = &adev->irq.ih1;
+               checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+               if (ih->rptr != checkpoint_wptr) {
+                       WRITE_ONCE(svm->checkpoint_ts,
+                                  amdgpu_ih_decode_iv_ts(adev, ih,
+                                                         checkpoint_wptr, -1));
+                       return;
+               }
+       }
+
+       ih = &adev->irq.ih_soft;
+       checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+       if (ih->rptr != checkpoint_wptr)
+               WRITE_ONCE(svm->checkpoint_ts,
+                          amdgpu_ih_decode_iv_ts(adev, ih,
+                                                 checkpoint_wptr, -1));
+}
+
+void amdgpu_svm_range_invalidate(struct amdgpu_svm *svm,
+                                struct drm_gpusvm_notifier *notifier,
+                                const struct mmu_notifier_range *mmu_range)
+{
+       struct drm_gpusvm_range *r, *first;
+       uint64_t adj_start = mmu_range->start, adj_end = mmu_range->end;
+       bool needs_flush = false;
+
+       amdgpu_svm_assert_in_notifier(svm);
+
+       AMDGPU_SVM_TRACE("INVALIDATE: pasid=%u, gpusvm=%p, seqno=%lu, [0x%016lx-0x%016lx]-0x%lx, event=%d\n",
+                        svm->vm->pasid, &svm->gpusvm,
+                        notifier->notifier.invalidate_seq,
+                        mmu_range->start, mmu_range->end,
+                        mmu_range->end - mmu_range->start, mmu_range->event);
+
+       if (mmu_range->event == MMU_NOTIFY_RELEASE)
+               return;
+       if (atomic_read(&svm->exiting))
+               return;
+
+       adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
+       adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
+
+       first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
+       if (!first)
+               return;
+
+       if (mmu_range->event == MMU_NOTIFY_UNMAP)
+               amdgpu_svm_capture_checkpoint_ts(svm);
+
+       r = first;
+       drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+               needs_flush |= amdgpu_svm_range_notifier_event_begin(svm, r,
+                                                                    mmu_range);
+       if (!needs_flush)
+               goto range_notifier_event_end;
+
+       svm->flush_tlb(svm);
+
+range_notifier_event_end:
+       r = first;
+       drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+               amdgpu_svm_range_notifier_event_end(svm, r, mmu_range);
+}
-- 
2.34.1
