During GPU page table invalidation with xnack off, splits of the
same prange may occur concurrently, with each split creating a new
child range. The invalid counter of each child must also be
incremented to ensure the GPU page tables are updated for these
ranges.

Signed-off-by: Alex Sierra <[email protected]>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 294c9480a184..67605e4bc3c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1622,6 +1622,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
                unsigned long start, unsigned long last)
 {
        struct svm_range_list *svms = prange->svms;
+       struct svm_range *pchild;
        struct kfd_process *p;
        int r = 0;
 
@@ -1633,6 +1634,12 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
        if (!p->xnack_enabled) {
                int evicted_ranges;
 
+               list_for_each_entry(pchild, &prange->child_list, child_list) {
+                       mutex_lock_nested(&pchild->lock, 1);
+                       atomic_inc(&pchild->invalid);
+                       mutex_unlock(&pchild->lock);
+               }
+
                atomic_inc(&prange->invalid);
                evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
                if (evicted_ranges != 1)
@@ -1650,7 +1657,6 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
                schedule_delayed_work(&svms->restore_work,
                        msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
        } else {
-               struct svm_range *pchild;
                unsigned long s, l;
 
                pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
-- 
2.32.0
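
For readers unfamiliar with the pattern, below is a minimal standalone
sketch (userspace C, not kernel code) of what the new hunk does: walk a
parent range's child list and bump each child's invalid counter under
that child's own lock, then mark the parent. The types and names here
(struct child_range, struct parent_range, mark_range_invalid) are
illustrative stand-ins, not the kfd_svm structures; the kernel code uses
list_for_each_entry() and mutex_lock_nested() instead.

/*
 * Standalone sketch of the pattern added by the hunk above.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct child_range {
	pthread_mutex_t lock;          /* per-child lock (kernel: pchild->lock)   */
	atomic_int invalid;            /* pending GPU page table update counter   */
	struct child_range *next;      /* kernel: list_head child_list            */
};

struct parent_range {
	atomic_int invalid;
	struct child_range *children;  /* head of the child list                  */
};

/* Mirror of the new loop: mark every child invalid, then the parent. */
static void mark_range_invalid(struct parent_range *parent)
{
	for (struct child_range *c = parent->children; c; c = c->next) {
		pthread_mutex_lock(&c->lock);   /* kernel: mutex_lock_nested(..., 1) */
		atomic_fetch_add(&c->invalid, 1);
		pthread_mutex_unlock(&c->lock);
	}
	atomic_fetch_add(&parent->invalid, 1);
}

int main(void)
{
	static struct child_range c1 = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	static struct child_range c0 = { PTHREAD_MUTEX_INITIALIZER, 0, &c1 };
	static struct parent_range p = { 0, &c0 };

	mark_range_invalid(&p);
	printf("parent=%d child0=%d child1=%d\n",
	       atomic_load(&p.invalid),
	       atomic_load(&c0.invalid),
	       atomic_load(&c1.invalid));
	return 0;
}

In the actual patch, the subclass-1 annotation of mutex_lock_nested()
tells lockdep that taking a child's lock while another lock of the same
class may be held is intentional, matching the existing child walk in
the xnack-on branch of svm_range_evict().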
