Page table restore implementation in the SVM API. It is called from the
fault handler in amdgpu_vm to update page tables through the retry
page-fault IH path.

Signed-off-by: Alex Sierra <[email protected]>
Signed-off-by: Philip Yang <[email protected]>
Signed-off-by: Felix Kuehling <[email protected]>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 69 ++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h |  2 +
 2 files changed, 71 insertions(+)
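
For context, here is a minimal sketch of how a retry-fault handler on the
amdgpu_vm side might invoke the new helper. The amdgpu_vm_handle_fault()
name, the is_compute_context flag, and the surrounding logic are
illustrative assumptions and are not part of this patch:

/*
 * Hypothetical caller sketch: a retry page-fault handler that receives
 * the faulting PASID and address from the IH and asks the SVM code to
 * restore the mapping.  Names and structure are assumptions for
 * illustration only.
 */
#include "amdgpu.h"
#include "kfd_svm.h"

static bool amdgpu_vm_handle_fault(struct amdgpu_device *adev,
                                   unsigned int pasid, uint64_t addr,
                                   bool is_compute_context)
{
        int r;

        /* Only compute (KFD) contexts are backed by SVM ranges. */
        if (!is_compute_context)
                return false;

        /* Validate the SVM range covering addr and map it to the GPUs. */
        r = svm_range_restore_pages(adev, pasid, addr);
        if (r) {
                dev_dbg(adev->dev,
                        "failed %d to restore pages for pasid 0x%x addr 0x%llx\n",
                        r, pasid, addr);
                return false;   /* unrecoverable fault */
        }

        return true;    /* page tables updated, retry the access */
}

Note that the helper below takes the mmap lock in write mode only long
enough to flush deferred list work, then downgrades to read before
validating and mapping the range, so the whole restore does not block
concurrent CPU faults behind the write lock.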

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index fb8ca844d9bd..c791d91cb45d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1946,6 +1946,75 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
        return NULL;
 }
 
+int
+svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+                       uint64_t addr)
+{
+       int r = 0;
+       struct mm_struct *mm = NULL;
+       struct svm_range *prange;
+       struct svm_range_list *svms;
+       struct kfd_process *p;
+
+       p = kfd_lookup_process_by_pasid(pasid);
+       if (!p) {
+               pr_debug("kfd process not founded pasid 0x%x\n", pasid);
+               return -ESRCH;
+       }
+       svms = &p->svms;
+
+       pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
+
+       mm = get_task_mm(p->lead_thread);
+       if (!mm) {
+               pr_debug("svms 0x%p failed to get mm\n", svms);
+               r = -ESRCH;
+               goto out;
+       }
+
+       svm_range_list_lock_and_flush_work(svms, mm);
+       mutex_lock(&svms->lock);
+       prange = svm_range_from_addr(svms, addr, NULL);
+
+       mmap_write_downgrade(mm);
+
+       if (!prange) {
+               pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
+                        svms, addr);
+               r = -EFAULT;
+               goto out_unlock_svms;
+       }
+
+       mutex_lock(&prange->migrate_mutex);
+
+       r = svm_range_validate(mm, prange);
+       if (r) {
+               pr_debug("failed %d to validate svms 0x%p [0x%lx 0x%lx]\n", r,
+                        svms, prange->start, prange->last);
+
+               goto out_unlock_range;
+       }
+
+       pr_debug("restoring svms 0x%p [0x%lx %lx] mapping\n",
+                svms, prange->start, prange->last);
+
+       r = svm_range_map_to_gpus(prange, true);
+       if (r)
+               pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpu\n", r,
+                        svms, prange->start, prange->last);
+
+out_unlock_range:
+       mutex_unlock(&prange->migrate_mutex);
+out_unlock_svms:
+       mutex_unlock(&svms->lock);
+       mmap_read_unlock(mm);
+       mmput(mm);
+out:
+       kfd_unref_process(p);
+
+       return r;
+}
+
 void svm_range_list_fini(struct kfd_process *p)
 {
        struct svm_range *prange;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 3f945a601546..3aa6f6b97481 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -159,6 +159,8 @@ int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
                               struct svm_range *prange,
                               struct svm_range **pmigrate,
                               struct list_head *deferred_update_list);
+int svm_range_restore_pages(struct amdgpu_device *adev,
+                           unsigned int pasid, uint64_t addr);
 void svm_range_add_list_work(struct svm_range_list *svms,
                             struct svm_range *prange, struct mm_struct *mm,
                             enum svm_work_list_ops op);
-- 
2.31.0
