From: Honglei Huang <[email protected]>

Add core page management functions for batch userptr allocations.

This adds:
- get_user_pages_batch_locked(): gets user pages for batch
- set_user_pages_batch(): populates TTM page array from multiple
  HMM ranges

Signed-off-by: Honglei Huang <[email protected]>
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 45 +++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a22a99b8d..5f10a4514 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1202,6 +1202,51 @@ static const struct mmu_interval_notifier_ops amdgpu_amdkfd_hsa_batch_ops = {
        .invalidate = amdgpu_amdkfd_invalidate_userptr_batch,
 };
 
+/*
+ * get_user_pages_batch_locked - pin the user pages backing one range of a
+ * batch userptr allocation through HMM.
+ *
+ * @mm:         address space the range belongs to
+ * @mem:        KFD buffer object owning the batch notifier used for the walk
+ * @range:      [start, start + size) user virtual range to pin
+ * @range_hmm:  out parameter; on success points to the hmm_range populated by
+ *              amdgpu_hmm_range_get_pages(), NULL on any failure path
+ * @readonly:   request read-only access to the pages
+ *
+ * Returns 0 on success or a negative errno (-EFAULT if no VMA maps
+ * range->start, otherwise whatever amdgpu_hmm_range_get_pages() returns).
+ */
+static int get_user_pages_batch_locked(struct mm_struct *mm,
+                                        struct kgd_mem *mem,
+                                        struct user_range_info *range,
+                                        struct hmm_range **range_hmm,
+                                        bool readonly)
+{
+       struct vm_area_struct *vma;
+       int r;
+
+       /* Ensure the out pointer is well-defined even on early error return. */
+       *range_hmm = NULL;
+
+       /*
+        * NOTE(review): only the VMA containing range->start is looked up;
+        * there is no check that this VMA covers the full range->size, nor
+        * that the range does not span multiple VMAs — presumably the HMM
+        * walk below fails cleanly in that case. Worth confirming.
+        */
+       vma = vma_lookup(mm, range->start);
+       if (unlikely(!vma))
+               return -EFAULT;
+
+       /*
+        * NOTE(review): range->size >> PAGE_SHIFT silently truncates a
+        * non-page-aligned size — assumes callers pass page-aligned ranges.
+        */
+       r = amdgpu_hmm_range_get_pages(&mem->batch_notifier, range->start,
+                                      range->size >> PAGE_SHIFT, readonly,
+                                      NULL, range_hmm);
+       return r;
+}
+
+/*
+ * set_user_pages_batch - populate a TTM page array from multiple HMM ranges.
+ *
+ * @ttm:     TTM tt whose pages[] array is filled in
+ * @ranges:  array of per-range info; each entry's ->range must already hold
+ *           the hmm_range produced by a prior HMM walk
+ * @nranges: number of entries in @ranges
+ *
+ * Walks every range in order, converting each HMM PFN to a struct page and
+ * appending it to ttm->pages.
+ *
+ * Returns 0 on success, -EINVAL if any range is missing its HMM result, or
+ * -EOVERFLOW if the combined ranges would exceed ttm->num_pages.
+ *
+ * NOTE(review): there is no final check that k == ttm->num_pages, so a batch
+ * that is smaller than the TTM leaves trailing pages[] entries untouched —
+ * presumably callers size ttm to the batch exactly; verify.
+ */
+static int set_user_pages_batch(struct ttm_tt *ttm,
+                               struct user_range_info *ranges,
+                               uint32_t nranges)
+{
+       uint32_t i, j, k = 0, range_npfns;
+
+       for (i = 0; i < nranges; ++i) {
+               /* Every range must have completed its HMM walk first. */
+               if (!ranges[i].range || !ranges[i].range->hmm_pfns)
+                       return -EINVAL;
+
+               /* hmm_range bounds are page-aligned addresses; count PFNs. */
+               range_npfns = (ranges[i].range->end - ranges[i].range->start) >>
+                             PAGE_SHIFT;
+
+               /* Refuse to write past the end of ttm->pages[]. */
+               if (k + range_npfns > ttm->num_pages)
+                       return -EOVERFLOW;
+
+               for (j = 0; j < range_npfns; ++j)
+                       ttm->pages[k++] =
+                               hmm_pfn_to_page(ranges[i].range->hmm_pfns[j]);
+       }
+
+       return 0;
+}
+
 /* Reserving a BO and its page table BOs must happen atomically to
  * avoid deadlocks. Some operations update multiple VMs at once. Track
  * all the reservation info in a context structure. Optionally a sync
-- 
2.34.1

Reply via email to