Add a helper function svm_populate_range() to populate
an SVM range. This function calls hmm_range_fault()
to read the CPU page tables and populate all pfns of the
virtual address range into an array saved in
hmm_range::hmm_pfns. This is preparatory work for binding
an SVM range to the GPU; the hmm_pfns array will be used
for the GPU binding.
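
As an illustration, a later consumer of this helper is expected to
follow the usual hmm pattern sketched below. This is only a sketch:
bind_svm_range_to_gpu() is a hypothetical placeholder for the future
GPU binding code, not something added by this patch:

        struct hmm_range hmm_range = {};
        int ret;

        mmap_read_lock(svm_range->svm->mm);
        ret = svm_populate_range(svm_range, &hmm_range, write);
        mmap_read_unlock(svm_range->svm->mm);
        if (ret)
                return ret;

        /*
         * Bind only if the range was not invalidated in the meantime;
         * real code must hold the driver's notifier lock here.
         */
        if (!mmu_interval_read_retry(&svm_range->notifier,
                                     hmm_range.notifier_seq))
                bind_svm_range_to_gpu(svm_range, &hmm_range);

        /* svm_populate_range() allocated hmm_pfns; the caller frees it. */
        kvfree(hmm_range.hmm_pfns);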

Signed-off-by: Oak Zeng <oak.z...@intel.com>
Co-developed-by: Niranjana Vishwanathapura <niranjana.vishwanathap...@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathap...@intel.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@intel.com>
Cc: Brian Welty <brian.we...@intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c | 70 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 44d4f4216a93..0c13690a19f5 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -145,3 +145,73 @@ int xe_svm_build_sg(struct hmm_range *range,
        sg_mark_end(sg);
        return 0;
 }
+
+/**
+ * svm_populate_range() - Populate physical pages of a virtual address range
+ * @svm_range: The svm range to populate
+ * @hmm_range: Pointer to the hmm_range struct. hmm_range->hmm_pfns
+ * will hold the populated pfns.
+ * @write: Populate pages with write permission
+ *
+ * This function also reads the mmu notifier sequence number
+ * (mmu_interval_read_begin()) for a later comparison (through
+ * mmu_interval_read_retry()). It must be called with the mmap
+ * read or write lock held.
+ *
+ * This function allocates hmm_range->hmm_pfns; it is the caller's
+ * responsibility to free it.
+ *
+ * Return: 0 for success, negative error code on failure.
+ */
+static int svm_populate_range(struct xe_svm_range *svm_range,
+                           struct hmm_range *hmm_range, bool write)
+{
+       unsigned long timeout =
+               jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+       unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+       u64 npages;
+       int ret;
+
+       mmap_assert_locked(svm_range->svm->mm);
+
+       npages = ((svm_range->end - 1) >> PAGE_SHIFT) -
+                (svm_range->start >> PAGE_SHIFT) + 1;
+       pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+       if (unlikely(!pfns))
+               return -ENOMEM;
+
+       if (write)
+               flags |= HMM_PFN_REQ_WRITE;
+
+       memset64((u64 *)pfns, (u64)flags, npages);
+       hmm_range->hmm_pfns = pfns;
+       hmm_range->notifier = &svm_range->notifier;
+       hmm_range->start = svm_range->start;
+       hmm_range->end = svm_range->end;
+       hmm_range->pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
+       hmm_range->dev_private_owner = svm_range->svm->vm->xe->drm.dev;
+
+       while (true) {
+               /*
+                * Snapshot the notifier sequence number for the caller's
+                * later mmu_interval_read_retry(). hmm_range_fault()
+                * returns -EBUSY while this sequence is stale, so it must
+                * be re-read before every attempt.
+                */
+               hmm_range->notifier_seq =
+                       mmu_interval_read_begin(&svm_range->notifier);
+
+               ret = hmm_range_fault(hmm_range);
+               if (ret != -EBUSY)
+                       break;
+
+               if (time_after(jiffies, timeout))
+                       goto free_pfns;
+       }
+
+free_pfns:
+       if (ret)
+               kvfree(pfns);
+       return ret;
+}
-- 
2.26.3
