Introduce xe_svm_build_sg helper function to build a scatter
gather table from a hmm_range struct. This is preparatory work
for binding an hmm range to the GPU.

Signed-off-by: Oak Zeng <oak.z...@intel.com>
Co-developed-by: Niranjana Vishwanathapura <niranjana.vishwanathap...@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathap...@intel.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@intel.com>
Cc: Brian Welty <brian.we...@intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c | 52 +++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_svm.h |  3 +++
 2 files changed, 55 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 559188471949..ab3cc2121869 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -6,6 +6,8 @@
 #include <linux/mutex.h>
 #include <linux/mm_types.h>
 #include "xe_svm.h"
+#include <linux/hmm.h>
+#include <linux/scatterlist.h>
 
 DEFINE_HASHTABLE(xe_svm_table, XE_MAX_SVM_PROCESS);
 
@@ -61,3 +63,53 @@ struct xe_svm *xe_lookup_svm_by_mm(struct mm_struct *mm)
 
        return NULL;
 }
+
+/**
+ * xe_svm_build_sg() - build a scatter gather table for all the physical
+ * pages/pfn in a hmm_range.
+ *
+ * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
+ * has the pfn numbers of pages that back up this hmm address range.
+ * @st: pointer to the sg table.
+ *
+ * All the contiguous pfns will be collapsed into one entry in
+ * the scatter gather table. This is for the convenience of
+ * later on operations to bind address range to GPU page table.
+ *
+ * NOTE: sg_dma_address()/sg_dma_len() are used here to stash the CPU
+ * physical address of each backing page; a real DMA mapping (e.g.
+ * dma_map_sgtable()) must rewrite them before the table is handed to
+ * hardware.
+ *
+ * This function allocates the storage of the sg table. It is the
+ * caller's responsibility to free it by calling sg_free_table().
+ *
+ * Returns 0 if successful; -ENOMEM if fails to allocate memory
+ */
+int xe_svm_build_sg(struct hmm_range *range,
+		    struct sg_table *st)
+{
+	struct scatterlist *sg = NULL;
+	u64 i, npages;
+
+	st->nents = 0;
+	npages = ((range->end - 1) >> PAGE_SHIFT) -
+		 (range->start >> PAGE_SHIFT) + 1;
+
+	if (unlikely(sg_alloc_table(st, npages, GFP_KERNEL)))
+		return -ENOMEM;
+
+	for (i = 0; i < npages; i++) {
+		/*
+		 * hmm_pfns[] entries carry HMM_PFN_* flag bits in
+		 * addition to the pfn. Strip them via hmm_pfn_to_page()
+		 * and convert to a physical address so the contiguity
+		 * check below compares bytes with bytes. Storing the raw
+		 * pfn value and comparing it against a byte offset
+		 * (pfn == prev_pfn + PAGE_SIZE) would mean contiguous
+		 * pages are never merged.
+		 */
+		phys_addr_t addr =
+			page_to_phys(hmm_pfn_to_page(range->hmm_pfns[i]));
+
+		/* Physically contiguous with the previous entry: extend it. */
+		if (sg && (addr == (sg_dma_address(sg) + sg->length))) {
+			sg->length += PAGE_SIZE;
+			sg_dma_len(sg) += PAGE_SIZE;
+			continue;
+		}
+
+		/* Start a new entry (first iteration uses the table head). */
+		sg = sg ? sg_next(sg) : st->sgl;
+		sg_dma_address(sg) = addr;
+		sg_dma_len(sg) = PAGE_SIZE;
+		sg->length = PAGE_SIZE;
+		st->nents++;
+	}
+
+	/* Terminate the chain at the last entry actually used. */
+	sg_mark_end(sg);
+	return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 3ed106ecc02b..191bce6425db 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -13,6 +13,8 @@
 #include <linux/interval_tree.h>
 #include <linux/hashtable.h>
 #include <linux/types.h>
+#include <linux/hmm.h>
+#include "xe_device_types.h"
 
 struct xe_vm;
 struct mm_struct;
@@ -69,4 +71,5 @@ struct xe_svm *xe_create_svm(struct xe_vm *vm);
 struct xe_svm *xe_lookup_svm_by_mm(struct mm_struct *mm);
 struct xe_svm_range *xe_svm_range_from_addr(struct xe_svm *svm,
                                                                unsigned long 
addr);
+int xe_svm_build_sg(struct hmm_range *range, struct sg_table *st);
 #endif
-- 
2.26.3

Reply via email to