This reverts commit d97e7b1eb8afd7a404466533b0bc192351b760c7.

Needed for the next revert patch.

Signed-off-by: Philip Yang <philip.y...@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 60 ++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h |  3 ++
 2 files changed, 63 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 4d000c63cde8..3422eee8d0d0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1145,6 +1145,66 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
        list_add_tail(&pchild->child_list, &prange->child_list);
 }
 
+/**
+ * svm_range_split_by_granularity - trim a range to one granularity block
+ *
+ * @p: the kfd_process owning the svms list
+ * @mm: the mm_struct backing the address space
+ * @addr: the vm fault address in pages, to split the prange
+ * @parent: parent range if prange is from child list
+ * @prange: prange to split
+ *
+ * Trims @prange to be a single aligned block of prange->granularity if
+ * possible. The head and tail are added to the child_list in @parent.
+ *
+ * Context: caller must hold mmap_read_lock and prange->lock
+ *
+ * Return:
+ * 0 - OK, otherwise error code
+ */
+int
+svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
+                              unsigned long addr, struct svm_range *parent,
+                              struct svm_range *prange)
+{
+       struct svm_range *head, *tail;
+       unsigned long start, last, size;
+       int r;
+
+       /* Align split range start and size to granularity size, then a single
+        * PTE covers the whole range, which reduces the number of PTEs
+        * updated and the L1 TLB space used for translation.
+        */
+       size = 1UL << prange->granularity;
+       start = ALIGN_DOWN(addr, size);
+       last = ALIGN(addr + 1, size) - 1;
+
+       pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
+                prange->svms, prange->start, prange->last, start, last, size);
+
+       if (start > prange->start) {
+               r = svm_range_split(prange, start, prange->last, &head);
+               if (r)
+                       return r;
+               svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
+       }
+
+       if (last < prange->last) {
+               r = svm_range_split(prange, prange->start, last, &tail);
+               if (r)
+                       return r;
+               svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
+       }
+
+       /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
+       if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
+               prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
+               pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
+                        prange, prange->start, prange->last,
+                        SVM_OP_ADD_RANGE_AND_MAP);
+       }
+       return 0;
+}
 static bool
 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
 {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 026863a0abcd..be11ba0c4289 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -172,6 +172,9 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
 int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
                            bool clear);
 void svm_range_vram_node_free(struct svm_range *prange);
+int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
+                              unsigned long addr, struct svm_range *parent,
+                              struct svm_range *prange);
 int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
                            uint32_t vmid, uint32_t node_id, uint64_t addr,
                            bool write_fault);
-- 
2.35.1

Reply via email to