From: Xiaogang Chen <[email protected]>

Fixes: 7ef6b2d4b7e5 ("drm/amdkfd: remap unaligned svm ranges that have split")

When splitting svm ranges that have been mapped with huge pages, use the huge
page size (2MB) to check the alignment of the split ranges, not
prange->granularity, which is the migration granularity.

Signed-off-by: Xiaogang Chen <[email protected]>
---
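Reviewer note (illustration only, not part of the commit message): below is a
minimal userspace sketch of the boundary check both split helpers now apply.
It assumes 4KB base pages, so one 2MB huge page covers 512 pfns, and it
reimplements the kernel's ALIGN()/ALIGN_DOWN()/IS_ALIGNED() macros.
split_needs_remap() is a made-up name, it folds the two per-endpoint checks
into one, and it leaves out the extra "piece smaller than 512 pages" case the
patch also remaps.

#include <stdbool.h>
#include <stdio.h>

#define HUGE_PAGE_PFNS	512UL	/* 2MB huge page / 4KB base page */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* A split at pfn 'boundary' of [start, last] needs a remap when the
 * boundary falls inside the part of the range that can be mapped with
 * 2MB pages but is not itself 2MB aligned.
 */
static bool split_needs_remap(unsigned long start, unsigned long last,
			      unsigned long boundary)
{
	unsigned long start_align = ALIGN(start, HUGE_PAGE_PFNS);
	unsigned long last_align_down = ALIGN_DOWN(last, HUGE_PAGE_PFNS);

	/* range never held a full 2MB huge page mapping */
	if (last_align_down <= start_align ||
	    last_align_down - start_align < HUGE_PAGE_PFNS)
		return false;

	return boundary >= start_align && boundary <= last_align_down &&
	       !IS_ALIGNED(boundary, HUGE_PAGE_PFNS);
}

int main(void)
{
	/* pfns 0x40000..0x80600 of [0x40000, 0x807ff] can be huge mapped */
	printf("%d\n", split_needs_remap(0x40000, 0x807ff, 0x40100)); /* 1 */
	printf("%d\n", split_needs_remap(0x40000, 0x807ff, 0x60000)); /* 0 */
	return 0;
}
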
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 63 ++++++++++++++++++++++++++--
 1 file changed, 59 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 521c14c7a789..7bb94555e5f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1145,12 +1145,39 @@ svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
                     struct list_head *insert_list, struct list_head *remap_list)
 {
        struct svm_range *tail = NULL;
+       unsigned long start_align = ALIGN(prange->start, 512);
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
        int r = svm_range_split(prange, prange->start, new_last, &tail);
+       bool huge_page_mapping = (last_align_down > start_align) &&
+                                (last_align_down - start_align) >= 512;
 
        if (!r) {
                list_add(&tail->list, insert_list);
-               if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
-                       list_add(&tail->update_list, remap_list);
+               /* original prange has huge page mapping */
+               if (huge_page_mapping) {
+
+                       /* tail->start is inside huge page mapping but not 2MB aligned
+                        * or tail size is smaller than 512 pages
+                        */
+                       if (tail->start >= start_align && tail->start <= last_align_down &&
+                               (!IS_ALIGNED(tail->start, 512) ||
+                               (tail->last - tail->start) < 512)) {
+
+                               list_add(&tail->update_list, remap_list);
+                               return 0;
+                       }
+
+                       /* tail->last is inside huge page mapping but not 2MB aligned
+                        * or tail size is smaller than 512 pages
+                        */
+                       if (tail->last >= start_align && tail->last <= last_align_down &&
+                               (!IS_ALIGNED(tail->last, 512) ||
+                               (tail->last - tail->start) < 512)) {
+
+                               list_add(&tail->update_list, remap_list);
+                               return 0;
+                       }
+               }
        }
        return r;
 }
@@ -1160,13 +1187,41 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
                     struct list_head *insert_list, struct list_head *remap_list)
 {
        struct svm_range *head = NULL;
+       unsigned long start_align = ALIGN(prange->start, 512);
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
        int r = svm_range_split(prange, new_start, prange->last, &head);
+       bool huge_page_mapping = (last_align_down >= start_align) &&
+                                (last_align_down - start_align) >= 512;
 
        if (!r) {
                list_add(&head->list, insert_list);
-               if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
-                       list_add(&head->update_list, remap_list);
+
+               /* original prange has huge page mapping */
+               if (huge_page_mapping) {
+                       /* head->start is inside huge page mapping but not 2MB aligned
+                        * or head size is smaller than 512 pages
+                        */
+                       if (head->start >= start_align && head->start <= last_align_down &&
+                               (!IS_ALIGNED(head->start, 512) ||
+                               (head->last - head->start) < 512)) {
+
+                               list_add(&head->update_list, remap_list);
+                               return 0;
+                       }
+
+                       /* head->last is inside huge page mapping but not 2MB aligned
+                        * or head size is smaller than 512 pages
+                        */
+                       if (head->last >= start_align && head->last <= last_align_down &&
+                               (!IS_ALIGNED(head->last, 512) ||
+                               (head->last - head->start) < 512)) {
+
+                               list_add(&head->update_list, remap_list);
+                               return 0;
+                       }
+               }
        }
+
        return r;
 }
 
-- 
2.34.1
