On 11/19/2025 6:22 PM, Philip Yang wrote:
resend, to fix the code format issue.
On 2025-11-19 18:33, Philip Yang wrote:
On 2025-11-18 12:32, Xiaogang.Chen wrote:
From: Xiaogang Chen <[email protected]>
Fixes: 7ef6b2d4b7e5 ("drm/amdkfd: remap unaligned svm ranges that have split")
When splitting svm ranges that have been mapped with huge pages, the huge
page size (2MB) should be used to check the split range alignment, not
prange->granularity, which is the migration granularity.
Signed-off-by: Xiaogang Chen <[email protected]>
---
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 63 ++++++++++++++++++++++++++--
1 file changed, 59 insertions(+), 4 deletions(-)
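Note: prange->start and prange->last are page numbers, so the bare 512 used
throughout this diff is one 2MB huge page expressed in 4KB pages. A
hypothetical named constant along these lines would make that explicit:

/* hypothetical: one 2MB huge page, counted in 4KB pages */
#define SVM_2MB_NR_PAGES (SZ_2M >> PAGE_SHIFT)	/* == 512 when PAGE_SIZE is 4KB */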
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 521c14c7a789..7bb94555e5f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1145,12 +1145,39 @@ svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
struct list_head *insert_list, struct list_head *remap_list)
{
struct svm_range *tail = NULL;
+ unsigned long start_align = ALIGN(prange->start, 512);
+ unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
int r = svm_range_split(prange, prange->start, new_last, &tail);
Move this function call after the variable definitions; it is easier to read.
+ bool huge_page_mapping = (last_align_down > start_align) &&
+ (last_align_down - start_align) >= 512;
>= 512 is redundant: start_align and last_align_down are both multiples of
512, so last_align_down > start_align already implies a difference of at
least 512.
if (!r) {
list_add(&tail->list, insert_list);
- if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
- list_add(&tail->update_list, remap_list);
+ /* original prange has huge page mapping */
+ if (huge_page_mapping) {
+
+ /* tail->start is inside huge page mapping but not 2MB aligned
+ * or tail size is smaller than 512 pages
+ */
+ if (tail->start >= start_align && tail->start <= last_align_down &&
+ (!IS_ALIGNED(tail->start, 512) ||
Split from the tail: the new range is the last part of prange, so there is
no need to check the size.
+ (tail->last - tail->start) < 512)) {
+
+ list_add(&tail->update_list, remap_list);
+ return 0;
+ }
+
+ /* tail->last is inside huge page mapping but not 2MB aligned
+ * or tail size is smaller than 512 pages
+ */
tail->last is the original prange->last, so it cannot be inside prange.
+ if (tail->last >= start_align && tail->last <= last_align_down &&
+ (!IS_ALIGNED(tail->last, 512) ||
+ (tail->last - tail->start) < 512)) {
+
+ list_add(&tail->update_list, remap_list);
+ return 0;
+ }
+ }
}
To refactor the conditions:
unsigned long start_align = ALIGN(prange->start, 512);
unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
bool huge_page_mapping = last_align_down > start_align;
int r;
r = svm_range_split(prange, prange->start, new_last, &tail);
if (r || !huge_page_mapping)
return r;
if (tail->start > start_align && tail->start < last_align_down &&
    !IS_ALIGNED(tail->start, 512))
	list_add(&tail->update_list, remap_list);
return 0;
OK, that makes the code concise. Thanks.
We still need to add tail to the insert list:
list_add(&tail->list, insert_list);
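Putting the refactor and the insert_list fix together, a minimal sketch of
the resulting split-tail path (untested, keeping the open-coded 512):

static int
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	/* 512 4KB pages == one 2MB huge page */
	unsigned long start_align = ALIGN(prange->start, 512);
	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
	bool huge_page_mapping = last_align_down > start_align;
	struct svm_range *tail = NULL;
	int r;

	r = svm_range_split(prange, prange->start, new_last, &tail);
	if (r)
		return r;

	/* the new range always goes on the insert list */
	list_add(&tail->list, insert_list);

	/* remap only when the split point lands inside a huge page
	 * mapping and is not itself 2MB aligned
	 */
	if (huge_page_mapping &&
	    tail->start > start_align && tail->start < last_align_down &&
	    !IS_ALIGNED(tail->start, 512))
		list_add(&tail->update_list, remap_list);

	return 0;
}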
Regards
Xiaogang
Do a similar change for the split head below (a sketch of that variant
follows the second hunk).
Regards,
Philip
@@ -1160,13 +1187,41 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
struct list_head *insert_list, struct list_head *remap_list)
{
struct svm_range *head = NULL;
+ unsigned long start_align = ALIGN(prange->start, 512);
+ unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
int r = svm_range_split(prange, new_start, prange->last, &head);
+ bool huge_page_mapping = (last_align_down >= start_align) &&
+ (last_align_down - start_align) >= 512;
if (!r) {
list_add(&head->list, insert_list);
- if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
- list_add(&head->update_list, remap_list);
+
+ /* original prange has huge page mapping */
+ if (huge_page_mapping) {
+ /* head->start is inside huge page mapping but not 2MB aligned
+ * or head size is smaller than 512 pages
+ */
+ if (head->start >= start_align && head->start <= last_align_down &&
+ (!IS_ALIGNED(head->start, 512) ||
+ (head->last - head->start) < 512)) {
+
+ list_add(&head->update_list, remap_list);
+ return 0;
+ }
+
+ /* head->last is inside huge page mapping but not 2MB aligned
+ * or head size is smaller than 512 pages
+ */
+ if (head->last >= start_align && head->last <= last_align_down &&
+ (!IS_ALIGNED(head->last, 512) ||
+ (head->last - head->start) < 512)) {
+
+ list_add(&head->update_list, remap_list);
+ return 0;
+ }
+ }
}
+
return r;
}
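For the split-head side mentioned above, the analogous refactor might look
like this (a sketch, untested; it keeps the original code's check on
new_start, which equals head->last + 1):

static int
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	unsigned long start_align = ALIGN(prange->start, 512);
	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
	bool huge_page_mapping = last_align_down > start_align;
	struct svm_range *head = NULL;
	int r;

	r = svm_range_split(prange, new_start, prange->last, &head);
	if (r)
		return r;

	/* the new range always goes on the insert list */
	list_add(&head->list, insert_list);

	/* head covers [old start, new_start - 1]; remap only when the
	 * split point new_start lands inside a huge page mapping and is
	 * not itself 2MB aligned
	 */
	if (huge_page_mapping &&
	    new_start > start_align && new_start < last_align_down &&
	    !IS_ALIGNED(new_start, 512))
		list_add(&head->update_list, remap_list);

	return 0;
}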