From: Honglei Huang <[email protected]>

Integrate batch userptr allocation into the KFD ioctl interface.

This adds:
- kfd_copy_userptr_ranges(): validates and copies batch range data
  from userspace, checking that each range is non-empty and
  page-aligned and that the ranges sum to the requested allocation size
- kfd_ioctl_alloc_memory_of_gpu(): modified to detect batch mode and
  route the request to the appropriate allocation function
- SVM conflict checking extended to cover each batch range

Signed-off-by: Honglei Huang <[email protected]>
---
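
For reviewers, a minimal userspace sketch of the intended batch usage
(not part of the commit message). struct kfd_ioctl_userptr_ranges_data,
struct kfd_ioctl_userptr_range and the USERPTR_BATCH flag come from the
uAPI additions in this series; the trailing-array layout is inferred
from the offsetof() use below. kfd_fd/buf0/buf1/gpu_va/gpu_id are
placeholders, the usual <stdint.h>/<sys/ioctl.h>/kfd_ioctl.h includes
are assumed, and error handling and access flags are omitted:

	struct kfd_ioctl_alloc_memory_of_gpu_args args = {0};
	struct kfd_ioctl_userptr_ranges_data *data;

	/* fixed header followed by two ranges; calloc() zeroes .reserved */
	data = calloc(1, sizeof(*data) + 2 * sizeof(data->ranges[0]));
	data->num_ranges = 2;
	data->ranges[0].start = (uint64_t)(uintptr_t)buf0; /* page-aligned */
	data->ranges[0].size = buf0_size;                  /* page-aligned */
	data->ranges[1].start = (uint64_t)(uintptr_t)buf1;
	data->ranges[1].size = buf1_size;

	args.va_addr = gpu_va;
	args.gpu_id = gpu_id;
	args.size = buf0_size + buf1_size; /* must equal the sum of the ranges */
	args.flags = KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
		     KFD_IOC_ALLOC_MEM_FLAGS_USERPTR_BATCH;
	/* in batch mode, mmap_offset points at the ranges data */
	args.mmap_offset = (uint64_t)(uintptr_t)data;

	ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &args);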
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 128 +++++++++++++++++++++--
 1 file changed, 122 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index a72cc980a..d0b56d5cc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1047,10 +1047,79 @@ static int kfd_ioctl_get_available_memory(struct file *filep,
        return 0;
 }
 
+static int kfd_copy_userptr_ranges(void __user *user_data, uint64_t expected_size,
+                                  struct kfd_ioctl_userptr_range **ranges_out,
+                                  uint32_t *num_ranges_out)
+{
+       struct kfd_ioctl_userptr_ranges_data ranges_header;
+       struct kfd_ioctl_userptr_range *ranges;
+       uint64_t total_size = 0;
+       uint32_t num_ranges;
+       size_t header_size;
+       uint32_t i;
+
+       if (!user_data) {
+               pr_err("Batch allocation: ranges pointer is NULL\n");
+               return -EINVAL;
+       }
+
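+       /* read the fixed-size header first to learn how many ranges follow */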
+       header_size = offsetof(struct kfd_ioctl_userptr_ranges_data, ranges);
+       if (copy_from_user(&ranges_header, user_data, header_size)) {
+               pr_err("Failed to copy ranges data header from user space\n");
+               return -EFAULT;
+       }
+
+       num_ranges = ranges_header.num_ranges;
+       if (num_ranges == 0) {
+               pr_err("Batch allocation: invalid number of ranges %u\n", 
num_ranges);
+               return -EINVAL;
+       }
+
+       if (ranges_header.reserved != 0) {
+               pr_err("Batch allocation: reserved field must be 0\n");
+               return -EINVAL;
+       }
+
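+       /* kvmalloc_array() fails cleanly if num_ranges * sizeof(*ranges) overflows */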
+       ranges = kvmalloc_array(num_ranges, sizeof(*ranges), GFP_KERNEL);
+       if (!ranges)
+               return -ENOMEM;
+
+       if (copy_from_user(ranges, user_data + header_size,
+                          num_ranges * sizeof(*ranges))) {
+               pr_err("Failed to copy ranges from user space\n");
+               kvfree(ranges);
+               return -EFAULT;
+       }
+
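+       /* every range must be non-empty and page-aligned; sum sizes with overflow check */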
+       for (i = 0; i < num_ranges; i++) {
+               if (!ranges[i].start || !ranges[i].size ||
+                   (ranges[i].start & ~PAGE_MASK) ||
+                   (ranges[i].size & ~PAGE_MASK)) {
+                       pr_err("Invalid range %u: start=0x%llx size=0x%llx\n",
+                              i, ranges[i].start, ranges[i].size);
+                       kvfree(ranges);
+                       return -EINVAL;
+               }
+               if (check_add_overflow(total_size, ranges[i].size, &total_size)) {
+                       pr_err("Batch allocation: total range size overflows\n");
+                       kvfree(ranges);
+                       return -EINVAL;
+               }
+       }
+
+       if (total_size != expected_size) {
+               pr_err("Size mismatch: provided %llu != calculated %llu\n",
+                      expected_size, total_size);
+               kvfree(ranges);
+               return -EINVAL;
+       }
+
+       *ranges_out = ranges;
+       *num_ranges_out = num_ranges;
+       return 0;
+}
+
 static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                                        struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
+       struct kfd_ioctl_userptr_range *ranges = NULL;
        struct kfd_process_device *pdd;
        void *mem;
        struct kfd_node *dev;
@@ -1058,16 +1127,32 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
        long err;
        uint64_t offset = args->mmap_offset;
        uint32_t flags = args->flags;
+       uint32_t num_ranges = 0;
+       bool is_batch = false;
 
        if (args->size == 0)
                return -EINVAL;
 
+       if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
+           (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR_BATCH)) {
+               is_batch = true;
+       }
+
        if (p->context_id != KFD_CONTEXT_ID_PRIMARY && (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
                pr_debug("USERPTR is not supported on non-primary kfd_process\n");
 
                return -EOPNOTSUPP;
        }
 
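+       /*
+        * In batch mode, args->mmap_offset carries a user pointer to the
+        * ranges data rather than a single userptr address, so clear the
+        * offset handed to the allocator below.
+        */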
+       if (is_batch) {
+               err = kfd_copy_userptr_ranges((void __user *)args->mmap_offset,
+                                             args->size, &ranges, &num_ranges);
+               if (err)
+                       return err;
+
+               offset = 0;
+       }
+
 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
        /* Flush pending deferred work to avoid racing with deferred actions
         * from previous memory map changes (e.g. munmap).
@@ -1086,13 +1171,15 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                pr_err("Address: 0x%llx already allocated by SVM\n",
                        args->va_addr);
                mutex_unlock(&p->svms.lock);
-               return -EADDRINUSE;
+               err = -EADDRINUSE;
+               goto err_free_ranges;
        }
 
        /* When register user buffer check if it has been registered by svm by
         * buffer cpu virtual address.
+        * For batch mode, check each range individually below.
         */
-       if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
+       if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) && !is_batch &&
            interval_tree_iter_first(&p->svms.objects,
                                     args->mmap_offset >> PAGE_SHIFT,
            (args->mmap_offset  + args->size - 1) >> PAGE_SHIFT)) {
@@ -1102,6 +1189,22 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                return -EADDRINUSE;
        }
 
+       /* Check each userptr range for SVM conflicts in batch mode */
+       if (is_batch) {
+               uint32_t i;
+
+               for (i = 0; i < num_ranges; i++) {
+                       if (interval_tree_iter_first(&p->svms.objects,
+                                                    ranges[i].start >> PAGE_SHIFT,
+                                                    (ranges[i].start + ranges[i].size - 1) >> PAGE_SHIFT)) {
+                               pr_err("Userptr range %u (0x%llx) already allocated by SVM\n",
+                                      i, ranges[i].start);
+                               mutex_unlock(&p->svms.lock);
+                               err = -EADDRINUSE;
+                               goto err_free_ranges;
+                       }
+               }
+       }
+
        mutex_unlock(&p->svms.lock);
 #endif
        mutex_lock(&p->mutex);
@@ -1149,10 +1252,17 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                }
        }
 
-       err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
-               dev->adev, args->va_addr, args->size,
-               pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
-               flags, false);
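+       /* batch requests hand their validated ranges to the allocator */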
+       if (is_batch) {
+               err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu_batch(
+                       dev->adev, args->va_addr, args->size, pdd->drm_priv,
+                       (struct kgd_mem **)&mem, &offset, ranges, num_ranges,
+                       flags, false);
+       } else {
+               err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+                       dev->adev, args->va_addr, args->size,
+                       pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
+                       flags, false);
+       }
 
        if (err)
                goto err_unlock;
@@ -1184,6 +1294,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file 
*filep,
                args->mmap_offset = KFD_MMAP_TYPE_MMIO
                                        | KFD_MMAP_GPU_ID(args->gpu_id);
 
+       if (is_batch)
+               kvfree(ranges);
+
        return 0;
 
 err_free:
@@ -1193,6 +1306,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file 
*filep,
 err_pdd:
 err_large_bar:
        mutex_unlock(&p->mutex);
+err_free_ranges:
+       kvfree(ranges);
        return err;
 }
 
-- 
2.34.1
