From: Honglei Huang <[email protected]>

Implement the main batch userptr allocation function and export it
through the AMDKFD API.

This adds:
- init_user_pages_batch(): initializes a batch allocation by setting
  up the interval tree of user ranges, registering a single MMU
  notifier that covers the whole VA span, and getting the user pages
  for all ranges
- amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu_batch(): the main entry
  point for batch userptr allocation
- Function export in amdgpu_amdkfd.h

Signed-off-by: Honglei Huang <[email protected]>
---
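Reviewer note: below is a minimal, hypothetical caller sketch (not part
of this patch) showing how the new entry point could be invoked for two
userptr ranges. The example_* name, the VA/size values and the flag
combination are illustrative only; the ioctl plumbing that builds the
ranges array is not shown, and error handling/GPU VM mapping are omitted.

/* Hypothetical usage sketch, illustrative only. */
static int example_batch_userptr_alloc(struct amdgpu_device *adev,
                                       void *drm_priv, uint64_t gpu_va)
{
        struct kfd_ioctl_userptr_range ranges[2] = {
                { .start = 0x7f0000000000ULL, .size = 0x200000 }, /* 2 MiB */
                { .start = 0x7f0000400000ULL, .size = 0x100000 }, /* 1 MiB */
        };
        struct kgd_mem *mem;
        uint64_t offset = 0;

        /* Assumption for this sketch: the BO size is the sum of the
         * range sizes.
         */
        return amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu_batch(
                adev, gpu_va, ranges[0].size + ranges[1].size, drm_priv,
                &mem, &offset, ranges, ARRAY_SIZE(ranges),
                KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
                KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
                false /* criu_resume */);
}
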
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 264 ++++++++++++++++++
 1 file changed, 264 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7aca1868d..bc075f5f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1254,6 +1254,151 @@ static int set_user_pages_batch(struct ttm_tt *ttm,
        return 0;
 }
 
+static int init_user_pages_batch(struct kgd_mem *mem,
+                                struct kfd_ioctl_userptr_range *ranges,
+                                uint32_t num_ranges, bool criu_resume,
+                                uint64_t user_addr, uint32_t size)
+{
+       struct amdkfd_process_info *process_info = mem->process_info;
+       struct amdgpu_bo *bo = mem->bo;
+       struct ttm_operation_ctx ctx = { true, false };
+       struct hmm_range *range;
+       uint64_t va_min = ULLONG_MAX, va_max = 0;
+       int ret = 0;
+       uint32_t i;
+
+       if (!num_ranges || !ranges)
+               return -EINVAL;
+
+       mutex_lock(&process_info->lock);
+
+       mem->user_ranges = kvcalloc(num_ranges, sizeof(struct user_range_info),
+                                   GFP_KERNEL);
+
+       if (!mem->user_ranges) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       mem->num_user_ranges = num_ranges;
+
+       mem->user_ranges_itree = RB_ROOT_CACHED;
+
+       ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
+       if (ret) {
+               pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
+               goto out;
+       }
+
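+       /*
+        * Add every user range to the interval tree and track the overall
+        * [va_min, va_max) span so that a single MMU notifier can cover
+        * all ranges.
+        */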
+       for (i = 0; i < num_ranges; i++) {
+               uint64_t range_end;
+
+               mem->user_ranges[i].start = ranges[i].start;
+               mem->user_ranges[i].size = ranges[i].size;
+               mem->user_ranges[i].range = NULL;
+
+               range_end = ranges[i].start + ranges[i].size;
+
+               mem->user_ranges[i].it_node.start = ranges[i].start;
+               mem->user_ranges[i].it_node.last = range_end - 1;
+               interval_tree_insert(&mem->user_ranges[i].it_node,
+                                    &mem->user_ranges_itree);
+
+               if (ranges[i].start < va_min)
+                       va_min = ranges[i].start;
+               if (range_end > va_max)
+                       va_max = range_end;
+
+               pr_debug("Initializing userptr range %u: addr=0x%llx 
size=0x%llx\n",
+                        i, mem->user_ranges[i].start, 
mem->user_ranges[i].size);
+       }
+
+       mem->batch_va_min = va_min;
+       mem->batch_va_max = va_max;
+
+       pr_debug("Batch userptr: registering single notifier for span [0x%llx - 
0x%llx)\n",
+                va_min, va_max);
+
+       ret = mmu_interval_notifier_insert(&mem->batch_notifier,
+                                          current->mm, va_min, va_max - va_min,
+                                          &amdgpu_amdkfd_hsa_batch_ops);
+       if (ret) {
+               pr_err("%s: Failed to register batch MMU notifier: %d\n",
+                      __func__, ret);
+               goto err_cleanup_ranges;
+       }
+
+       if (criu_resume) {
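+               /*
+                * On CRIU resume the user pages are not pinned here; mark
+                * the BO invalid so it is validated later by the userptr
+                * restore worker.
+                */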
+               mutex_lock(&process_info->notifier_lock);
+               mem->invalid++;
+               mutex_unlock(&process_info->notifier_lock);
+               mutex_unlock(&process_info->lock);
+               return 0;
+       }
+
+       for (i = 0; i < num_ranges; i++) {
+               ret = get_user_pages_batch(
+                       current->mm, mem, &mem->user_ranges[i], &range,
+                       amdgpu_ttm_tt_is_readonly(bo->tbo.ttm));
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               pr_debug("Failed to get user pages for range 
%u, try again\n", i);
+                       else
+                               pr_err("%s: Failed to get user pages for range 
%u: %d\n",
+                                      __func__, i, ret);
+                       goto err_unregister;
+               }
+
+               mem->user_ranges[i].range = range;
+       }
+
+       ret = amdgpu_bo_reserve(bo, true);
+       if (ret) {
+               pr_err("%s: Failed to reserve BO\n", __func__);
+               goto release_pages;
+       }
+
+       if (!bo->tbo.ttm->pages) {
+               pr_err("%s: TTM pages array is NULL\n", __func__);
+               ret = -EINVAL;
+               amdgpu_bo_unreserve(bo);
+               goto release_pages;
+       }
+
+       set_user_pages_batch(bo->tbo.ttm, mem->user_ranges, num_ranges);
+
+       amdgpu_bo_placement_from_domain(bo, mem->domain);
+       ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (ret)
+               pr_err("%s: failed to validate BO\n", __func__);
+
+       amdgpu_bo_unreserve(bo);
+
+release_pages:
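+       /*
+        * Both the success path and the error paths fall through here:
+        * finish the get_user_pages operation for every range that was
+        * populated.
+        */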
+       for (i = 0; i < num_ranges; i++) {
+               if (mem->user_ranges[i].range)
+                       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm,
+                                                         mem->user_ranges[i].range);
+       }
+
+err_unregister:
+       if (ret && mem->batch_notifier.mm) {
+               mmu_interval_notifier_remove(&mem->batch_notifier);
+               mem->batch_notifier.mm = NULL;
+       }
+err_cleanup_ranges:
+       if (ret) {
+               for (i = 0; i < num_ranges; i++)
+                       mem->user_ranges[i].range = NULL;
+       }
+
+out:
+       mutex_unlock(&process_info->lock);
+       return ret;
+}
+
 /* Reserving a BO and its page table BOs must happen atomically to
  * avoid deadlocks. Some operations update multiple VMs at once. Track
  * all the reservation info in a context structure. Optionally a sync
@@ -2012,6 +2157,125 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        return ret;
 }
 
+int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu_batch(
+       struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv,
+       struct kgd_mem **mem, uint64_t *offset,
+       struct kfd_ioctl_userptr_range *ranges, uint32_t num_ranges,
+       uint32_t flags, bool criu_resume)
+{
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+       struct amdgpu_bo *bo;
+       struct drm_gem_object *gobj = NULL;
+       u32 domain, alloc_domain;
+       uint64_t aligned_size;
+       int8_t xcp_id = -1;
+       u64 alloc_flags;
+       int ret;
+
+       if (!(flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
+               pr_err("Batch allocation requires USERPTR flag\n");
+               return -EINVAL;
+       }
+
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM) {
+               pr_err("Batch userptr does not support AQL queue\n");
+               return -EINVAL;
+       }
+
+       domain = AMDGPU_GEM_DOMAIN_GTT;
+       alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+       alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
+
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
+               alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
+               alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
+               alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
+
+       *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+       if (!*mem) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       INIT_LIST_HEAD(&(*mem)->attachments);
+       mutex_init(&(*mem)->lock);
+       (*mem)->aql_queue = false;
+
+       aligned_size = PAGE_ALIGN(size);
+
+       (*mem)->alloc_flags = flags;
+
+       amdgpu_sync_create(&(*mem)->sync);
+
+       ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
+                                             xcp_id);
+       if (ret) {
+               pr_debug("Insufficient memory\n");
+               goto err_reserve_limit;
+       }
+
+       pr_debug("\tcreate BO VA 0x%llx size 0x%llx for batch userptr 
(ranges=%u)\n",
+                va, size, num_ranges);
+
+       ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain,
+                                      alloc_flags, ttm_bo_type_device, NULL,
+                                      &gobj, xcp_id + 1);
+       if (ret) {
+               pr_debug("Failed to create BO on domain %s. ret %d\n",
+                        domain_string(alloc_domain), ret);
+               goto err_bo_create;
+       }
+
+       ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
+       if (ret) {
+               pr_debug("Failed to allow vma node access. ret %d\n", ret);
+               goto err_node_allow;
+       }
+
+       ret = drm_gem_handle_create(adev->kfd.client.file, gobj,
+                                   &(*mem)->gem_handle);
+       if (ret)
+               goto err_gem_handle_create;
+
+       bo = gem_to_amdgpu_bo(gobj);
+       bo->kfd_bo = *mem;
+       bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
+
+       (*mem)->bo = bo;
+       (*mem)->va = va;
+       (*mem)->domain = domain;
+       (*mem)->mapped_to_gpu_memory = 0;
+       (*mem)->process_info = avm->process_info;
+
+       add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, ranges[0].start);
+
+       ret = init_user_pages_batch(*mem, ranges, num_ranges, criu_resume, va,
+                                   aligned_size);
+       if (ret) {
+               pr_err("Failed to initialize batch user pages: %d\n", ret);
+               goto allocate_init_user_pages_failed;
+       }
+
+       return 0;
+
+allocate_init_user_pages_failed:
+       remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+       drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
+err_gem_handle_create:
+       drm_vma_node_revoke(&gobj->vma_node, drm_priv);
+err_node_allow:
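+       /* Don't unreserve system mem limit twice */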
+       goto err_reserve_limit;
+err_bo_create:
+       amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
+err_reserve_limit:
+       amdgpu_sync_free(&(*mem)->sync);
+       mutex_destroy(&(*mem)->lock);
+       if (gobj)
+               drm_gem_object_put(gobj);
+       else
+               kfree(*mem);
+err:
+       return ret;
+}
+
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
                uint64_t *size)
-- 
2.34.1
