On Mon, Sep 8, 2025 at 4:49 AM Prike Liang <prike.li...@amd.com> wrote:
>
> Add the userq object virtual address get(), mapped() and put()
> helpers for tracking the userq object VA usage.

This adds too much queue-specific info to the user queue structure.
Just set the bo_va flag when you validate the VAs in patch 7.  Then,
when we validate the BOs in amdgpu_userq_restore_worker(), verify that
all of the buffers with the bo_va flag are present.  If they are not,
fail the buffer validation and set the error on the queue(s).  If you
want a per-queue list, just use a list_head in the userq structure for
the critical VAs, along these lines:
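
(Untested sketch; the userq_critical flag, userq_va_entry and
queue->va_list below are illustrative names, not existing code.)

    /* In amdgpu_userq_input_va_validate() (patch 7): once the mapping
     * passes the range/size checks, tag it and optionally track it. */
    struct userq_va_entry {                 /* hypothetical helper */
            u64 va;
            struct list_head node;
    };

    mapping->bo_va->userq_critical = true;          /* new bo_va flag */
    list_add_tail(&entry->node, &queue->va_list);   /* optional per-queue list */

    /* In amdgpu_userq_restore_worker(): while validating the BOs, make
     * sure every critical VA is still mapped before mapping the queue. */
    list_for_each_entry(entry, &queue->va_list, node) {
            if (!amdgpu_vm_bo_lookup_mapping(queue->vm,
                    (entry->va & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT)) {
                    queue->state = AMDGPU_USERQ_STATE_HUNG; /* or an error state */
                    r = -EINVAL;
                    break;
            }
    }

That keeps the bookkeeping on the mapping itself instead of duplicating
every VA in amdgpu_usermode_queue.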

Alex


>
> Signed-off-by: Prike Liang <prike.li...@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c  | 172 ++++++++++++++++++++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h  |  14 ++
>  drivers/gpu/drm/amd/amdgpu/mes_userqueue.c |   4 +
>  3 files changed, 189 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> index 739135c3f450..5aebce63d86f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
> @@ -76,6 +76,174 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
>         return r;
>  }
>
> +int amdgpu_userq_buffer_va_get(struct amdgpu_vm *vm, u64 addr)
> +{
> +       struct amdgpu_bo_va_mapping *mapping;
> +       u64 user_addr;
> +       int r;
> +
> +       user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
> +       r = amdgpu_bo_reserve(vm->root.bo, false);
> +       if (r)
> +               return r;
> +
> +       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
> +       if (!mapping)
> +               goto out_err;
> +
> +       /*
> +        * Need to unify the following userq va reference.
> +        *  mqd  bo
> +        *  rptr bo
> +        *  wptr bo
> +        *  eop  bo
> +        *  shadow bo
> +        *  csa bo
> +        */
> +       /*amdgpu_bo_ref(mapping->bo_va->base.bo);*/
> +       mapping->bo_va->queue_refcount++;
> +
> +       amdgpu_bo_unreserve(vm->root.bo);
> +       return 0;
> +
> +out_err:
> +       amdgpu_bo_unreserve(vm->root.bo);
> +       return -EINVAL;
> +}
> +
> +bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
> +{
> +       struct amdgpu_bo_va_mapping *mapping;
> +       u64 user_addr;
> +       bool r;
> +
> +       user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
> +
> +       if (amdgpu_bo_reserve(vm->root.bo, false))
> +               return false;
> +
> +       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
> +       if (!IS_ERR_OR_NULL(mapping) && mapping->bo_va->queue_refcount > 0)
> +               r = true;
> +       else
> +               r = false;
> +       amdgpu_bo_unreserve(vm->root.bo);
> +
> +       return r;
> +}
> +
> +bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
> +                       struct amdgpu_usermode_queue *queue)
> +{
> +
> +       switch (queue->queue_type) {
> +       case AMDGPU_HW_IP_GFX:
> +               if (amdgpu_userq_buffer_va_mapped(vm, queue->queue_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->rptr_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->wptr_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->shadow_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->csa_va))
> +                       return true;
> +               break;
> +       case AMDGPU_HW_IP_COMPUTE:
> +               if (amdgpu_userq_buffer_va_mapped(vm, queue->queue_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->rptr_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->wptr_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->eop_va))
> +                       return true;
> +               break;
> +       case AMDGPU_HW_IP_DMA:
> +               if (amdgpu_userq_buffer_va_mapped(vm, queue->queue_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->rptr_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->wptr_va) ||
> +                   amdgpu_userq_buffer_va_mapped(vm, queue->csa_va))
> +                       return true;
> +               break;
> +       default:
> +               break;
> +       }
> +
> +       return false;
> +}
> +
> +int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr)
> +{
> +       struct amdgpu_bo_va_mapping *mapping;
> +       u64 user_addr;
> +       int r;
> +
> +       user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
> +       r = amdgpu_bo_reserve(vm->root.bo, false);
> +       if (r)
> +               return r;
> +
> +       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
> +       if (!mapping)
> +               goto out_err;
> +       /*
> +        * TODO: It requires figuring out the root cause of userq va mapping
> +        * reference imbalance issue.
> +        */
> +       /*amdgpu_bo_unref(&mapping->bo_va->base.bo);*/
> +       mapping->bo_va->queue_refcount--;
> +
> +       amdgpu_bo_unreserve(vm->root.bo);
> +       return 0;
> +
> +out_err:
> +       amdgpu_bo_unreserve(vm->root.bo);
> +       return -EINVAL;
> +}
> +
> +static void amdgpu_userq_buffer_vas_get(struct amdgpu_vm *vm,
> +                       struct amdgpu_usermode_queue *queue)
> +{
> +
> +
> +       amdgpu_userq_buffer_va_get(vm, queue->queue_va);
> +       amdgpu_userq_buffer_va_get(vm, queue->rptr_va);
> +       amdgpu_userq_buffer_va_get(vm, queue->wptr_va);
> +
> +       switch (queue->queue_type) {
> +       case AMDGPU_HW_IP_GFX:
> +               amdgpu_userq_buffer_va_get(vm, queue->shadow_va);
> +               amdgpu_userq_buffer_va_get(vm, queue->csa_va);
> +               break;
> +       case AMDGPU_HW_IP_COMPUTE:
> +               amdgpu_userq_buffer_va_get(vm, queue->eop_va);
> +               break;
> +       case AMDGPU_HW_IP_DMA:
> +               amdgpu_userq_buffer_va_get(vm, queue->csa_va);
> +               break;
> +       default:
> +               break;
> +       }
> +}
> +
> +int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
> +                       struct amdgpu_usermode_queue *queue)
> +{
> +       amdgpu_userq_buffer_va_put(vm, queue->queue_va);
> +       amdgpu_userq_buffer_va_put(vm, queue->rptr_va);
> +       amdgpu_userq_buffer_va_put(vm, queue->wptr_va);
> +
> +       switch (queue->queue_type) {
> +       case AMDGPU_HW_IP_GFX:
> +               amdgpu_userq_buffer_va_put(vm, queue->shadow_va);
> +               amdgpu_userq_buffer_va_put(vm, queue->csa_va);
> +               break;
> +       case AMDGPU_HW_IP_COMPUTE:
> +               amdgpu_userq_buffer_va_put(vm, queue->eop_va);
> +               break;
> +       case AMDGPU_HW_IP_DMA:
> +               amdgpu_userq_buffer_va_put(vm, queue->csa_va);
> +               break;
> +       default:
> +               break;
> +       }
> +       return 0;
> +}
> +
>  static int
>  amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
>                           struct amdgpu_usermode_queue *queue)
> @@ -444,6 +612,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
>         queue->vm = &fpriv->vm;
>         queue->priority = priority;
>         queue->generation = amdgpu_vm_generation(adev, &fpriv->vm);
> +       queue->queue_va = args->in.queue_va;
> +       queue->rptr_va = args->in.rptr_va;
> +       queue->wptr_va = args->in.wptr_va;
>
>         db_info.queue_type = queue->queue_type;
>         db_info.doorbell_handle = queue->doorbell_handle;
> @@ -474,7 +645,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
>                 goto unlock;
>         }
>
> -
>         qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
>         if (qid < 0) {
>                 drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> index 46e927df67c1..39d9bc6fc47a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
> @@ -53,6 +53,13 @@ struct amdgpu_usermode_queue {
>         enum amdgpu_userq_state state;
>         uint64_t                doorbell_handle;
>         uint64_t                doorbell_index;
> +       uint64_t                queue_va;
> +       uint64_t                rptr_va;
> +       uint64_t                wptr_va;
> +       uint64_t                eop_va;
> +       uint64_t                shadow_va;
> +       uint64_t                csa_va;
> +
>         uint64_t                flags;
>         struct amdgpu_mqd_prop  *userq_prop;
>         struct amdgpu_userq_mgr *userq_mgr;
> @@ -136,4 +143,11 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
>
>  int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
>                         u64 expected_size);
> +int amdgpu_userq_buffer_va_get(struct amdgpu_vm *vm, u64 addr);
> +bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr);
> +bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
> +                       struct amdgpu_usermode_queue *queue);
> +int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr);
> +int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
> +                       struct amdgpu_usermode_queue *queue);
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> index 3bf328eb7b69..431397158fe5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> @@ -260,6 +260,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
>                 userq_props->hqd_active = false;
>                 userq_props->tmz_queue =
>                         mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
> +               queue->eop_va = compute_mqd->eop_va;
>                 kfree(compute_mqd);
>         } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
>                 struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
> @@ -281,6 +282,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
>                 userq_props->csa_addr = mqd_gfx_v11->csa_va;
>                 userq_props->tmz_queue =
>                         mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
> +               queue->shadow_va = mqd_gfx_v11->shadow_va;
> +               queue->csa_va = mqd_gfx_v11->csa_va;
>
>                 if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
>                                         shadow_info.shadow_size))
> @@ -308,6 +311,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
>                         goto free_mqd;
>
>                 userq_props->csa_addr = mqd_sdma_v11->csa_va;
> +               queue->csa_va = mqd_sdma_v11->csa_va;
>                 kfree(mqd_sdma_v11);
>         }
>
> --
> 2.34.1
>
