On 13.08.25 20:49, David Francis wrote:
> Add new GEM_OP_IOCTL option GET_MAPPING_INFO, which
> returns a list of mappings associated with a given bo, along with
> their positions and offsets.
> 
> Signed-off-by: David Francis <david.fran...@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 87 +++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  5 ++
>  include/uapi/drm/amdgpu_drm.h           | 21 +++++-
>  3 files changed, 112 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index d17cc7ba66d4..f66f6e1f1c52 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -948,6 +948,89 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
>       return r;
>  }
>  
> +/**
> + * amdgpu_gem_list_mappings - get information about a buffer's mappings
> + *
> + * @gobj: gem object
> + * @args: gem_op arguments
> + * @fpriv: drm file pointer
> + *
> + * num_entries is set as an input to the size of the user-allocated array of
> + * drm_amdgpu_gem_vm_entry stored at args->value.
> + * num_entries is sent back as output as the number of mappings the bo has.
> + * If that number is larger than the size of the array, the ioctl must
> + * be retried.
> + *
> + * Returns:
> + * 0 for success, -errno for errors.
> + */
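
Side note on the contract documented here: it means userspace ends up with a retry loop roughly like the following (purely illustrative, assuming libdrm's drmCommandWriteRead() plus the UAPI additions from this patch; fd and handle come from the caller):

        struct drm_amdgpu_gem_op args = {
                .handle = handle,
                .op = AMDGPU_GEM_OP_GET_MAPPING_INFO,
        };
        struct drm_amdgpu_gem_vm_entry *entries = NULL;
        uint32_t alloc = 8;
        int r;

        for (;;) {
                free(entries);
                entries = calloc(alloc, sizeof(*entries));
                if (!entries)
                        break;  /* out of memory */

                args.num_entries = alloc;
                args.value = (uintptr_t)entries;
                r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_OP, &args, sizeof(args));
                if (r || args.num_entries <= alloc)
                        break;

                /* Array was too small; retry with the reported count. */
                alloc = args.num_entries;
        }

Not asking for changes here, just making the implied usage explicit.
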
> +static int amdgpu_gem_list_mappings(struct drm_gem_object *gobj, struct amdgpu_fpriv *fpriv,
> +                                       struct drm_amdgpu_gem_op *args)
> +{
> +     struct amdgpu_vm *avm = &fpriv->vm;
> +     struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
> +     struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(avm, bo);
> +     struct drm_amdgpu_gem_vm_entry *vm_entries;
> +     struct amdgpu_bo_va_mapping *mapping;
> +     struct drm_exec exec;
> +     int num_mappings = 0;
> +     int ret;
> +
> +     if (args->padding)
> +             return -EINVAL;
> +
> +     vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
> +     if (!vm_entries)
> +             return -ENOMEM;
> +
> +     drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
> +                       DRM_EXEC_IGNORE_DUPLICATES, 0);
> +     drm_exec_until_all_locked(&exec) {
> +             if (gobj) {
> +                     ret = drm_exec_lock_obj(&exec, gobj);
> +                     drm_exec_retry_on_contention(&exec);
> +                     if (ret)
> +                             goto unlock_exec;
> +             }
> +
> +             ret = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
> +             drm_exec_retry_on_contention(&exec);
> +             if (ret)
> +                     goto unlock_exec;
> +     }
> +
> +     amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
> +             if (num_mappings < args->num_entries) {

> +                     vm_entries[num_mappings].start = mapping->start;
> +                     vm_entries[num_mappings].last = mapping->last;

Those two need to be multiplied by AMDGPU_GPU_PAGE_SIZE.
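
E.g. something like this (untested; and see the separate comment on the UAPI struct further down about returning a size instead of the last page):

        vm_entries[num_mappings].start = mapping->start * AMDGPU_GPU_PAGE_SIZE;
        vm_entries[num_mappings].last = mapping->last * AMDGPU_GPU_PAGE_SIZE;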

> +                     vm_entries[num_mappings].offset = mapping->offset;
> +                     vm_entries[num_mappings].flags = mapping->flags;
> +             }
> +             num_mappings += 1;
> +     }
> +
> +     amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
> +             if (num_mappings < args->num_entries) {

> +                     vm_entries[num_mappings].start = mapping->start;
> +                     vm_entries[num_mappings].last = mapping->last;

Here as well.

> +                     vm_entries[num_mappings].offset = mapping->offset;
> +                     vm_entries[num_mappings].flags = mapping->flags;
> +             }
> +             num_mappings += 1;
> +     }
> +
> +     if (num_mappings > 0 && num_mappings <= args->num_entries)
> +             ret = copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries));
> +
> +     args->num_entries = num_mappings;
> +
> +unlock_exec:
> +     drm_exec_fini(&exec);
> +     kvfree(vm_entries);
> +
> +     return ret;
> +}
> +
>  int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>                       struct drm_file *filp)
>  {
> @@ -1014,6 +1097,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>  
>               amdgpu_bo_unreserve(robj);
>               break;
> +     case AMDGPU_GEM_OP_GET_MAPPING_INFO:
> +             amdgpu_bo_unreserve(robj);

Yeah, that is not really 100% clean.

It would be better to use drm_exec() in amdgpu_gem_op_ioctl() instead.
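
Rough, untested sketch of what I mean (assuming fpriv = filp->driver_priv here, error handling abbreviated):

        struct drm_exec exec;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_lock_obj(&exec, gobj);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto out;

                r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto out;
        }

        /* dispatch the individual GEM_OPs while everything is locked */

out:
        drm_exec_fini(&exec);

Then amdgpu_gem_list_mappings() wouldn't need to take any locks itself and the extra unreserve for this case goes away.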

> +             r = amdgpu_gem_list_mappings(gobj, filp->driver_priv, args);
> +             break;
>       default:
>               amdgpu_bo_unreserve(robj);
>               r = -EINVAL;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index f9549f6b3d1f..5a63ae490b0e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -668,4 +668,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
>                                struct amdgpu_vm *vm,
>                                struct dma_fence **fence);
>  
> +#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
> +             list_for_each_entry(mapping, &bo_va->valids, list)
> +#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
> +             list_for_each_entry(mapping, &bo_va->invalids, list)
> +
>  #endif
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 59b423883e91..bc1e18c06ce4 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -802,6 +802,21 @@ union drm_amdgpu_wait_fences {
>  
>  #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO    0
>  #define AMDGPU_GEM_OP_SET_PLACEMENT          1
> +#define AMDGPU_GEM_OP_GET_MAPPING_INFO               2
> +
> +struct drm_amdgpu_gem_vm_entry {
> +     /* Start of mapping (in number of pages) */
> +     __u64 start;
> +
> +     /* End of mapping (in number of pages) */
> +     __u64 last;

Size please, not the end, to match the UAPI used to create mappings.
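
I.e. something along these lines (field names are just a suggestion, mirroring drm_amdgpu_gem_va's va_address/map_size, which are byte based):

        struct drm_amdgpu_gem_vm_entry {
                /* Start of mapping (byte address) */
                __u64 addr;
                /* Size of mapping in bytes */
                __u64 size;
                /* Offset into the BO in bytes */
                __u64 offset;
                /* Flags needed to recreate the mapping */
                __u64 flags;
        };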



> +
> +     /* Mapping offset */
> +     __u64 offset;
> +
> +     /* flags needed to recreate mapping */
> +     __u64 flags;
> +};
>  
>  /* Sets or returns a value associated with a buffer. */
>  struct drm_amdgpu_gem_op {
> @@ -809,8 +824,12 @@ struct drm_amdgpu_gem_op {
>       __u32   handle;
>       /** AMDGPU_GEM_OP_* */
>       __u32   op;
> -     /** Input or return value */
> +     /** Input or return value. For MAPPING_INFO op: pointer to array of struct drm_amdgpu_gem_vm_entry */
>       __u64   value;
> +     /** For MAPPING_INFO op: number of mappings (in/out) */
> +     __u32   num_entries;
> +
> +     __u32   padding;
>  };
>  
>  #define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT       (1 << 0)
