Re: [PATCH drm-misc-next v2 5/7] drm/gpuvm: add an abstraction for a VM / BO combination

2023-09-07 Thread Boris Brezillon
On Thu, 7 Sep 2023 11:41:11 +0200
Danilo Krummrich  wrote:

> On 9/7/23 10:42, Boris Brezillon wrote:
> > On Wed,  6 Sep 2023 23:47:13 +0200
> > Danilo Krummrich  wrote:
> >   
> >> +void drm_gpuvm_bo_destroy(struct kref *kref);  
> > 
> > I usually keep kref's release functions private so people are not
> > tempted to use them.  
> 
> I think I did that because drm_gpuvm_bo_put() needs it.

Yeah, but you can move the drm_gpuvm_bo_put() implementation to the C
file and make this one private.
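
I.e., something along these lines -- a sketch of that suggestion only, reusing
the destroy body already in the patch (exporting drm_gpuvm_bo_put() is an
implied consequence, since drivers still need to call it):

/* drivers/gpu/drm/drm_gpuva_mgr.c */

static void
drm_gpuvm_bo_destroy(struct kref *kref)   /* no longer in the public header */
{
        struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo, kref);
        const struct drm_gpuvm_ops *ops = vm_bo->vm->ops;

        drm_gem_object_put(vm_bo->obj);

        if (ops && ops->vm_bo_free)
                ops->vm_bo_free(vm_bo);
        else
                kfree(vm_bo);
}

void
drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
{
        kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);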

> 
> >   
> >> +
> >> +/**
> >> + * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
> >> + * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
> >> + *
> >> + * This function acquires an additional reference to @vm_bo. It is illegal to
> >> + * call this without already holding a reference. No locks required.
> >> + */
> >> +static inline struct drm_gpuvm_bo *
> >> +drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
> >> +{
> >> +  kref_get(&vm_bo->kref);
> >> +  return vm_bo;
> >> +}
> >> +
> >> +/**
> >> + * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
> >> + * @vm_bo: the &drm_gpuvm_bo to release the reference of
> >> + *
> >> + * This releases a reference to @vm_bo.
> >> + */
> >> +static inline void
> >> +drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
> >> +{
> >> +  kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);  
> > 
> > nit: can we have
> > 
> > if (vm_bo)
> > kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
> > 
> > so we don't have to bother testing the vm_bo value in the error/cleanup
> > paths?
> >   
> >> +}
> >> +  
> >   
> 



Re: [PATCH drm-misc-next v2 5/7] drm/gpuvm: add an abstraction for a VM / BO combination

2023-09-07 Thread Danilo Krummrich

On 9/7/23 10:42, Boris Brezillon wrote:

> On Wed,  6 Sep 2023 23:47:13 +0200
> Danilo Krummrich  wrote:
> 
>> +void drm_gpuvm_bo_destroy(struct kref *kref);
> 
> I usually keep kref's release functions private so people are not
> tempted to use them.

I think I did that because drm_gpuvm_bo_put() needs it.

>> +
>> +/**
>> + * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
>> + * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
>> + *
>> + * This function acquires an additional reference to @vm_bo. It is illegal to
>> + * call this without already holding a reference. No locks required.
>> + */
>> +static inline struct drm_gpuvm_bo *
>> +drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
>> +{
>> +	kref_get(&vm_bo->kref);
>> +	return vm_bo;
>> +}
>> +
>> +/**
>> + * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
>> + * @vm_bo: the &drm_gpuvm_bo to release the reference of
>> + *
>> + * This releases a reference to @vm_bo.
>> + */
>> +static inline void
>> +drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
>> +{
>> +	kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
> 
> nit: can we have
> 
> if (vm_bo)
> 	kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
> 
> so we don't have to bother testing the vm_bo value in the error/cleanup
> paths?
> 
>> +}
>> +






Re: [PATCH drm-misc-next v2 5/7] drm/gpuvm: add an abstraction for a VM / BO combination

2023-09-07 Thread Danilo Krummrich

On 9/7/23 10:16, Boris Brezillon wrote:

> On Wed,  6 Sep 2023 23:47:13 +0200
> Danilo Krummrich  wrote:
> 
>> @@ -812,15 +967,20 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
>>  /**
>>   * drm_gpuva_link() - link a &drm_gpuva
>>   * @va: the &drm_gpuva to link
>> + * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
>>   *
>> - * This adds the given &va to the GPU VA list of the &drm_gem_object it is
>> - * associated with.
>> + * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
>> + * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
>> + *
>> + * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
>> + * reference of the latter is taken.
>>   *
>>   * This function expects the caller to protect the GEM's GPUVA list against
>> - * concurrent access using the GEMs dma_resv lock.
>> + * concurrent access using either the GEMs dma_resv lock or a driver specific
>> + * lock set through drm_gem_gpuva_set_lock().
>>   */
>>  void
>> -drm_gpuva_link(struct drm_gpuva *va)
>> +drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
>>  {
>>  	struct drm_gem_object *obj = va->gem.obj;
>> 
>> @@ -829,7 +989,10 @@ drm_gpuva_link(struct drm_gpuva *va)
>> 
>>  	drm_gem_gpuva_assert_lock_held(obj);
>> 
>> -	list_add_tail(&va->gem.entry, &obj->gpuva.list);
>> +	drm_gpuvm_bo_get(vm_bo);

> Guess we should WARN if vm_bo->obj != obj, at least.


>> +	list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
>> +	if (list_empty(&vm_bo->list.entry.gem))
>> +		list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
>>  }
>>  EXPORT_SYMBOL_GPL(drm_gpuva_link);
>> 
>> @@ -840,20 +1003,40 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
>>   * This removes the given &va from the GPU VA list of the &drm_gem_object it is
>>   * associated with.
>>   *
>> + * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
>> + * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
>> + * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
>> + *
>> + * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
>> + * the latter is dropped.
>> + *
>>   * This function expects the caller to protect the GEM's GPUVA list against
>> - * concurrent access using the GEMs dma_resv lock.
>> + * concurrent access using either the GEMs dma_resv lock or a driver specific
>> + * lock set through drm_gem_gpuva_set_lock().
>>   */
>>  void
>>  drm_gpuva_unlink(struct drm_gpuva *va)
>>  {
>>  	struct drm_gem_object *obj = va->gem.obj;
>> +	struct drm_gpuvm_bo *vm_bo;
>> 
>>  	if (unlikely(!obj))
>>  		return;
>> 
>>  	drm_gem_gpuva_assert_lock_held(obj);
>> 
>> +	vm_bo = __drm_gpuvm_bo_find(va->vm, obj);
> 
> Could we add a drm_gpuva::vm_bo field so we don't have to search the
> vm_bo here, and maybe drop the drm_gpuva::vm and drm_gpuva::obj fields,
> since drm_gpuvm_bo contains both the vm and the GEM object. I know that
> means adding an extra indirection + allocation for drivers that don't
> want to use drm_gpuva_[un]link(), but I wonder if it's not preferable
> over having the information duplicated (with potential mismatch)

I was considering that and I think we can add a drm_gpuva::vm_bo field and
get rid of drm_gpuva::obj. However, I think we need to keep drm_gpuva::vm,
since it is valid for ::obj to be NULL, hence it must be valid for ::vm_bo too.
Null objects are used for sparse mappings / userptr.
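
A hedged sketch of what that could look like (field placement and naming here
are illustrative only, not necessarily what a follow-up patch would do):

struct drm_gpuva {
        struct drm_gpuvm *vm;           /* kept: must stay valid for sparse / userptr VAs */
        struct drm_gpuvm_bo *vm_bo;     /* NULL when no GEM object backs the VA */

        /* ... existing va/gem/rb fields, with gem.obj dropped ... */
};

/* drm_gpuva_unlink() would then no longer need to search the GEM's vm_bo list: */
        struct drm_gpuvm_bo *vm_bo = va->vm_bo;

        if (unlikely(!vm_bo))
                return;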




>> +	if (WARN(!vm_bo, "GPUVA doesn't seem to be linked.\n"))
>> +		return;
>> +
>>  	list_del_init(&va->gem.entry);
>> +
>> +	/* This is the last mapping being unlinked for this GEM object, hence
>> +	 * also remove the VM_BO from the GEM's gpuva list.
>> +	 */
>> +	if (list_empty(&vm_bo->list.gpuva))
>> +		list_del_init(&vm_bo->list.entry.gem);
>> +	drm_gpuvm_bo_put(vm_bo);
>>  }
>>  EXPORT_SYMBOL_GPL(drm_gpuva_unlink);






Re: [PATCH drm-misc-next v2 5/7] drm/gpuvm: add an abstraction for a VM / BO combination

2023-09-07 Thread Boris Brezillon
On Wed,  6 Sep 2023 23:47:13 +0200
Danilo Krummrich  wrote:

> +void drm_gpuvm_bo_destroy(struct kref *kref);

I usually keep kref's release functions private so people are not
tempted to use them.

> +
> +/**
> + * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
> + * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
> + *
> + * This function acquires an additional reference to @vm_bo. It is illegal to
> + * call this without already holding a reference. No locks required.
> + */
> +static inline struct drm_gpuvm_bo *
> +drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
> +{
> + kref_get(&vm_bo->kref);
> + return vm_bo;
> +}
> +
> +/**
> + * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
> + * @vm_bo: the &drm_gpuvm_bo to release the reference of
> + *
> + * This releases a reference to @vm_bo.
> + */
> +static inline void
> +drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
> +{
> + kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);

nit: can we have

if (vm_bo)
kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);

so we don't have to bother testing the vm_bo value in the error/cleanup
paths?
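
For illustration, that lets a typical driver error path stay NULL-tolerant
without its own check (a sketch; the surrounding driver code and label names
are made up):

	struct drm_gpuvm_bo *vm_bo = NULL;
	int ret;

	/* ... steps that may jump to err before a vm_bo is ever obtained ... */

err:
	drm_gpuvm_bo_put(vm_bo);	/* no-op while vm_bo is still NULL */
	return ret;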

> +}
> +


Re: [PATCH drm-misc-next v2 5/7] drm/gpuvm: add an abstraction for a VM / BO combination

2023-09-07 Thread Boris Brezillon
On Wed,  6 Sep 2023 23:47:13 +0200
Danilo Krummrich  wrote:

> @@ -812,15 +967,20 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
>  /**
>   * drm_gpuva_link() - link a &drm_gpuva
>   * @va: the &drm_gpuva to link
> + * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
>   *
> - * This adds the given &va to the GPU VA list of the &drm_gem_object it is
> - * associated with.
> + * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
> + * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
> + *
> + * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
> + * reference of the latter is taken.
>   *
>   * This function expects the caller to protect the GEM's GPUVA list against
> - * concurrent access using the GEMs dma_resv lock.
> + * concurrent access using either the GEMs dma_resv lock or a driver specific
> + * lock set through drm_gem_gpuva_set_lock().
>   */
>  void
> -drm_gpuva_link(struct drm_gpuva *va)
> +drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
>  {
>   struct drm_gem_object *obj = va->gem.obj;
>  
> @@ -829,7 +989,10 @@ drm_gpuva_link(struct drm_gpuva *va)
>  
>   drm_gem_gpuva_assert_lock_held(obj);
>  
> - list_add_tail(&va->gem.entry, &obj->gpuva.list);
> + drm_gpuvm_bo_get(vm_bo);

Guess we should WARN if vm_bo->obj != obj, at least.
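
Reading that as a sanity check that the vm_bo handed in belongs to the same GEM
object as the VA, a sketch of such a check at the top of drm_gpuva_link() could
look like:

	if (WARN(vm_bo->obj != obj,
		 "drm_gpuva and drm_gpuvm_bo refer to different GEM objects\n"))
		return;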

> + list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
> + if (list_empty(&vm_bo->list.entry.gem))
> + list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
>  }
>  EXPORT_SYMBOL_GPL(drm_gpuva_link);
>  
> @@ -840,20 +1003,40 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
>   * This removes the given &va from the GPU VA list of the &drm_gem_object it is
>   * associated with.
>   *
> + * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
> + * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
> + * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
> + *
> + * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
> + * the latter is dropped.
> + *
>   * This function expects the caller to protect the GEM's GPUVA list against
> - * concurrent access using the GEMs dma_resv lock.
> + * concurrent access using either the GEMs dma_resv lock or a driver specific
> + * lock set through drm_gem_gpuva_set_lock().
>   */
>  void
>  drm_gpuva_unlink(struct drm_gpuva *va)
>  {
>   struct drm_gem_object *obj = va->gem.obj;
> + struct drm_gpuvm_bo *vm_bo;
>  
>   if (unlikely(!obj))
>   return;
>  
>   drm_gem_gpuva_assert_lock_held(obj);
>  
> + vm_bo = __drm_gpuvm_bo_find(va->vm, obj);

Could we add a drm_gpuva::vm_bo field so we don't have to search the
vm_bo here, and maybe drop the drm_gpuva::vm and drm_gpuva::obj fields,
since drm_gpuvm_bo contains both the vm and the GEM object. I know that
means adding an extra indirection + allocation for drivers that don't
want to use drm_gpuva_[un]link(), but I wonder if it's not preferable
over having the information duplicated (with potential mismatch)

> + if (WARN(!vm_bo, "GPUVA doesn't seem to be linked.\n"))
> + return;
> +
>   list_del_init(&va->gem.entry);
> +
> + /* This is the last mapping being unlinked for this GEM object, hence
> +  * also remove the VM_BO from the GEM's gpuva list.
> +  */
> + if (list_empty(&vm_bo->list.gpuva))
> + list_del_init(&vm_bo->list.entry.gem);
> + drm_gpuvm_bo_put(vm_bo);
>  }
>  EXPORT_SYMBOL_GPL(drm_gpuva_unlink);


[PATCH drm-misc-next v2 5/7] drm/gpuvm: add an abstraction for a VM / BO combination

2023-09-06 Thread Danilo Krummrich
This patch adds an abstraction layer between the drm_gpuva mappings of
a particular drm_gem_object and this GEM object itself. The abstraction
represents a combination of a drm_gem_object and drm_gpuvm. The
drm_gem_object holds a list of drm_gpuvm_bo structures (the structure
representing this abstraction), while each drm_gpuvm_bo contains a list of
mappings of this GEM object.
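
For a driver, the resulting map/unmap pattern looks roughly like this (a sketch
using only the helpers added or changed by this series; error handling and the
actual VA-space insertion are omitted):

	struct drm_gpuvm_bo *vm_bo;

	/* map: get (or create) the VM / BO combination for this GEM object */
	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	/* ... insert the drm_gpuva into the VA space ... */

	drm_gpuva_link(va, vm_bo);	/* takes its own vm_bo reference */
	drm_gpuvm_bo_put(vm_bo);	/* drop the reference obtain() took */

	/* unmap: unlinking drops the reference taken by drm_gpuva_link() and,
	 * for the last mapping, removes the vm_bo from the GEM's gpuva list
	 */
	drm_gpuva_unlink(va);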

This has multiple advantages:

1) We can use the drm_gpuvm_bo structure to attach it to various lists
   of the drm_gpuvm. This is useful for tracking external and evicted
   objects per VM, which is introduced in subsequent patches.

2) Finding mappings of a certain drm_gem_object mapped in a certain
   drm_gpuvm becomes much cheaper (see the lookup sketch below this list).

3) Drivers can derive and extend the structure to easily represent
   driver specific states of a BO for a certain GPUVM.
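
On point 2), such a lookup boils down to walking the GEM object's (usually
short) list of drm_gpuvm_bo structures instead of the whole VA space. A sketch
using only the fields this patch introduces (the caller is assumed to hold the
GEM's gpuva lock, i.e. the dma_resv lock or the driver specific lock):

	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva *va;

	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);	/* takes a reference if found */
	if (vm_bo) {
		list_for_each_entry(va, &vm_bo->list.gpuva, gem.entry) {
			/* ... every mapping of obj in this GPUVM ... */
		}
		drm_gpuvm_bo_put(vm_bo);
	}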

The idea of this abstraction was taken from amdgpu, hence the credit for
this idea goes to the developers of amdgpu.

Cc: Christian König 
Signed-off-by: Danilo Krummrich 
---
 drivers/gpu/drm/drm_gpuva_mgr.c| 210 +++--
 drivers/gpu/drm/nouveau/nouveau_uvmm.c |  77 ++---
 include/drm/drm_gem.h  |  48 --
 include/drm/drm_gpuva_mgr.h| 153 +-
 4 files changed, 437 insertions(+), 51 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuva_mgr.c b/drivers/gpu/drm/drm_gpuva_mgr.c
index 838277794990..da7a6e1aabe0 100644
--- a/drivers/gpu/drm/drm_gpuva_mgr.c
+++ b/drivers/gpu/drm/drm_gpuva_mgr.c
@@ -723,6 +723,161 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
 
+/**
+ * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * If provided by the driver, this function uses the &drm_gpuvm_ops
+ * vm_bo_alloc() callback to allocate.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+   struct drm_gem_object *obj)
+{
+   const struct drm_gpuvm_ops *ops = gpuvm->ops;
+   struct drm_gpuvm_bo *vm_bo;
+
+   if (ops && ops->vm_bo_alloc)
+   vm_bo = ops->vm_bo_alloc();
+   else
+   vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
+
+   if (unlikely(!vm_bo))
+   return NULL;
+
+   vm_bo->vm = gpuvm;
+   vm_bo->obj = obj;
+
+   kref_init(&vm_bo->kref);
+   INIT_LIST_HEAD(&vm_bo->list.gpuva);
+   INIT_LIST_HEAD(&vm_bo->list.entry.gem);
+
+   drm_gem_object_get(obj);
+
+   return vm_bo;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
+
+void
+drm_gpuvm_bo_destroy(struct kref *kref)
+{
+   struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+   const struct drm_gpuvm_ops *ops = vm_bo->vm->ops;
+
+   drm_gem_object_put(vm_bo->obj);
+
+   if (ops && ops->vm_bo_free)
+   ops->vm_bo_free(vm_bo);
+   else
+   kfree(vm_bo);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_destroy);
+
+static struct drm_gpuvm_bo *
+__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+   struct drm_gem_object *obj)
+{
+   struct drm_gpuvm_bo *vm_bo;
+
+   drm_gem_gpuva_assert_lock_held(obj);
+
+   drm_gem_for_each_gpuva_gem(vm_bo, obj)
+   if (vm_bo->vm == gpuvm)
+   return vm_bo;
+
+   return NULL;
+}
+
+/**
+ * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
+ * &drm_gpuvm and &drm_gem_object
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the &drm_gpuvm_bo accordingly.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+   struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
+
+   return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
+
+/**
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
+ * given &drm_gpuvm and &drm_gem_object
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
+ * &drm_gpuvm_bo.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+   struct drm_gem_object *obj)
+{
+   struct drm_gpuvm