Re: [PATCH drm-misc-next v7 1/7] drm/gpuvm: convert WARN() to drm_WARN() variants

2023-10-31 Thread Danilo Krummrich

On 10/31/23 11:08, Thomas Hellström wrote:

On Mon, 2023-10-23 at 22:16 +0200, Danilo Krummrich wrote:

Use drm_WARN() and drm_WARN_ON() variants to indicate drivers the
context the failing VM resides in.

Signed-off-by: Danilo Krummrich 
---
  drivers/gpu/drm/drm_gpuvm.c    | 32 ++--
--
  drivers/gpu/drm/nouveau/nouveau_uvmm.c |  3 ++-
  include/drm/drm_gpuvm.h    |  7 ++
  3 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c
b/drivers/gpu/drm/drm_gpuvm.c
index 08c088319652..d7367a202fee 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -614,12 +614,12 @@ static int __drm_gpuva_insert(struct drm_gpuvm
*gpuvm,
  static void __drm_gpuva_remove(struct drm_gpuva *va);
  
  static bool

-drm_gpuvm_check_overflow(u64 addr, u64 range)
+drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64
range)
  {
 u64 end;
  
-   return WARN(check_add_overflow(addr, range, &end),
-   "GPUVA address limited to %zu bytes.\n", sizeof(end));
+   return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end),
+   "GPUVA address limited to %zu bytes.\n", sizeof(end));
  }
  
  static bool

@@ -647,7 +647,7 @@ static bool
  drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
   u64 addr, u64 range)
  {
-   return !drm_gpuvm_check_overflow(addr, range) &&
+   return !drm_gpuvm_check_overflow(gpuvm, addr, range) &&
    drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
    !drm_gpuvm_in_kernel_node(gpuvm, addr, range);




  }
@@ -656,6 +656,7 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
   * drm_gpuvm_init() - initialize a _gpuvm
   * @gpuvm: pointer to the _gpuvm to initialize
   * @name: the name of the GPU VA space
+ * @drm: the _device this VM resides in
   * @start_offset: the start offset of the GPU VA space
   * @range: the size of the GPU VA space
   * @reserve_offset: the start of the kernel reserved GPU VA area
@@ -668,8 +669,8 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
   *  is expected to be managed by the surrounding driver
structures.
   */
  void
-drm_gpuvm_init(struct drm_gpuvm *gpuvm,
-  const char *name,
+drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+  struct drm_device *drm,
    u64 start_offset, u64 range,
    u64 reserve_offset, u64 reserve_range,
    const struct drm_gpuvm_ops *ops)
@@ -677,20 +678,20 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm,
 gpuvm->rb.tree = RB_ROOT_CACHED;
 INIT_LIST_HEAD(&gpuvm->rb.list);
  
-   drm_gpuvm_check_overflow(start_offset, range);

-   gpuvm->mm_start = start_offset;
-   gpuvm->mm_range = range;
-
 gpuvm->name = name ? name : "unknown";
 gpuvm->ops = ops;
+   gpuvm->drm = drm;
  
-   memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+   drm_gpuvm_check_overflow(gpuvm, start_offset, range);
+   gpuvm->mm_start = start_offset;
+   gpuvm->mm_range = range;
 
+   memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
 if (reserve_range) {
 gpuvm->kernel_alloc_node.va.addr = reserve_offset;
 gpuvm->kernel_alloc_node.va.range = reserve_range;
  
-   if (likely(!drm_gpuvm_check_overflow(reserve_offset,
+   if (likely(!drm_gpuvm_check_overflow(gpuvm, reserve_offset,
  reserve_range)))
 __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);

 }
@@ -712,8 +713,8 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
 if (gpuvm->kernel_alloc_node.va.range)
 __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
  
-   WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
-    "GPUVA tree is not empty, potentially leaking memory.");
+   drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
+    "GPUVA tree is not empty, potentially leaking memory.\n");
  }
  EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
  
@@ -795,7 +796,8 @@ drm_gpuva_remove(struct drm_gpuva *va)

 struct drm_gpuvm *gpuvm = va->vm;
  
 if (unlikely(va == &gpuvm->kernel_alloc_node)) {

-   WARN(1, "Can't destroy kernel reserved node.\n");
+   drm_WARN(gpuvm->drm, 1,
+    "Can't destroy kernel reserved node.\n");
 return;
 }
  
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c

b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 5cf892c50f43..aaf5d28bd587 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1808,6 +1808,7 @@ int
  nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli
*cli,
   u64 kernel_managed_addr, u64 kernel_managed_size)
  {
+   struct drm_device *drm = cli->drm->dev;
 int ret;
 u64 kernel_managed_end = kernel_managed_addr +

Re: [PATCH drm-misc-next v7 1/7] drm/gpuvm: convert WARN() to drm_WARN() variants

2023-10-31 Thread Thomas Hellström
On Mon, 2023-10-23 at 22:16 +0200, Danilo Krummrich wrote:
> Use drm_WARN() and drm_WARN_ON() variants to indicate drivers the
> context the failing VM resides in.
> 
> Signed-off-by: Danilo Krummrich 
> ---
>  drivers/gpu/drm/drm_gpuvm.c    | 32 ++--
> --
>  drivers/gpu/drm/nouveau/nouveau_uvmm.c |  3 ++-
>  include/drm/drm_gpuvm.h    |  7 ++
>  3 files changed, 26 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gpuvm.c
> b/drivers/gpu/drm/drm_gpuvm.c
> index 08c088319652..d7367a202fee 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -614,12 +614,12 @@ static int __drm_gpuva_insert(struct drm_gpuvm
> *gpuvm,
>  static void __drm_gpuva_remove(struct drm_gpuva *va);
>  
>  static bool
> -drm_gpuvm_check_overflow(u64 addr, u64 range)
> +drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64
> range)
>  {
> u64 end;
>  
> -   return WARN(check_add_overflow(addr, range, &end),
> -   "GPUVA address limited to %zu bytes.\n", sizeof(end));
> +   return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end),
> +   "GPUVA address limited to %zu bytes.\n", sizeof(end));
>  }
>  
>  static bool
> @@ -647,7 +647,7 @@ static bool
>  drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
>   u64 addr, u64 range)
>  {
> -   return !drm_gpuvm_check_overflow(addr, range) &&
> +   return !drm_gpuvm_check_overflow(gpuvm, addr, range) &&
>    drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
>    !drm_gpuvm_in_kernel_node(gpuvm, addr, range);


>  }
> @@ -656,6 +656,7 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
>   * drm_gpuvm_init() - initialize a _gpuvm
>   * @gpuvm: pointer to the _gpuvm to initialize
>   * @name: the name of the GPU VA space
> + * @drm: the _device this VM resides in
>   * @start_offset: the start offset of the GPU VA space
>   * @range: the size of the GPU VA space
>   * @reserve_offset: the start of the kernel reserved GPU VA area
> @@ -668,8 +669,8 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
>   *  is expected to be managed by the surrounding driver
> structures.
>   */
>  void
> -drm_gpuvm_init(struct drm_gpuvm *gpuvm,
> -  const char *name,
> +drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
> +  struct drm_device *drm,
>    u64 start_offset, u64 range,
>    u64 reserve_offset, u64 reserve_range,
>    const struct drm_gpuvm_ops *ops)
> @@ -677,20 +678,20 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm,
> gpuvm->rb.tree = RB_ROOT_CACHED;
> INIT_LIST_HEAD(&gpuvm->rb.list);
>  
> -   drm_gpuvm_check_overflow(start_offset, range);
> -   gpuvm->mm_start = start_offset;
> -   gpuvm->mm_range = range;
> -
> gpuvm->name = name ? name : "unknown";
> gpuvm->ops = ops;
> +   gpuvm->drm = drm;
>  
> -   memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
> +   drm_gpuvm_check_overflow(gpuvm, start_offset, range);
> +   gpuvm->mm_start = start_offset;
> +   gpuvm->mm_range = range;
>  
> +   memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
> if (reserve_range) {
> gpuvm->kernel_alloc_node.va.addr = reserve_offset;
> gpuvm->kernel_alloc_node.va.range = reserve_range;
>  
> -   if (likely(!drm_gpuvm_check_overflow(reserve_offset,
> +   if (likely(!drm_gpuvm_check_overflow(gpuvm,
> reserve_offset,
>  reserve_range)))
> __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
> }
> @@ -712,8 +713,8 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
> if (gpuvm->kernel_alloc_node.va.range)
> __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
>  
> -   WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
> -    "GPUVA tree is not empty, potentially leaking memory.");
> +   drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
> +    "GPUVA tree is not empty, potentially leaking memory.\n");
>  }
>  EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
>  
> @@ -795,7 +796,8 @@ drm_gpuva_remove(struct drm_gpuva *va)
> struct drm_gpuvm *gpuvm = va->vm;
>  
> if (unlikely(va == &gpuvm->kernel_alloc_node)) {
> -   WARN(1, "Can't destroy kernel reserved node.\n");
> +   drm_WARN(gpuvm->drm, 1,
> +    "Can't destroy kernel reserved node.\n");
> return;
> }
>  
> diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> index 5cf892c50f43..aaf5d28bd587 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> @@ -1808,6 +1808,7 @@ int
>  nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli
> *cli,
>   u64 kernel_managed_addr, u64 

Re: [PATCH drm-misc-next v7 1/7] drm/gpuvm: convert WARN() to drm_WARN() variants

2023-10-24 Thread Danilo Krummrich

On 10/24/23 10:45, Christian König wrote:



Am 23.10.23 um 22:16 schrieb Danilo Krummrich:

Use drm_WARN() and drm_WARN_ON() variants to indicate drivers the
context the failing VM resides in.

Signed-off-by: Danilo Krummrich 
---
  drivers/gpu/drm/drm_gpuvm.c    | 32 ++
  drivers/gpu/drm/nouveau/nouveau_uvmm.c |  3 ++-
  include/drm/drm_gpuvm.h    |  7 ++
  3 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 08c088319652..d7367a202fee 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -614,12 +614,12 @@ static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
  static void __drm_gpuva_remove(struct drm_gpuva *va);
  static bool
-drm_gpuvm_check_overflow(u64 addr, u64 range)
+drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
  {
  u64 end;
-    return WARN(check_add_overflow(addr, range, &end),
-    "GPUVA address limited to %zu bytes.\n", sizeof(end));
+    return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end),
+    "GPUVA address limited to %zu bytes.\n", sizeof(end));
  }
  static bool
@@ -647,7 +647,7 @@ static bool
  drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
    u64 addr, u64 range)
  {
-    return !drm_gpuvm_check_overflow(addr, range) &&
+    return !drm_gpuvm_check_overflow(gpuvm, addr, range) &&
 drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
 !drm_gpuvm_in_kernel_node(gpuvm, addr, range);


When those parameters come from userspace you don't really want a warning in 
the system log in the first place.

Otherwise userspace can trivially spam the system log with warnings. The usual 
approach is to make this debug level severity instead.


Currently, this function isn't exported and hence the driver should do the 
relevant
sanity checks before attempting to insert the mapping. However, I think it 
would make
sense to export this function and remove the WARN() and instead WARN() in 
drm_gpuvm_init()
explicitly.



Regards,
Christian.


  }
@@ -656,6 +656,7 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
   * drm_gpuvm_init() - initialize a _gpuvm
   * @gpuvm: pointer to the _gpuvm to initialize
   * @name: the name of the GPU VA space
+ * @drm: the _device this VM resides in
   * @start_offset: the start offset of the GPU VA space
   * @range: the size of the GPU VA space
   * @reserve_offset: the start of the kernel reserved GPU VA area
@@ -668,8 +669,8 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
   *  is expected to be managed by the surrounding driver structures.
   */
  void
-drm_gpuvm_init(struct drm_gpuvm *gpuvm,
-   const char *name,
+drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+   struct drm_device *drm,
 u64 start_offset, u64 range,
 u64 reserve_offset, u64 reserve_range,
 const struct drm_gpuvm_ops *ops)
@@ -677,20 +678,20 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm,
  gpuvm->rb.tree = RB_ROOT_CACHED;
   INIT_LIST_HEAD(&gpuvm->rb.list);
-    drm_gpuvm_check_overflow(start_offset, range);
-    gpuvm->mm_start = start_offset;
-    gpuvm->mm_range = range;
-
  gpuvm->name = name ? name : "unknown";
  gpuvm->ops = ops;
+    gpuvm->drm = drm;
-    memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+    drm_gpuvm_check_overflow(gpuvm, start_offset, range);
+    gpuvm->mm_start = start_offset;
+    gpuvm->mm_range = range;
+    memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
  if (reserve_range) {
  gpuvm->kernel_alloc_node.va.addr = reserve_offset;
  gpuvm->kernel_alloc_node.va.range = reserve_range;
-    if (likely(!drm_gpuvm_check_overflow(reserve_offset,
+    if (likely(!drm_gpuvm_check_overflow(gpuvm, reserve_offset,
   reserve_range)))
   __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
  }
@@ -712,8 +713,8 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
  if (gpuvm->kernel_alloc_node.va.range)
   __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
-    WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
- "GPUVA tree is not empty, potentially leaking memory.");
+    drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
+ "GPUVA tree is not empty, potentially leaking memory.\n");
  }
  EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
@@ -795,7 +796,8 @@ drm_gpuva_remove(struct drm_gpuva *va)
  struct drm_gpuvm *gpuvm = va->vm;
   if (unlikely(va == &gpuvm->kernel_alloc_node)) {
-    WARN(1, "Can't destroy kernel reserved node.\n");
+    drm_WARN(gpuvm->drm, 1,
+ "Can't destroy kernel reserved node.\n");
  return;
  }
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c 
b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 5cf892c50f43..aaf5d28bd587 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1808,6 +1808,7 @@ int
  

Re: [PATCH drm-misc-next v7 1/7] drm/gpuvm: convert WARN() to drm_WARN() variants

2023-10-24 Thread Christian König




Am 23.10.23 um 22:16 schrieb Danilo Krummrich:

Use drm_WARN() and drm_WARN_ON() variants to indicate drivers the
context the failing VM resides in.

Signed-off-by: Danilo Krummrich 
---
  drivers/gpu/drm/drm_gpuvm.c| 32 ++
  drivers/gpu/drm/nouveau/nouveau_uvmm.c |  3 ++-
  include/drm/drm_gpuvm.h|  7 ++
  3 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 08c088319652..d7367a202fee 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -614,12 +614,12 @@ static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
  static void __drm_gpuva_remove(struct drm_gpuva *va);
  
  static bool

-drm_gpuvm_check_overflow(u64 addr, u64 range)
+drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
  {
u64 end;
  
-	return WARN(check_add_overflow(addr, range, &end),
-   "GPUVA address limited to %zu bytes.\n", sizeof(end));
+   return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end),
+   "GPUVA address limited to %zu bytes.\n", sizeof(end));
  }
  
  static bool

@@ -647,7 +647,7 @@ static bool
  drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
  u64 addr, u64 range)
  {
-   return !drm_gpuvm_check_overflow(addr, range) &&
+   return !drm_gpuvm_check_overflow(gpuvm, addr, range) &&
   drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
   !drm_gpuvm_in_kernel_node(gpuvm, addr, range);


When those parameters come from userspace you don't really want a 
warning in the system log in the first place.


Otherwise userspace can trivially spam the system log with warnings. The 
usual approach is to make this debug level severity instead.


Regards,
Christian.


  }
@@ -656,6 +656,7 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
   * drm_gpuvm_init() - initialize a _gpuvm
   * @gpuvm: pointer to the _gpuvm to initialize
   * @name: the name of the GPU VA space
+ * @drm: the _device this VM resides in
   * @start_offset: the start offset of the GPU VA space
   * @range: the size of the GPU VA space
   * @reserve_offset: the start of the kernel reserved GPU VA area
@@ -668,8 +669,8 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
   *  is expected to be managed by the surrounding driver structures.
   */
  void
-drm_gpuvm_init(struct drm_gpuvm *gpuvm,
-  const char *name,
+drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+  struct drm_device *drm,
   u64 start_offset, u64 range,
   u64 reserve_offset, u64 reserve_range,
   const struct drm_gpuvm_ops *ops)
@@ -677,20 +678,20 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm,
gpuvm->rb.tree = RB_ROOT_CACHED;
 INIT_LIST_HEAD(&gpuvm->rb.list);
  
-	drm_gpuvm_check_overflow(start_offset, range);

-   gpuvm->mm_start = start_offset;
-   gpuvm->mm_range = range;
-
gpuvm->name = name ? name : "unknown";
gpuvm->ops = ops;
+   gpuvm->drm = drm;
  
-	memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+   drm_gpuvm_check_overflow(gpuvm, start_offset, range);
+   gpuvm->mm_start = start_offset;
+   gpuvm->mm_range = range;
  
+	memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));

if (reserve_range) {
gpuvm->kernel_alloc_node.va.addr = reserve_offset;
gpuvm->kernel_alloc_node.va.range = reserve_range;
  
-		if (likely(!drm_gpuvm_check_overflow(reserve_offset,

+   if (likely(!drm_gpuvm_check_overflow(gpuvm, reserve_offset,
 reserve_range)))
 __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
}
@@ -712,8 +713,8 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
 if (gpuvm->kernel_alloc_node.va.range)
 __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
  
-	WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
-"GPUVA tree is not empty, potentially leaking memory.");
+   drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
+"GPUVA tree is not empty, potentially leaking memory.\n");
  }
  EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
  
@@ -795,7 +796,8 @@ drm_gpuva_remove(struct drm_gpuva *va)

struct drm_gpuvm *gpuvm = va->vm;
  
 	if (unlikely(va == &gpuvm->kernel_alloc_node)) {

-   WARN(1, "Can't destroy kernel reserved node.\n");
+   drm_WARN(gpuvm->drm, 1,
+"Can't destroy kernel reserved node.\n");
return;
}
  
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c

index 5cf892c50f43..aaf5d28bd587 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1808,6 +1808,7 @@ int
  nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
  u64 kernel_managed_addr, u64 

[PATCH drm-misc-next v7 1/7] drm/gpuvm: convert WARN() to drm_WARN() variants

2023-10-23 Thread Danilo Krummrich
Use drm_WARN() and drm_WARN_ON() variants to indicate drivers the
context the failing VM resides in.

Signed-off-by: Danilo Krummrich 
---
 drivers/gpu/drm/drm_gpuvm.c| 32 ++
 drivers/gpu/drm/nouveau/nouveau_uvmm.c |  3 ++-
 include/drm/drm_gpuvm.h|  7 ++
 3 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 08c088319652..d7367a202fee 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -614,12 +614,12 @@ static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
 static void __drm_gpuva_remove(struct drm_gpuva *va);
 
 static bool
-drm_gpuvm_check_overflow(u64 addr, u64 range)
+drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
 {
u64 end;
 
-   return WARN(check_add_overflow(addr, range, &end),
-   "GPUVA address limited to %zu bytes.\n", sizeof(end));
+   return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end),
+   "GPUVA address limited to %zu bytes.\n", sizeof(end));
 }
 
 static bool
@@ -647,7 +647,7 @@ static bool
 drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
  u64 addr, u64 range)
 {
-   return !drm_gpuvm_check_overflow(addr, range) &&
+   return !drm_gpuvm_check_overflow(gpuvm, addr, range) &&
   drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
   !drm_gpuvm_in_kernel_node(gpuvm, addr, range);
 }
@@ -656,6 +656,7 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
  * drm_gpuvm_init() - initialize a _gpuvm
  * @gpuvm: pointer to the _gpuvm to initialize
  * @name: the name of the GPU VA space
+ * @drm: the _device this VM resides in
  * @start_offset: the start offset of the GPU VA space
  * @range: the size of the GPU VA space
  * @reserve_offset: the start of the kernel reserved GPU VA area
@@ -668,8 +669,8 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
  *  is expected to be managed by the surrounding driver structures.
  */
 void
-drm_gpuvm_init(struct drm_gpuvm *gpuvm,
-  const char *name,
+drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+  struct drm_device *drm,
   u64 start_offset, u64 range,
   u64 reserve_offset, u64 reserve_range,
   const struct drm_gpuvm_ops *ops)
@@ -677,20 +678,20 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm,
gpuvm->rb.tree = RB_ROOT_CACHED;
 INIT_LIST_HEAD(&gpuvm->rb.list);
 
-   drm_gpuvm_check_overflow(start_offset, range);
-   gpuvm->mm_start = start_offset;
-   gpuvm->mm_range = range;
-
gpuvm->name = name ? name : "unknown";
gpuvm->ops = ops;
+   gpuvm->drm = drm;
 
-   memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+   drm_gpuvm_check_overflow(gpuvm, start_offset, range);
+   gpuvm->mm_start = start_offset;
+   gpuvm->mm_range = range;
 
+   memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
if (reserve_range) {
gpuvm->kernel_alloc_node.va.addr = reserve_offset;
gpuvm->kernel_alloc_node.va.range = reserve_range;
 
-   if (likely(!drm_gpuvm_check_overflow(reserve_offset,
+   if (likely(!drm_gpuvm_check_overflow(gpuvm, reserve_offset,
 reserve_range)))
 __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
}
@@ -712,8 +713,8 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
 if (gpuvm->kernel_alloc_node.va.range)
 __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
  
-   WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
-"GPUVA tree is not empty, potentially leaking memory.");
+   drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
+"GPUVA tree is not empty, potentially leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
 
@@ -795,7 +796,8 @@ drm_gpuva_remove(struct drm_gpuva *va)
struct drm_gpuvm *gpuvm = va->vm;
 
 if (unlikely(va == &gpuvm->kernel_alloc_node)) {
-   WARN(1, "Can't destroy kernel reserved node.\n");
+   drm_WARN(gpuvm->drm, 1,
+"Can't destroy kernel reserved node.\n");
return;
}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c 
b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 5cf892c50f43..aaf5d28bd587 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1808,6 +1808,7 @@ int
 nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
  u64 kernel_managed_addr, u64 kernel_managed_size)
 {
+   struct drm_device *drm = cli->drm->dev;
int ret;
u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
 
@@ -1836,7 +1837,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct 
nouveau_cli *cli,
uvmm->kernel_managed_addr = kernel_managed_addr;