Re: [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-09-22 Thread Iddamsetty, Aravind



On 22-09-2023 16:27, Tvrtko Ursulin wrote:
> 
> On 22/09/2023 09:48, Iddamsetty, Aravind wrote:
>>
>>
>> On 21-09-2023 17:18, Tvrtko Ursulin wrote:
>>> From: Tvrtko Ursulin 
>>>
>>> Use the newly added drm_print_memory_stats helper to show memory
>>> utilisation of our objects in drm/driver specific fdinfo output.
>>>
>>> To collect the stats we walk the per memory regions object lists
>>> and accumulate object size into the respective drm_memory_stats
>>> categories.
>>>
>>> Objects with multiple possible placements are reported in multiple
>>> regions for total and shared sizes, while other categories are
>>
>> I guess you forgot to correct this.
> 
> Ah yes, will fix.
> 
>>
>>> counted only for the currently active region.
>>>
>>> v2:
>>>   * Only account against the active region.
>>>   * Use DMA_RESV_USAGE_BOOKKEEP when testing for active. (Tejas)
>>>
>>> Signed-off-by: Tvrtko Ursulin 
>>> Cc: Aravind Iddamsetty 
>>> Cc: Rob Clark 
>>> Cc: Andi Shyti 
>>> Cc: Tejas Upadhyay 
>>> Reviewed-by: Andi Shyti  # v1
>>> ---
>>>   drivers/gpu/drm/i915/i915_drm_client.c | 64 ++
>>>   1 file changed, 64 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c
>>> b/drivers/gpu/drm/i915/i915_drm_client.c
>>> index a61356012df8..94abc2fb2ea6 100644
>>> --- a/drivers/gpu/drm/i915/i915_drm_client.c
>>> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
>>> @@ -45,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
>>>   }
>>>     #ifdef CONFIG_PROC_FS
>>> +static void
>>> +obj_meminfo(struct drm_i915_gem_object *obj,
>>> +    struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
>>> +{
>>> +    const enum intel_region_id id = obj->mm.region ?
>>> +    obj->mm.region->id : INTEL_REGION_SMEM;
>>> +    const u64 sz = obj->base.size;
>>> +
>>> +    if (obj->base.handle_count > 1)
>>> +    stats[id].shared += sz;
>>> +    else
>>> +    stats[id].private += sz;
>>> +
>>> +    if (i915_gem_object_has_pages(obj)) {
>>> +    stats[id].resident += sz;
>>> +
>>> +    if (!dma_resv_test_signaled(obj->base.resv,
>>> +    DMA_RESV_USAGE_BOOKKEEP))
>>> +    stats[id].active += sz;
>>> +    else if (i915_gem_object_is_shrinkable(obj) &&
>>> + obj->mm.madv == I915_MADV_DONTNEED)
>>> +    stats[id].purgeable += sz;
>>> +    }
>>> +}
>>> +
>>> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
>>> +{
>>> +    struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
>>> +    struct drm_i915_file_private *fpriv = file->driver_priv;
>>> +    struct i915_drm_client *client = fpriv->client;
>>> +    struct drm_i915_private *i915 = fpriv->i915;
>>> +    struct drm_i915_gem_object *obj;
>>> +    struct intel_memory_region *mr;
>>> +    struct list_head *pos;
>>> +    unsigned int id;
>>> +
>>> +    /* Public objects. */
>>> +    spin_lock(>table_lock);
>>> +    idr_for_each_entry(>object_idr, obj, id)
>>> +    obj_meminfo(obj, stats);
>>> +    spin_unlock(>table_lock);
>>> +
>>> +    /* Internal objects. */
>>> +    rcu_read_lock();
>>> +    list_for_each_rcu(pos, >objects_list) {
>>> +    obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
>>> + client_link));
>>> +    if (!obj)
>>> +    continue;
>>> +    obj_meminfo(obj, stats);
>>> +    i915_gem_object_put(obj);
>>> +    }
>>> +    rcu_read_unlock();
>>> +
>>> +    for_each_memory_region(mr, i915, id)
>>> +    drm_print_memory_stats(p,
>>> +   [id],
>>> +   DRM_GEM_OBJECT_RESIDENT |
>>> +   DRM_GEM_OBJECT_PURGEABLE,
>>> +   mr->name);
>>> +}
>>> +
>>>   static const char * const uabi_class_names[] = {
>>>   [I915_ENGINE_CLASS_RENDER] = "render",
>>>   [I915_ENGINE_CLASS_COPY] = "copy",
>>> @@ -106,6 +168,8 @@ void i915_drm_client_fdinfo(struct drm_printer
>>> *p, struct drm_file *file)
>>>    *
>>> **
>>>    */
>>>   +    show_meminfo(p, file);
>>> +
>>>   if (GRAPHICS_VER(i915) < 8)
>>>   return;
>>>   
>>
>> Reviewed-by: Aravind Iddamsetty 
> 
> Thank you! Would you be able to also look at the IGTs I posted yesterday?

Ya sure will take a look.

Thanks,
Aravind.
> 
> Regards,
> 
> Tvrtko


Re: [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-09-22 Thread Andi Shyti
Hi Tvrtko,

On Thu, Sep 21, 2023 at 12:48:52PM +0100, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin 
> 
> Use the newly added drm_print_memory_stats helper to show memory
> utilisation of our objects in drm/driver specific fdinfo output.
> 
> To collect the stats we walk the per memory regions object lists
> and accumulate object size into the respective drm_memory_stats
> categories.
> 
> Objects with multiple possible placements are reported in multiple
> regions for total and shared sizes, while other categories are
> counted only for the currently active region.
> 
> v2:
>  * Only account against the active region.
>  * Use DMA_RESV_USAGE_BOOKKEEP when testing for active. (Tejas)
> 
> Signed-off-by: Tvrtko Ursulin 
> Cc: Aravind Iddamsetty 
> Cc: Rob Clark 
> Cc: Andi Shyti 
> Cc: Tejas Upadhyay 
> Reviewed-by: Andi Shyti  # v1

Reviewed also this version :)

Thanks,
Andi

> ---
>  drivers/gpu/drm/i915/i915_drm_client.c | 64 ++
>  1 file changed, 64 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
> b/drivers/gpu/drm/i915/i915_drm_client.c
> index a61356012df8..94abc2fb2ea6 100644
> --- a/drivers/gpu/drm/i915/i915_drm_client.c
> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
> @@ -45,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
>  }
>  
>  #ifdef CONFIG_PROC_FS
> +static void
> +obj_meminfo(struct drm_i915_gem_object *obj,
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
> +{
> + const enum intel_region_id id = obj->mm.region ?
> + obj->mm.region->id : INTEL_REGION_SMEM;
> + const u64 sz = obj->base.size;
> +
> + if (obj->base.handle_count > 1)
> + stats[id].shared += sz;
> + else
> + stats[id].private += sz;
> +
> + if (i915_gem_object_has_pages(obj)) {
> + stats[id].resident += sz;
> +
> + if (!dma_resv_test_signaled(obj->base.resv,
> + DMA_RESV_USAGE_BOOKKEEP))
> + stats[id].active += sz;
> + else if (i915_gem_object_is_shrinkable(obj) &&
> +  obj->mm.madv == I915_MADV_DONTNEED)
> + stats[id].purgeable += sz;
> + }
> +}
> +
> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
> +{
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
> + struct drm_i915_file_private *fpriv = file->driver_priv;
> + struct i915_drm_client *client = fpriv->client;
> + struct drm_i915_private *i915 = fpriv->i915;
> + struct drm_i915_gem_object *obj;
> + struct intel_memory_region *mr;
> + struct list_head *pos;
> + unsigned int id;
> +
> + /* Public objects. */
> + spin_lock(>table_lock);
> + idr_for_each_entry(>object_idr, obj, id)
> + obj_meminfo(obj, stats);
> + spin_unlock(>table_lock);
> +
> + /* Internal objects. */
> + rcu_read_lock();
> + list_for_each_rcu(pos, >objects_list) {
> + obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
> +  client_link));
> + if (!obj)
> + continue;
> + obj_meminfo(obj, stats);
> + i915_gem_object_put(obj);
> + }
> + rcu_read_unlock();
> +
> + for_each_memory_region(mr, i915, id)
> + drm_print_memory_stats(p,
> +[id],
> +DRM_GEM_OBJECT_RESIDENT |
> +DRM_GEM_OBJECT_PURGEABLE,
> +mr->name);
> +}
> +
>  static const char * const uabi_class_names[] = {
>   [I915_ENGINE_CLASS_RENDER] = "render",
>   [I915_ENGINE_CLASS_COPY] = "copy",
> @@ -106,6 +168,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
> drm_file *file)
>* **
>*/
>  
> + show_meminfo(p, file);
> +
>   if (GRAPHICS_VER(i915) < 8)
>   return;
>  
> -- 
> 2.39.2


Re: [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-09-22 Thread Tvrtko Ursulin



On 22/09/2023 09:48, Iddamsetty, Aravind wrote:



On 21-09-2023 17:18, Tvrtko Ursulin wrote:

From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists
and accumulate object size into the respective drm_memory_stats
categories.

Objects with multiple possible placements are reported in multiple
regions for total and shared sizes, while other categories are


I guess you forgot to correct this.


Ah yes, will fix.




counted only for the currently active region.

v2:
  * Only account against the active region.
  * Use DMA_RESV_USAGE_BOOKKEEP when testing for active. (Tejas)

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark 
Cc: Andi Shyti 
Cc: Tejas Upadhyay 
Reviewed-by: Andi Shyti  # v1
---
  drivers/gpu/drm/i915/i915_drm_client.c | 64 ++
  1 file changed, 64 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
b/drivers/gpu/drm/i915/i915_drm_client.c
index a61356012df8..94abc2fb2ea6 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -45,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
  }
  
  #ifdef CONFIG_PROC_FS

+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+   const enum intel_region_id id = obj->mm.region ?
+   obj->mm.region->id : INTEL_REGION_SMEM;
+   const u64 sz = obj->base.size;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   DMA_RESV_USAGE_BOOKKEEP))
+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   struct list_head *pos;
+   unsigned int id;
+
+   /* Public objects. */
+   spin_lock(>table_lock);
+   idr_for_each_entry(>object_idr, obj, id)
+   obj_meminfo(obj, stats);
+   spin_unlock(>table_lock);
+
+   /* Internal objects. */
+   rcu_read_lock();
+   list_for_each_rcu(pos, >objects_list) {
+   obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+client_link));
+   if (!obj)
+   continue;
+   obj_meminfo(obj, stats);
+   i915_gem_object_put(obj);
+   }
+   rcu_read_unlock();
+
+   for_each_memory_region(mr, i915, id)
+   drm_print_memory_stats(p,
+  [id],
+  DRM_GEM_OBJECT_RESIDENT |
+  DRM_GEM_OBJECT_PURGEABLE,
+  mr->name);
+}
+
  static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -106,6 +168,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
drm_file *file)
 * **
 */
  
+	show_meminfo(p, file);

+
if (GRAPHICS_VER(i915) < 8)
return;
  


Reviewed-by: Aravind Iddamsetty 


Thank you! Would you be able to also look at the IGTs I posted yesterday?

Regards,

Tvrtko


Re: [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-09-22 Thread Iddamsetty, Aravind



On 21-09-2023 17:18, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin 
> 
> Use the newly added drm_print_memory_stats helper to show memory
> utilisation of our objects in drm/driver specific fdinfo output.
> 
> To collect the stats we walk the per memory regions object lists
> and accumulate object size into the respective drm_memory_stats
> categories.
> 
> Objects with multiple possible placements are reported in multiple
> regions for total and shared sizes, while other categories are

I guess you forgot to correct this.

> counted only for the currently active region.
> 
> v2:
>  * Only account against the active region.
>  * Use DMA_RESV_USAGE_BOOKKEEP when testing for active. (Tejas)
> 
> Signed-off-by: Tvrtko Ursulin 
> Cc: Aravind Iddamsetty 
> Cc: Rob Clark 
> Cc: Andi Shyti 
> Cc: Tejas Upadhyay 
> Reviewed-by: Andi Shyti  # v1
> ---
>  drivers/gpu/drm/i915/i915_drm_client.c | 64 ++
>  1 file changed, 64 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
> b/drivers/gpu/drm/i915/i915_drm_client.c
> index a61356012df8..94abc2fb2ea6 100644
> --- a/drivers/gpu/drm/i915/i915_drm_client.c
> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
> @@ -45,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
>  }
>  
>  #ifdef CONFIG_PROC_FS
> +static void
> +obj_meminfo(struct drm_i915_gem_object *obj,
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
> +{
> + const enum intel_region_id id = obj->mm.region ?
> + obj->mm.region->id : INTEL_REGION_SMEM;
> + const u64 sz = obj->base.size;
> +
> + if (obj->base.handle_count > 1)
> + stats[id].shared += sz;
> + else
> + stats[id].private += sz;
> +
> + if (i915_gem_object_has_pages(obj)) {
> + stats[id].resident += sz;
> +
> + if (!dma_resv_test_signaled(obj->base.resv,
> + DMA_RESV_USAGE_BOOKKEEP))
> + stats[id].active += sz;
> + else if (i915_gem_object_is_shrinkable(obj) &&
> +  obj->mm.madv == I915_MADV_DONTNEED)
> + stats[id].purgeable += sz;
> + }
> +}
> +
> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
> +{
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
> + struct drm_i915_file_private *fpriv = file->driver_priv;
> + struct i915_drm_client *client = fpriv->client;
> + struct drm_i915_private *i915 = fpriv->i915;
> + struct drm_i915_gem_object *obj;
> + struct intel_memory_region *mr;
> + struct list_head *pos;
> + unsigned int id;
> +
> + /* Public objects. */
> + spin_lock(>table_lock);
> + idr_for_each_entry(>object_idr, obj, id)
> + obj_meminfo(obj, stats);
> + spin_unlock(>table_lock);
> +
> + /* Internal objects. */
> + rcu_read_lock();
> + list_for_each_rcu(pos, >objects_list) {
> + obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
> +  client_link));
> + if (!obj)
> + continue;
> + obj_meminfo(obj, stats);
> + i915_gem_object_put(obj);
> + }
> + rcu_read_unlock();
> +
> + for_each_memory_region(mr, i915, id)
> + drm_print_memory_stats(p,
> +[id],
> +DRM_GEM_OBJECT_RESIDENT |
> +DRM_GEM_OBJECT_PURGEABLE,
> +mr->name);
> +}
> +
>  static const char * const uabi_class_names[] = {
>   [I915_ENGINE_CLASS_RENDER] = "render",
>   [I915_ENGINE_CLASS_COPY] = "copy",
> @@ -106,6 +168,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
> drm_file *file)
>* **
>*/
>  
> + show_meminfo(p, file);
> +
>   if (GRAPHICS_VER(i915) < 8)
>   return;
>  

Reviewed-by: Aravind Iddamsetty 

Thanks,
Aravind.


[PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-09-21 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists
and accumulate object size into the respective drm_memory_stats
categories.

Objects with multiple possible placements are reported in multiple
regions for total and shared sizes, while other categories are
counted only for the currently active region.

v2:
 * Only account against the active region.
 * Use DMA_RESV_USAGE_BOOKKEEP when testing for active. (Tejas)

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark 
Cc: Andi Shyti 
Cc: Tejas Upadhyay 
Reviewed-by: Andi Shyti  # v1
---
 drivers/gpu/drm/i915/i915_drm_client.c | 64 ++
 1 file changed, 64 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
b/drivers/gpu/drm/i915/i915_drm_client.c
index a61356012df8..94abc2fb2ea6 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -45,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
 }
 
 #ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+   const enum intel_region_id id = obj->mm.region ?
+   obj->mm.region->id : INTEL_REGION_SMEM;
+   const u64 sz = obj->base.size;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   DMA_RESV_USAGE_BOOKKEEP))
+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   struct list_head *pos;
+   unsigned int id;
+
+   /* Public objects. */
+   spin_lock(&file->table_lock);
+   idr_for_each_entry(&file->object_idr, obj, id)
+   obj_meminfo(obj, stats);
+   spin_unlock(&file->table_lock);
+
+   /* Internal objects. */
+   rcu_read_lock();
+   list_for_each_rcu(pos, &client->objects_list) {
+   obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+client_link));
+   if (!obj)
+   continue;
+   obj_meminfo(obj, stats);
+   i915_gem_object_put(obj);
+   }
+   rcu_read_unlock();
+
+   for_each_memory_region(mr, i915, id)
+   drm_print_memory_stats(p,
+  &stats[id],
+  DRM_GEM_OBJECT_RESIDENT |
+  DRM_GEM_OBJECT_PURGEABLE,
+  mr->name);
+}
+
 static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -106,6 +168,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
drm_file *file)
 * **
 */
 
+   show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
 
-- 
2.39.2



Re: [Intel-gfx] [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-09-20 Thread Rob Clark
On Wed, Sep 20, 2023 at 7:35 AM Tvrtko Ursulin
 wrote:
>
>
> On 24/08/2023 12:35, Upadhyay, Tejas wrote:
> >> -Original Message-
> >> From: Intel-gfx  On Behalf Of 
> >> Tvrtko
> >> Ursulin
> >> Sent: Friday, July 7, 2023 6:32 PM
> >> To: intel-...@lists.freedesktop.org; dri-devel@lists.freedesktop.org
> >> Subject: [Intel-gfx] [PATCH 5/5] drm/i915: Implement fdinfo memory stats
> >> printing
> >>
> >> From: Tvrtko Ursulin 
> >>
> >> Use the newly added drm_print_memory_stats helper to show memory
> >> utilisation of our objects in drm/driver specific fdinfo output.
> >>
> >> To collect the stats we walk the per memory regions object lists and
> >> accumulate object size into the respective drm_memory_stats categories.
> >>
> >> Objects with multiple possible placements are reported in multiple regions 
> >> for
> >> total and shared sizes, while other categories are counted only for the
> >> currently active region.
> >>
> >> Signed-off-by: Tvrtko Ursulin 
> >> Cc: Aravind Iddamsetty 
> >> Cc: Rob Clark 
> >> ---
> >>   drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
> >>   1 file changed, 85 insertions(+)
> >>
> >> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c
> >> b/drivers/gpu/drm/i915/i915_drm_client.c
> >> index ffccb6239789..5c77d6987d90 100644
> >> --- a/drivers/gpu/drm/i915/i915_drm_client.c
> >> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
> >> @@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)  }
> >>
> >>   #ifdef CONFIG_PROC_FS
> >> +static void
> >> +obj_meminfo(struct drm_i915_gem_object *obj,
> >> +struct drm_memory_stats stats[INTEL_REGION_UNKNOWN]) {
> >> +struct intel_memory_region *mr;
> >> +u64 sz = obj->base.size;
> >> +enum intel_region_id id;
> >> +unsigned int i;
> >> +
> >> +/* Attribute size and shared to all possible memory regions. */
> >> +for (i = 0; i < obj->mm.n_placements; i++) {
> >> +mr = obj->mm.placements[i];
> >> +id = mr->id;
> >> +
> >> +if (obj->base.handle_count > 1)
> >> +stats[id].shared += sz;
> >> +else
> >> +stats[id].private += sz;
> >> +}
> >> +
> >> +/* Attribute other categories to only the current region. */
> >> +mr = obj->mm.region;
> >> +if (mr)
> >> +id = mr->id;
> >> +else
> >> +id = INTEL_REGION_SMEM;
> >> +
> >> +if (!obj->mm.n_placements) {
> >> +if (obj->base.handle_count > 1)
> >> +stats[id].shared += sz;
> >> +else
> >> +stats[id].private += sz;
> >> +}
> >> +
> >> +if (i915_gem_object_has_pages(obj)) {
> >> +stats[id].resident += sz;
> >> +
> >> +if (!dma_resv_test_signaled(obj->base.resv,
> >> +dma_resv_usage_rw(true)))
> >
> > Should not DMA_RESV_USAGE_BOOKKEEP also considered active (why only "rw")? 
> > Some app is syncing with syncjobs and has added dma_fence with 
> > DMA_RESV_USAGE_BOOKKEEP during execbuf while that BO is busy on waiting on 
> > work!
>
> Hmm do we have a path which adds DMA_RESV_USAGE_BOOKKEEP usage in execbuf?
>
> Rob, any comments here? Given how I basically lifted the logic from
> 686b21b5f6ca ("drm: Add fdinfo memory stats"), does it sound plausible
> to upgrade the test against all fences?

Yes, I think so.. I don't have any use for BOOKKEEP so I hadn't considered it

BR,
-R


>
> Regards,
>
> Tvrtko
>
> >> +stats[id].active += sz;
> >> +else if (i915_gem_object_is_shrinkable(obj) &&
> >> + obj->mm.madv == I915_MADV_DONTNEED)
> >> +stats[id].purgeable += sz;
> >> +}
> >> +}
> >> +
> >> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
> >> +{
> >> +struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
> >> +struct drm_i915_file_private *fpriv = file->driver_priv;
> >> +struct i915_drm_client *client = fpriv

Re: [Intel-gfx] [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-09-20 Thread Tvrtko Ursulin



On 24/08/2023 12:35, Upadhyay, Tejas wrote:

-Original Message-
From: Intel-gfx  On Behalf Of Tvrtko
Ursulin
Sent: Friday, July 7, 2023 6:32 PM
To: intel-...@lists.freedesktop.org; dri-devel@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 5/5] drm/i915: Implement fdinfo memory stats
printing

From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists and
accumulate object size into the respective drm_memory_stats categories.

Objects with multiple possible placements are reported in multiple regions for
total and shared sizes, while other categories are counted only for the
currently active region.

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark 
---
  drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
  1 file changed, 85 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c
b/drivers/gpu/drm/i915/i915_drm_client.c
index ffccb6239789..5c77d6987d90 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)  }

  #ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN]) {
+   struct intel_memory_region *mr;
+   u64 sz = obj->base.size;
+   enum intel_region_id id;
+   unsigned int i;
+
+   /* Attribute size and shared to all possible memory regions. */
+   for (i = 0; i < obj->mm.n_placements; i++) {
+   mr = obj->mm.placements[i];
+   id = mr->id;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   /* Attribute other categories to only the current region. */
+   mr = obj->mm.region;
+   if (mr)
+   id = mr->id;
+   else
+   id = INTEL_REGION_SMEM;
+
+   if (!obj->mm.n_placements) {
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   dma_resv_usage_rw(true)))


Should not DMA_RESV_USAGE_BOOKKEEP also considered active (why only "rw")? Some 
app is syncing with syncjobs and has added dma_fence with DMA_RESV_USAGE_BOOKKEEP during 
execbuf while that BO is busy on waiting on work!


Hmm do we have a path which adds DMA_RESV_USAGE_BOOKKEEP usage in execbuf?

Rob, any comments here? Given how I basically lifted the logic from 
686b21b5f6ca ("drm: Add fdinfo memory stats"), does it sound plausible 
to upgrade the test against all fences?


Regards,

Tvrtko


+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   struct list_head *pos;
+   unsigned int id;
+
+   /* Public objects. */
+   spin_lock(&file->table_lock);
+   idr_for_each_entry (&file->object_idr, obj, id)
+   obj_meminfo(obj, stats);
+   spin_unlock(&file->table_lock);
+
+   /* Internal objects. */
+   rcu_read_lock();
+   list_for_each_rcu(pos, >objects_list) {
+   obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+client_link));
+   if (!obj)
+   continue;
+   obj_meminfo(obj, stats);
+   i915_gem_object_put(obj);
+   }
+   rcu_read_unlock();
+
+   for_each_memory_region(mr, i915, id)
+   drm_print_memory_stats(p,
+  [id],
+  DRM_GEM_OBJECT_RESIDENT |
+  DRM_GEM_OBJECT_PURGEABLE,
+  mr->name);
+}
+
  static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY

RE: [Intel-gfx] [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-08-24 Thread Upadhyay, Tejas



> -Original Message-
> From: Intel-gfx  On Behalf Of Tvrtko
> Ursulin
> Sent: Friday, July 7, 2023 6:32 PM
> To: intel-...@lists.freedesktop.org; dri-devel@lists.freedesktop.org
> Subject: [Intel-gfx] [PATCH 5/5] drm/i915: Implement fdinfo memory stats
> printing
> 
> From: Tvrtko Ursulin 
> 
> Use the newly added drm_print_memory_stats helper to show memory
> utilisation of our objects in drm/driver specific fdinfo output.
> 
> To collect the stats we walk the per memory regions object lists and
> accumulate object size into the respective drm_memory_stats categories.
> 
> Objects with multiple possible placements are reported in multiple regions for
> total and shared sizes, while other categories are counted only for the
> currently active region.
> 
> Signed-off-by: Tvrtko Ursulin 
> Cc: Aravind Iddamsetty 
> Cc: Rob Clark 
> ---
>  drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
>  1 file changed, 85 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c
> b/drivers/gpu/drm/i915/i915_drm_client.c
> index ffccb6239789..5c77d6987d90 100644
> --- a/drivers/gpu/drm/i915/i915_drm_client.c
> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
> @@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)  }
> 
>  #ifdef CONFIG_PROC_FS
> +static void
> +obj_meminfo(struct drm_i915_gem_object *obj,
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN]) {
> + struct intel_memory_region *mr;
> + u64 sz = obj->base.size;
> + enum intel_region_id id;
> + unsigned int i;
> +
> + /* Attribute size and shared to all possible memory regions. */
> + for (i = 0; i < obj->mm.n_placements; i++) {
> + mr = obj->mm.placements[i];
> + id = mr->id;
> +
> + if (obj->base.handle_count > 1)
> + stats[id].shared += sz;
> + else
> + stats[id].private += sz;
> + }
> +
> + /* Attribute other categories to only the current region. */
> + mr = obj->mm.region;
> + if (mr)
> + id = mr->id;
> + else
> + id = INTEL_REGION_SMEM;
> +
> + if (!obj->mm.n_placements) {
> + if (obj->base.handle_count > 1)
> + stats[id].shared += sz;
> + else
> + stats[id].private += sz;
> + }
> +
> + if (i915_gem_object_has_pages(obj)) {
> + stats[id].resident += sz;
> +
> + if (!dma_resv_test_signaled(obj->base.resv,
> + dma_resv_usage_rw(true)))

Should not DMA_RESV_USAGE_BOOKKEEP also considered active (why only "rw")? Some 
app is syncing with syncjobs and has added dma_fence with 
DMA_RESV_USAGE_BOOKKEEP during execbuf while that BO is busy on waiting on work!

Thanks,
Tejas
> + stats[id].active += sz;
> + else if (i915_gem_object_is_shrinkable(obj) &&
> +  obj->mm.madv == I915_MADV_DONTNEED)
> + stats[id].purgeable += sz;
> + }
> +}
> +
> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
> +{
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
> + struct drm_i915_file_private *fpriv = file->driver_priv;
> + struct i915_drm_client *client = fpriv->client;
> + struct drm_i915_private *i915 = fpriv->i915;
> + struct drm_i915_gem_object *obj;
> + struct intel_memory_region *mr;
> + struct list_head *pos;
> + unsigned int id;
> +
> + /* Public objects. */
> + spin_lock(>table_lock);
> + idr_for_each_entry (>object_idr, obj, id)
> + obj_meminfo(obj, stats);
> + spin_unlock(>table_lock);
> +
> + /* Internal objects. */
> + rcu_read_lock();
> + list_for_each_rcu(pos, >objects_list) {
> + obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
> +  client_link));
> + if (!obj)
> + continue;
> + obj_meminfo(obj, stats);
> + i915_gem_object_put(obj);
> + }
> + rcu_read_unlock();
> +
> + for_each_memory_region(mr, i915, id)
> + drm_print_memory_stats(p,
> +[id],
> +DRM_GEM_OBJECT_RESIDENT |
> +DRM_GEM_OBJECT_PURGEABLE,
> +mr->name);
> +}
> +
>  static const char * const uabi_class_names[] = {
>   [I915_ENGINE_CLASS_RENDER] = "render",
>   [I915_ENGINE_CLASS_COPY] = "copy",
> @@ -106,6 +189,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p,
> struct drm_file *file)
>*
> 
> **
>*/
> 
> + show_meminfo(p, file);
> +
>   if (GRAPHICS_VER(i915) < 8)
>   return;
> 
> --
> 2.39.2



Re: [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-08-08 Thread Iddamsetty, Aravind



On 03-08-2023 14:19, Tvrtko Ursulin wrote:
> 
> On 03/08/2023 06:15, Iddamsetty, Aravind wrote:
>> On 27-07-2023 15:43, Tvrtko Ursulin wrote:
>>> From: Tvrtko Ursulin 
>>>
>>> Use the newly added drm_print_memory_stats helper to show memory
>>> utilisation of our objects in drm/driver specific fdinfo output.
>>>
>>> To collect the stats we walk the per memory regions object lists
>>> and accumulate object size into the respective drm_memory_stats
>>> categories.
>>>
>>> Objects with multiple possible placements are reported in multiple
>>> regions for total and shared sizes, while other categories are
>>> counted only for the currently active region.
>>>
>>> Signed-off-by: Tvrtko Ursulin 
>>> Cc: Aravind Iddamsetty 
>>> Cc: Rob Clark > ---
>>>   drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
>>>   1 file changed, 85 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c
>>> b/drivers/gpu/drm/i915/i915_drm_client.c
>>> index a61356012df8..9e7a6075ee25 100644
>>> --- a/drivers/gpu/drm/i915/i915_drm_client.c
>>> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
>>> @@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)
>>>   }
>>>     #ifdef CONFIG_PROC_FS
>>> +static void
>>> +obj_meminfo(struct drm_i915_gem_object *obj,
>>> +    struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
>>> +{
>>> +    struct intel_memory_region *mr;
>>> +    u64 sz = obj->base.size;
>>> +    enum intel_region_id id;
>>> +    unsigned int i;
>>> +
>>> +    /* Attribute size and shared to all possible memory regions. */
>>> +    for (i = 0; i < obj->mm.n_placements; i++) {
>>> +    mr = obj->mm.placements[i];
>>> +    id = mr->id;
>>> +
>>> +    if (obj->base.handle_count > 1)
>>> +    stats[id].shared += sz;
>>> +    else
>>> +    stats[id].private += sz;
>>> +    }
>>> +
>>> +    /* Attribute other categories to only the current region. */
>>> +    mr = obj->mm.region;
>>> +    if (mr)
>>> +    id = mr->id;
>>> +    else
>>> +    id = INTEL_REGION_SMEM;
>>> +
>>> +    if (!obj->mm.n_placements) {
>>
>> I guess we do not expect to have n_placements set to public objects, is
>> that right?
> 
> I think they are the only ones which can have placements. It is via
> I915_GEM_CREATE_EXT_MEMORY_REGIONS userspace is able to create them.
> 
> My main conundrum in this patch is a few lines above, the loop which
> adds shared and private.
> 
> Question is, if an object can be either smem or lmem, how do we want to
> report it? This patch adds the size for all possible regions and
> resident and active only to the currently active. But perhaps that is
> wrong. Maybe I should change it is only against the active region and
> multiple regions are just ignored. Then if object is migrated do access
> patterns or memory pressure, the total size would migrate too.
> 
> I think I was trying to achieve something here (have more visibility on
> what kind of backing store clients are allocating) which maybe does not
> work to well with the current categories.
> 
> Namely if userspace allocates say one 1MiB object with placement in
> either smem or lmem, and it is currently resident in lmem, I wanted it
> to show as:
> 
>  total-smem: 1 MiB
>  resident-smem: 0
>  total-lmem: 1 MiB
>  resident-lmem: 1 MiB
> 
> To constantly show how in theory client could be using memory from
> either region. Maybe that is misleading and should instead be:
> 
>  total-smem: 0
>  resident-smem: 0
>  total-lmem: 1 MiB
>  resident-lmem: 1 MiB
> 
> ?

I think the current implementation will not match with the memregion
info in query ioctl as well. While what you say is true I'm not sure if
there can be a client who is tracking the allocation say for an obj who
has 2 placements LMEM and SMEM, and might assume since I had made a
reservation in SMEM it shall not fail when i try to migrate there later.

Thanks,
Aravind.

> 
> And then if/when the same object gets migrated to smem it changes to
> (lets assume it is also not resident any more but got swapped out):
> 
>  total-smem: 1 MiB
>  resident-smem: 0
>  total-lmem: 0
>  resident-lmem: 0
> 
> Regards,
> 
> Tvrtko
> 
>>> +    if (obj->base.handle_count > 1)
>>> +    stats[id].shared += sz;
>>> +    else
>>> +    stats[id].private += sz;
>>> +    }
>>> +
>>> +    if (i915_gem_object_has_pages(obj)) {
>>> +    stats[id].resident += sz;
>>> +
>>> +    if (!dma_resv_test_signaled(obj->base.resv,
>>> +    dma_resv_usage_rw(true)))
>>> +    stats[id].active += sz;
>>> +    else if (i915_gem_object_is_shrinkable(obj) &&
>>> + obj->mm.madv == I915_MADV_DONTNEED)
>>> +    stats[id].purgeable += sz;
>>> +    }
>>> +}
>>> +
>>> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
>>> +{
>>> +    struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
>>> +    struct drm_i915_file_private *fpriv = file->driver_priv;
>>> +    struct 

Re: [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-08-03 Thread Tvrtko Ursulin



On 03/08/2023 06:15, Iddamsetty, Aravind wrote:

On 27-07-2023 15:43, Tvrtko Ursulin wrote:

From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists
and accumulate object size into the respective drm_memory_stats
categories.

Objects with multiple possible placements are reported in multiple
regions for total and shared sizes, while other categories are
counted only for the currently active region.

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark > ---
  drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
  1 file changed, 85 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
b/drivers/gpu/drm/i915/i915_drm_client.c
index a61356012df8..9e7a6075ee25 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)
  }
  
  #ifdef CONFIG_PROC_FS

+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+   struct intel_memory_region *mr;
+   u64 sz = obj->base.size;
+   enum intel_region_id id;
+   unsigned int i;
+
+   /* Attribute size and shared to all possible memory regions. */
+   for (i = 0; i < obj->mm.n_placements; i++) {
+   mr = obj->mm.placements[i];
+   id = mr->id;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   /* Attribute other categories to only the current region. */
+   mr = obj->mm.region;
+   if (mr)
+   id = mr->id;
+   else
+   id = INTEL_REGION_SMEM;
+
+   if (!obj->mm.n_placements) {


I guess we do not expect to have n_placements set to public objects, is
that right?


I think they are the only ones which can have placements. It is via 
I915_GEM_CREATE_EXT_MEMORY_REGIONS userspace is able to create them.


My main conundrum in this patch is a few lines above, the loop which 
adds shared and private.


Question is, if an object can be either smem or lmem, how do we want to 
report it? This patch adds the size for all possible regions and 
resident and active only to the currently active. But perhaps that is 
wrong. Maybe I should change it is only against the active region and 
multiple regions are just ignored. Then if object is migrated do access 
patterns or memory pressure, the total size would migrate too.


I think I was trying to achieve something here (have more visibility on 
what kind of backing store clients are allocating) which maybe does not 
work to well with the current categories.


Namely if userspace allocates say one 1MiB object with placement in 
either smem or lmem, and it is currently resident in lmem, I wanted it 
to show as:


 total-smem: 1 MiB
 resident-smem: 0
 total-lmem: 1 MiB
 resident-lmem: 1 MiB

To constantly show how in theory client could be using memory from 
either region. Maybe that is misleading and should instead be:


 total-smem: 0
 resident-smem: 0
 total-lmem: 1 MiB
 resident-lmem: 1 MiB

?

And then if/when the same object gets migrated to smem it changes to 
(lets assume it is also not resident any more but got swapped out):


 total-smem: 1 MiB
 resident-smem: 0
 total-lmem: 0
 resident-lmem: 0

Regards,

Tvrtko


+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   dma_resv_usage_rw(true)))
+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   struct list_head *pos;
+   unsigned int id;
+
+   /* Public objects. */
+   spin_lock(&file->table_lock);
+   idr_for_each_entry(&file->object_idr, obj, id)
+   obj_meminfo(obj, stats);
+   spin_unlock(&file->table_lock);
+
+   /* Internal objects. */
+   rcu_read_lock();
+   list_for_each_rcu(pos, &client->objects_list) {
+   obj = 

Re: [PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-08-02 Thread Iddamsetty, Aravind



On 27-07-2023 15:43, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin 
> 
> Use the newly added drm_print_memory_stats helper to show memory
> utilisation of our objects in drm/driver specific fdinfo output.
> 
> To collect the stats we walk the per memory regions object lists
> and accumulate object size into the respective drm_memory_stats
> categories.
> 
> Objects with multiple possible placements are reported in multiple
> regions for total and shared sizes, while other categories are
> counted only for the currently active region.
> 
> Signed-off-by: Tvrtko Ursulin 
> Cc: Aravind Iddamsetty 
> Cc: Rob Clark > ---
>  drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
>  1 file changed, 85 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
> b/drivers/gpu/drm/i915/i915_drm_client.c
> index a61356012df8..9e7a6075ee25 100644
> --- a/drivers/gpu/drm/i915/i915_drm_client.c
> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
> @@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)
>  }
>  
>  #ifdef CONFIG_PROC_FS
> +static void
> +obj_meminfo(struct drm_i915_gem_object *obj,
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
> +{
> + struct intel_memory_region *mr;
> + u64 sz = obj->base.size;
> + enum intel_region_id id;
> + unsigned int i;
> +
> + /* Attribute size and shared to all possible memory regions. */
> + for (i = 0; i < obj->mm.n_placements; i++) {
> + mr = obj->mm.placements[i];
> + id = mr->id;
> +
> + if (obj->base.handle_count > 1)
> + stats[id].shared += sz;
> + else
> + stats[id].private += sz;
> + }
> +
> + /* Attribute other categories to only the current region. */
> + mr = obj->mm.region;
> + if (mr)
> + id = mr->id;
> + else
> + id = INTEL_REGION_SMEM;
> +
> + if (!obj->mm.n_placements) {

I guess we do not expect to have n_placements set to public objects, is
that right?

Thanks,
Aravind.
> + if (obj->base.handle_count > 1)
> + stats[id].shared += sz;
> + else
> + stats[id].private += sz;
> + }
> +
> + if (i915_gem_object_has_pages(obj)) {
> + stats[id].resident += sz;
> +
> + if (!dma_resv_test_signaled(obj->base.resv,
> + dma_resv_usage_rw(true)))
> + stats[id].active += sz;
> + else if (i915_gem_object_is_shrinkable(obj) &&
> +  obj->mm.madv == I915_MADV_DONTNEED)
> + stats[id].purgeable += sz;
> + }
> +}
> +
> +static void show_meminfo(struct drm_printer *p, struct drm_file *file)
> +{
> + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
> + struct drm_i915_file_private *fpriv = file->driver_priv;
> + struct i915_drm_client *client = fpriv->client;
> + struct drm_i915_private *i915 = fpriv->i915;
> + struct drm_i915_gem_object *obj;
> + struct intel_memory_region *mr;
> + struct list_head *pos;
> + unsigned int id;
> +
> + /* Public objects. */
> + spin_lock(&file->table_lock);
> + idr_for_each_entry(&file->object_idr, obj, id)
> + obj_meminfo(obj, stats);
> + spin_unlock(&file->table_lock);
> +
> + /* Internal objects. */
> + rcu_read_lock();
> + list_for_each_rcu(pos, &client->objects_list) {
> + obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
> +  client_link));
> + if (!obj)
> + continue;
> + obj_meminfo(obj, stats);
> + i915_gem_object_put(obj);
> + }
> + rcu_read_unlock();
> +
> + for_each_memory_region(mr, i915, id)
> + drm_print_memory_stats(p,
> +&stats[id],
> +DRM_GEM_OBJECT_RESIDENT |
> +DRM_GEM_OBJECT_PURGEABLE,
> +mr->name);
> +}
> +
>  static const char * const uabi_class_names[] = {
>   [I915_ENGINE_CLASS_RENDER] = "render",
>   [I915_ENGINE_CLASS_COPY] = "copy",
> @@ -106,6 +189,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
> drm_file *file)
>* **
>*/
>  
> + show_meminfo(p, file);
> +
>   if (GRAPHICS_VER(i915) < 8)
>   return;
>





[PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-07-27 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists
and accumulate object size into the respective drm_memory_stats
categories.

Objects with multiple possible placements are reported in multiple
regions for total and shared sizes, while other categories are
counted only for the currently active region.

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark 
---
 drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
 1 file changed, 85 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
b/drivers/gpu/drm/i915/i915_drm_client.c
index a61356012df8..9e7a6075ee25 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)
 }
 
 #ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+   struct intel_memory_region *mr;
+   u64 sz = obj->base.size;
+   enum intel_region_id id;
+   unsigned int i;
+
+   /* Attribute size and shared to all possible memory regions. */
+   for (i = 0; i < obj->mm.n_placements; i++) {
+   mr = obj->mm.placements[i];
+   id = mr->id;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   /* Attribute other categories to only the current region. */
+   mr = obj->mm.region;
+   if (mr)
+   id = mr->id;
+   else
+   id = INTEL_REGION_SMEM;
+
+   if (!obj->mm.n_placements) {
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   dma_resv_usage_rw(true)))
+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   struct list_head *pos;
+   unsigned int id;
+
+   /* Public objects. */
+   spin_lock(&file->table_lock);
+   idr_for_each_entry(&file->object_idr, obj, id)
+   obj_meminfo(obj, stats);
+   spin_unlock(&file->table_lock);
+
+   /* Internal objects. */
+   rcu_read_lock();
+   list_for_each_rcu(pos, &client->objects_list) {
+   obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+client_link));
+   if (!obj)
+   continue;
+   obj_meminfo(obj, stats);
+   i915_gem_object_put(obj);
+   }
+   rcu_read_unlock();
+
+   for_each_memory_region(mr, i915, id)
+   drm_print_memory_stats(p,
+  &stats[id],
+  DRM_GEM_OBJECT_RESIDENT |
+  DRM_GEM_OBJECT_PURGEABLE,
+  mr->name);
+}
+
 static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -106,6 +189,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
drm_file *file)
 * **
 */
 
+   show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
 
-- 
2.39.2



[PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-07-07 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists
and accumulate object size into the respective drm_memory_stats
categories.

Objects with multiple possible placements are reported in multiple
regions for total and shared sizes, while other categories are
counted only for the currently active region.

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark 
---
 drivers/gpu/drm/i915/i915_drm_client.c | 85 ++
 1 file changed, 85 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
b/drivers/gpu/drm/i915/i915_drm_client.c
index ffccb6239789..5c77d6987d90 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -45,6 +45,89 @@ void __i915_drm_client_free(struct kref *kref)
 }
 
 #ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+   struct intel_memory_region *mr;
+   u64 sz = obj->base.size;
+   enum intel_region_id id;
+   unsigned int i;
+
+   /* Attribute size and shared to all possible memory regions. */
+   for (i = 0; i < obj->mm.n_placements; i++) {
+   mr = obj->mm.placements[i];
+   id = mr->id;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   /* Attribute other categories to only the current region. */
+   mr = obj->mm.region;
+   if (mr)
+   id = mr->id;
+   else
+   id = INTEL_REGION_SMEM;
+
+   if (!obj->mm.n_placements) {
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   dma_resv_usage_rw(true)))
+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   struct list_head *pos;
+   unsigned int id;
+
+   /* Public objects. */
+   spin_lock(&file->table_lock);
+   idr_for_each_entry (&file->object_idr, obj, id)
+   obj_meminfo(obj, stats);
+   spin_unlock(&file->table_lock);
+
+   /* Internal objects. */
+   rcu_read_lock();
+   list_for_each_rcu(pos, &client->objects_list) {
+   obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+client_link));
+   if (!obj)
+   continue;
+   obj_meminfo(obj, stats);
+   i915_gem_object_put(obj);
+   }
+   rcu_read_unlock();
+
+   for_each_memory_region(mr, i915, id)
+   drm_print_memory_stats(p,
+  &stats[id],
+  DRM_GEM_OBJECT_RESIDENT |
+  DRM_GEM_OBJECT_PURGEABLE,
+  mr->name);
+}
+
 static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -106,6 +189,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
drm_file *file)
 * **
 */
 
+   show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
 
-- 
2.39.2



[PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-06-12 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists
and accumulate object size into the respective drm_memory_stats
categories.

Objects with multiple possible placements are reported in multiple
regions for total and shared sizes, while other categories are
counted only for the currently active region.

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark 
---
 drivers/gpu/drm/i915/i915_drm_client.c | 78 ++
 1 file changed, 78 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
b/drivers/gpu/drm/i915/i915_drm_client.c
index da29d01d1c3d..406e5a5c2961 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -48,6 +48,82 @@ void __i915_drm_client_free(struct kref *kref)
 }
 
 #ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+   struct intel_memory_region *mr;
+   u64 sz = obj->base.size;
+   enum intel_region_id id;
+   unsigned int i;
+
+   /* Attribute size and shared to all possible memory regions. */
+   for (i = 0; i < obj->mm.n_placements; i++) {
+   mr = obj->mm.placements[i];
+   id = mr->id;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   /* Attribute other categories to only the current region. */
+   mr = obj->mm.region;
+   if (mr)
+   id = mr->id;
+   else
+   id = INTEL_REGION_SMEM;
+
+   if (!obj->mm.n_placements) {
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   dma_resv_usage_rw(true)))
+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   unsigned int id;
+
+   /* Public objects. */
+   spin_lock(&file->table_lock);
+   idr_for_each_entry (&file->object_idr, obj, id)
+   obj_meminfo(obj, stats);
+   spin_unlock(&file->table_lock);
+
+   /* Internal objects. */
+   spin_lock(&client->objects_lock);
+   list_for_each_entry(obj, &client->objects_list, client_link)
+   obj_meminfo(obj, stats);
+   spin_unlock(&client->objects_lock);
+
+   for_each_memory_region(mr, i915, id)
+   drm_print_memory_stats(p,
+  &stats[id],
+  DRM_GEM_OBJECT_RESIDENT |
+  DRM_GEM_OBJECT_PURGEABLE,
+  mr->name);
+}
+
 static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -109,6 +185,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
drm_file *file)
 * **
 */
 
+   show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
 
-- 
2.39.2



[PATCH 5/5] drm/i915: Implement fdinfo memory stats printing

2023-06-08 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Use the newly added drm_print_memory_stats helper to show memory
utilisation of our objects in drm/driver specific fdinfo output.

To collect the stats we walk the per memory regions object lists
and accumulate object size into the respective drm_memory_stats
categories.

Objects with multiple possible placements are reported in multiple
regions for total and shared sizes, while other categories are
counted only for the currently active region.

Signed-off-by: Tvrtko Ursulin 
Cc: Aravind Iddamsetty 
Cc: Rob Clark 
---
 drivers/gpu/drm/i915/i915_drm_client.c | 64 ++
 1 file changed, 64 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c 
b/drivers/gpu/drm/i915/i915_drm_client.c
index 777930f4995f..686db139b241 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -48,6 +48,68 @@ void __i915_drm_client_free(struct kref *kref)
 }
 
 #ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+   struct intel_memory_region *mr;
+   u64 sz = obj->base.size;
+   enum intel_region_id id;
+   unsigned int i;
+
+   /* Attribute size and shared to all possible memory regions. */
+   for (i = 0; i < obj->mm.n_placements; i++) {
+   mr = obj->mm.placements[i];
+   id = mr->id;
+
+   if (obj->base.handle_count > 1)
+   stats[id].shared += sz;
+   else
+   stats[id].private += sz;
+   }
+
+   /* Attribute other categories to only the current region. */
+   mr = obj->mm.region;
+   if (mr)
+   id = mr->id;
+   else
+   id = INTEL_REGION_SMEM;
+
+   if (i915_gem_object_has_pages(obj)) {
+   stats[id].resident += sz;
+
+   if (!dma_resv_test_signaled(obj->base.resv,
+   dma_resv_usage_rw(true)))
+   stats[id].active += sz;
+   else if (i915_gem_object_is_shrinkable(obj) &&
+obj->mm.madv == I915_MADV_DONTNEED)
+   stats[id].purgeable += sz;
+   }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+   struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+   struct drm_i915_file_private *fpriv = file->driver_priv;
+   struct i915_drm_client *client = fpriv->client;
+   struct drm_i915_private *i915 = fpriv->i915;
+   struct drm_i915_gem_object *obj;
+   struct intel_memory_region *mr;
+   unsigned int id;
+
+   mutex_lock(&client->objects_lock);
+   list_for_each_entry(obj, &client->objects_list, client_link)
+   obj_meminfo(obj, stats);
+   mutex_unlock(&client->objects_lock);
+
+   for_each_memory_region(mr, i915, id)
+   drm_print_memory_stats(p,
+  &stats[id],
+  DRM_GEM_OBJECT_RESIDENT |
+  DRM_GEM_OBJECT_PURGEABLE,
+  mr->name);
+}
+
 static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -109,6 +171,8 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct 
drm_file *file)
 * **
 */
 
+   show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
 
-- 
2.39.2