[PATCH v2] drm/ttm: Change the meaning of the fields in the ttm_place structure from pfn to bytes

2023-03-06 Thread Somalapuram Amaranath
The ttm_place structure allows page-based allocation,
to support byte-based allocation using default or custom
ttm_resource_manager_func function like
ttm_range_man_alloc, amdgpu_gtt_mgr_new,
i915_ttm_buddy_man_alloc, nouveau_vram_manager_new etc.
Change the ttm_place structure members fpfn, lpfn and mem_type to
res_start, res_end and res_type.
Change their type from unsigned to u64.
Fix the dependencies in all the DRM drivers and
clean up the PAGE_SHIFT operations.

v1 -> v2: fix the bug found in Michel's review; address Stanislaw's
comment about the missing justification.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c   |  11 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c|  66 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  22 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  17 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  |  40 ---
 drivers/gpu/drm/drm_gem_vram_helper.c |  10 +-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c   |  22 ++--
 drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c  |   2 +-
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 102 --
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.h |   2 +-
 drivers/gpu/drm/i915/intel_region_ttm.c   |  12 +--
 drivers/gpu/drm/nouveau/nouveau_bo.c  |  41 +++
 drivers/gpu/drm/nouveau/nouveau_mem.c |  10 +-
 drivers/gpu/drm/qxl/qxl_object.c  |  14 +--
 drivers/gpu/drm/qxl/qxl_ttm.c |   8 +-
 drivers/gpu/drm/radeon/radeon_object.c|  50 -
 drivers/gpu/drm/radeon/radeon_ttm.c   |  20 ++--
 drivers/gpu/drm/radeon/radeon_uvd.c   |   8 +-
 drivers/gpu/drm/ttm/ttm_bo.c  |  20 ++--
 drivers/gpu/drm/ttm/ttm_range_manager.c   |  21 ++--
 drivers/gpu/drm/ttm/ttm_resource.c|   8 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c|  46 
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c|  30 +++---
 include/drm/ttm/ttm_placement.h   |  12 +--
 25 files changed, 293 insertions(+), 305 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..2cf1e3697250 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -131,11 +131,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
goto err_free;
}
 
-   if (place->lpfn) {
+   if (place->res_end) {
spin_lock(>lock);
r = drm_mm_insert_node_in_range(>mm, >mm_nodes[0],
-   num_pages, tbo->page_alignment,
-   0, place->fpfn, place->lpfn,
+   num_pages, tbo->page_alignment, 
0,
+   place->res_start >> PAGE_SHIFT,
+   place->res_end >> PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(>lock);
if (unlikely(r))
@@ -219,7 +220,7 @@ static bool amdgpu_gtt_mgr_intersects(struct 
ttm_resource_manager *man,
  const struct ttm_place *place,
  size_t size)
 {
-   return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+   return !place->res_end || amdgpu_gtt_mgr_has_gart_addr(res);
 }
 
 /**
@@ -237,7 +238,7 @@ static bool amdgpu_gtt_mgr_compatible(struct 
ttm_resource_manager *man,
  const struct ttm_place *place,
  size_t size)
 {
-   return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+   return !place->res_end || amdgpu_gtt_mgr_has_gart_addr(res);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 283e8fe608ce..2926389e21d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -130,15 +130,15 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo 
*abo, u32 domain)
u32 c = 0;
 
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
-   unsigned visible_pfn = adev->gmc.visible_vram_size >> 
PAGE_SHIFT;
+   u64 visible_pfn = adev->gmc.visible_vram_size;
 
-   places[c].fpfn = 0;
-   places[c].lpfn = 0;
-   places[c].mem_type = TTM_PL_VRAM;
+   places[c].res_start = 0;
+   places[c].res_end = 0;
+   places[c].res_type = TTM_PL_VRAM;
places[c].flags = 0;
 
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
-   places[c].lpfn = visible_pfn;
+   places[c].res_end = visible_pfn;

[PATCH] Change the meaning of the fields in the ttm_place structure from pfn to bytes

2023-03-02 Thread Somalapuram Amaranath
Change the ttm_place structure members fpfn, lpfn and mem_type to
res_start, res_end and res_type.
Change their type from unsigned to u64.
Fix the dependencies in all the DRM drivers and
clean up the PAGE_SHIFT operations.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c   |  11 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c|  66 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  22 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  17 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  |  40 ---
 drivers/gpu/drm/drm_gem_vram_helper.c |  10 +-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c   |  22 ++--
 drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c  |   2 +-
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 102 --
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.h |   2 +-
 drivers/gpu/drm/i915/intel_region_ttm.c   |  12 +--
 drivers/gpu/drm/nouveau/nouveau_bo.c  |  41 +++
 drivers/gpu/drm/nouveau/nouveau_mem.c |  10 +-
 drivers/gpu/drm/qxl/qxl_object.c  |  14 +--
 drivers/gpu/drm/qxl/qxl_ttm.c |   8 +-
 drivers/gpu/drm/radeon/radeon_object.c|  50 -
 drivers/gpu/drm/radeon/radeon_ttm.c   |  20 ++--
 drivers/gpu/drm/radeon/radeon_uvd.c   |   8 +-
 drivers/gpu/drm/ttm/ttm_bo.c  |  20 ++--
 drivers/gpu/drm/ttm/ttm_range_manager.c   |  21 ++--
 drivers/gpu/drm/ttm/ttm_resource.c|   8 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c|  46 
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c|  30 +++---
 include/drm/ttm/ttm_placement.h   |  12 +--
 25 files changed, 293 insertions(+), 305 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..5b5104e724e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -131,11 +131,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
goto err_free;
}
 
-   if (place->lpfn) {
+   if (place->res_end) {
spin_lock(>lock);
r = drm_mm_insert_node_in_range(>mm, >mm_nodes[0],
-   num_pages, tbo->page_alignment,
-   0, place->fpfn, place->lpfn,
+   num_pages, tbo->page_alignment, 
0,
+   place->res_start << PAGE_SHIFT,
+   place->res_end << PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(>lock);
if (unlikely(r))
@@ -219,7 +220,7 @@ static bool amdgpu_gtt_mgr_intersects(struct 
ttm_resource_manager *man,
  const struct ttm_place *place,
  size_t size)
 {
-   return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+   return !place->res_end || amdgpu_gtt_mgr_has_gart_addr(res);
 }
 
 /**
@@ -237,7 +238,7 @@ static bool amdgpu_gtt_mgr_compatible(struct 
ttm_resource_manager *man,
  const struct ttm_place *place,
  size_t size)
 {
-   return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+   return !place->res_end || amdgpu_gtt_mgr_has_gart_addr(res);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 283e8fe608ce..2926389e21d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -130,15 +130,15 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo 
*abo, u32 domain)
u32 c = 0;
 
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
-   unsigned visible_pfn = adev->gmc.visible_vram_size >> 
PAGE_SHIFT;
+   u64 visible_pfn = adev->gmc.visible_vram_size;
 
-   places[c].fpfn = 0;
-   places[c].lpfn = 0;
-   places[c].mem_type = TTM_PL_VRAM;
+   places[c].res_start = 0;
+   places[c].res_end = 0;
+   places[c].res_type = TTM_PL_VRAM;
places[c].flags = 0;
 
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
-   places[c].lpfn = visible_pfn;
+   places[c].res_end = visible_pfn;
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 
@@ -148,9 +148,9 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, 
u32 domain)
}
 
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
-   places[c].fpfn = 0;
-   places[c].lpfn = 0;
-   places[c].m

[PATCH 6/6] drm/amdgpu: Cleanup the GDS, GWS and OA allocations

2023-02-08 Thread Somalapuram Amaranath
Change the size of GDS, GWS and OA from pages to bytes.
The initialized gds_size, gws_size and oa_size in bytes,
remove PAGE_SHIFT in amdgpu_ttm_init_on_chip().
Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c| 12 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c|  3 +--
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index c3d9d75143f4..4641b25956fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -142,16 +142,16 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, 
struct amdgpu_bo *gds,
  struct amdgpu_bo *gws, struct amdgpu_bo *oa)
 {
if (gds) {
-   job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
-   job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+   job->gds_base = amdgpu_bo_gpu_offset(gds);
+   job->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
-   job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
-   job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+   job->gws_base = amdgpu_bo_gpu_offset(gws);
+   job->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
-   job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
-   job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+   job->oa_base = amdgpu_bo_gpu_offset(oa);
+   job->oa_size = amdgpu_bo_size(oa);
}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f5d5eee09cea..9285037d6d88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -541,12 +541,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
-   size <<= PAGE_SHIFT;
 
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
-   size = ALIGN(size, 4) << PAGE_SHIFT;
+   size = ALIGN(size, 4);
} else {
/* Memory should be aligned at least to a page size. */
page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f0dabdfd3780..a8e444a31d8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -77,8 +77,7 @@ static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
uint64_t size)
 {
-   return ttm_range_man_init(>mman.bdev, type,
- false, size << PAGE_SHIFT);
+   return ttm_range_man_init(>mman.bdev, type, false, size);
 }
 
 /**
-- 
2.32.0



[PATCH 5/6] drm/ttm: Change the meaning of the fields in the drm_mm_node structure from pfn to bytes

2023-02-08 Thread Somalapuram Amaranath
Change the ttm_range_man_alloc() allocation from pages to bytes.
Convert the dependent drm_mm_node start and size from pages to bytes.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/i915/i915_scatterlist.c |  6 +++---
 drivers/gpu/drm/ttm/ttm_range_manager.c | 15 +++
 2 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c 
b/drivers/gpu/drm/i915/i915_scatterlist.c
index 756289e43dff..7defda1219d0 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -94,7 +94,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct 
drm_mm_node *node,
if (!rsgt)
return ERR_PTR(-ENOMEM);
 
-   i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
+   i915_refct_sgt_init(rsgt, node->size);
st = >table;
/* restricted by sg_alloc_table */
if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
@@ -110,8 +110,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct 
drm_mm_node *node,
sg = st->sgl;
st->nents = 0;
prev_end = (resource_size_t)-1;
-   block_size = node->size << PAGE_SHIFT;
-   offset = node->start << PAGE_SHIFT;
+   block_size = node->size;
+   offset = node->start;
 
while (block_size) {
u64 len;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c 
b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 62fddcc59f02..ff9962f7f81d 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,9 +83,10 @@ static int ttm_range_man_alloc(struct ttm_resource_manager 
*man,
 
spin_lock(>lock);
ret = drm_mm_insert_node_in_range(mm, >mm_nodes[0],
- PFN_UP(node->base.size),
- bo->page_alignment, 0,
- place->fpfn, lpfn, mode);
+ node->base.size,
+ bo->page_alignment << PAGE_SHIFT, 0,
+ place->fpfn << PAGE_SHIFT,
+ lpfn << PAGE_SHIFT, mode);
spin_unlock(>lock);
 
if (unlikely(ret)) {
@@ -119,11 +120,10 @@ static bool ttm_range_man_intersects(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
/* Don't evict BOs outside of the requested placement range */
-   if (place->fpfn >= (node->start + num_pages) ||
-   (place->lpfn && place->lpfn <= node->start))
+   if ((place->fpfn << PAGE_SHIFT) >= (node->start + size) ||
+   (place->lpfn && (place->lpfn << PAGE_SHIFT) <= node->start))
return false;
 
return true;
@@ -135,10 +135,9 @@ static bool ttm_range_man_compatible(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
if (node->start < place->fpfn ||
-   (place->lpfn && (node->start + num_pages) > place->lpfn))
+   (place->lpfn && (node->start + size) > place->lpfn << PAGE_SHIFT))
return false;
 
return true;
-- 
2.32.0



[PATCH 4/6] drm/ttm: Change the parameters of ttm_range_man_init() from pages to bytes

2023-02-08 Thread Somalapuram Amaranath
Change the size parameter of ttm_range_man_init_nocheck()
from pages to bytes.
Clean up the PAGE_SHIFT operations in the dependent caller functions.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++--
 drivers/gpu/drm/drm_gem_vram_helper.c   | 2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++--
 drivers/gpu/drm/ttm/ttm_range_manager.c | 8 
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2 +-
 include/drm/ttm/ttm_range_manager.h | 6 +++---
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6b270d4662a3..f0dabdfd3780 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -75,10 +75,10 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device 
*bdev,
 
 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
-   uint64_t size_in_page)
+   uint64_t size)
 {
return ttm_range_man_init(>mman.bdev, type,
- false, size_in_page);
+ false, size << PAGE_SHIFT);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c 
b/drivers/gpu/drm/drm_gem_vram_helper.c
index e7be562790de..db1915414e4a 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -999,7 +999,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct 
drm_device *dev,
return ret;
 
ret = ttm_range_man_init(>bdev, TTM_PL_VRAM,
-false, vram_size >> PAGE_SHIFT);
+false, vram_size);
if (ret)
return ret;
 
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c 
b/drivers/gpu/drm/radeon/radeon_ttm.c
index 777d38b211d2..aa8785b6b1e8 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -70,13 +70,13 @@ struct radeon_device *radeon_get_rdev(struct ttm_device 
*bdev)
 static int radeon_ttm_init_vram(struct radeon_device *rdev)
 {
return ttm_range_man_init(>mman.bdev, TTM_PL_VRAM,
- false, rdev->mc.real_vram_size >> PAGE_SHIFT);
+ false, rdev->mc.real_vram_size);
 }
 
 static int radeon_ttm_init_gtt(struct radeon_device *rdev)
 {
return ttm_range_man_init(>mman.bdev, TTM_PL_TT,
- true, rdev->mc.gtt_size >> PAGE_SHIFT);
+ true, rdev->mc.gtt_size);
 }
 
 static void radeon_evict_flags(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c 
b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..62fddcc59f02 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -169,7 +169,7 @@ static const struct ttm_resource_manager_func 
ttm_range_manager_func = {
  * @bdev: ttm device
  * @type: memory manager type
  * @use_tt: if the memory manager uses tt
- * @p_size: size of area to be managed in pages.
+ * @size: size of area to be managed in bytes.
  *
  * The range manager is installed for this device in the type slot.
  *
@@ -177,7 +177,7 @@ static const struct ttm_resource_manager_func 
ttm_range_manager_func = {
  */
 int ttm_range_man_init_nocheck(struct ttm_device *bdev,
   unsigned type, bool use_tt,
-  unsigned long p_size)
+  u64 size)
 {
struct ttm_resource_manager *man;
struct ttm_range_manager *rman;
@@ -191,9 +191,9 @@ int ttm_range_man_init_nocheck(struct ttm_device *bdev,
 
man->func = _range_manager_func;
 
-   ttm_resource_manager_init(man, bdev, p_size);
+   ttm_resource_manager_init(man, bdev, size);
 
-   drm_mm_init(>mm, 0, p_size);
+   drm_mm_init(>mm, 0, size);
spin_lock_init(>lock);
 
ttm_set_driver_manager(bdev, type, >manager);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9ad28346aff7..4926e7c73e75 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -700,7 +700,7 @@ static int vmw_vram_manager_init(struct vmw_private 
*dev_priv)
 {
int ret;
ret = ttm_range_man_init(_priv->bdev, TTM_PL_VRAM, false,
-dev_priv->vram_size >> PAGE_SHIFT);
+dev_priv->vram_size);
ttm_resource_manager_set_used(ttm_manager_type(_priv->bdev, 
TTM_PL_VRAM), false);
return ret;
 }
diff --git a/include/drm/ttm/ttm_range_manager.h 
b/include/drm/ttm/ttm_range_manager.h
index 7963b957e9ef..05bffded1b53 100644
--- a/include/drm/ttm/ttm_range_manager.h
+++ b/include/drm/ttm/ttm_range_man

[PATCH 3/6] drm/ttm: Change the meaning of resource->start from pfn to bytes

2023-02-08 Thread Somalapuram Amaranath
Change resource->start from pfn to bytes to
allow allocating objects smaller than a page.
Change all DRM drivers using ttm_resource start and size pfn to bytes.
Change amdgpu_res_first() cur->start, cur->size from pfn to bytes.
Replacing ttm_resource resource->start field with cursor.start.
Change amdgpu_gtt_mgr_new() allocation from pfn to bytes.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 13 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  |  4 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h  |  8 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 +++---
 .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c   |  6 +-
 drivers/gpu/drm/drm_gem_vram_helper.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c| 13 ++---
 drivers/gpu/drm/nouveau/nouveau_bo0039.c|  4 ++--
 drivers/gpu/drm/nouveau/nouveau_mem.c   | 10 +-
 drivers/gpu/drm/nouveau/nouveau_ttm.c   |  2 +-
 drivers/gpu/drm/nouveau/nv17_fence.c|  2 +-
 drivers/gpu/drm/nouveau/nv50_fence.c|  2 +-
 drivers/gpu/drm/qxl/qxl_drv.h   |  2 +-
 drivers/gpu/drm/qxl/qxl_object.c|  2 +-
 drivers/gpu/drm/qxl/qxl_ttm.c   |  5 ++---
 drivers/gpu/drm/radeon/radeon_object.c  |  6 +++---
 drivers/gpu/drm/radeon/radeon_object.h  |  2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c | 13 ++---
 drivers/gpu/drm/radeon/radeon_vm.c  |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c  |  4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c  |  3 +--
 23 files changed, 63 insertions(+), 56 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..a1fbfc5984d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -116,7 +116,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
  struct ttm_resource **res)
 {
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-   uint32_t num_pages = PFN_UP(tbo->base.size);
struct ttm_range_mgr_node *node;
int r;
 
@@ -134,8 +133,10 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
if (place->lpfn) {
spin_lock(>lock);
r = drm_mm_insert_node_in_range(>mm, >mm_nodes[0],
-   num_pages, tbo->page_alignment,
-   0, place->fpfn, place->lpfn,
+   tbo->base.size,
+   tbo->page_alignment << 
PAGE_SHIFT, 0,
+   place->fpfn << PAGE_SHIFT,
+   place->lpfn << PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(>lock);
if (unlikely(r))
@@ -144,7 +145,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = PFN_UP(node->base.size);
+   node->mm_nodes[0].size = node->base.size;
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
@@ -285,8 +286,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, 
uint64_t gtt_size)
 
ttm_resource_manager_init(man, >mman.bdev, gtt_size);
 
-   start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-   size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+   start = (AMDGPU_GTT_MAX_TRANSFER_SIZE * 
AMDGPU_GTT_NUM_TRANSFER_WINDOWS) << PAGE_SHIFT;
+   size = adev->gmc.gart_size - start;
drm_mm_init(>mm, start, size);
spin_lock_init(>lock);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d835ee2131d2..f5d5eee09cea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1488,9 +1488,11 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+   struct amdgpu_res_cursor cursor;
uint64_t offset;
 
-   offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+   amdgpu_res_first(bo->tbo.resource, 0, bo->tbo.resource->size, );
+   of

[PATCH 2/6] drm/amdgpu: Remove TTM resource->start visible VRAM condition

2023-02-08 Thread Somalapuram Amaranath
Use amdgpu_bo_in_cpu_visible_vram() instead.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9 +++--
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 981010de0a28..d835ee2131d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -600,7 +600,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
if (!amdgpu_gmc_vram_full_visible(>gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
-   bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+   amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 ctx.bytes_moved);
else
@@ -1346,7 +1346,6 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-   unsigned long offset;
int r;
 
/* Remember that this BO was accessed by the CPU */
@@ -1355,8 +1354,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
 
-   offset = bo->resource->start << PAGE_SHIFT;
-   if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
+   if (amdgpu_bo_in_cpu_visible_vram(abo))
return 0;
 
/* Can't move a pinned BO to visible VRAM */
@@ -1378,10 +1376,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
 
-   offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
-   (offset + bo->base.size) > adev->gmc.visible_vram_size)
+   amdgpu_bo_in_cpu_visible_vram(abo))
return VM_FAULT_SIGBUS;
 
ttm_bo_move_to_lru_tail_unlocked(bo);
-- 
2.32.0



[PATCH 1/6] drm/gem: Remove BUG_ON in drm_gem_private_object_init

2023-02-08 Thread Somalapuram Amaranath
ttm_resource can now allocate sizes in bytes, which allows objects smaller than the page size.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/drm_gem.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 59a0bb5ebd85..ee8b5c2b6c60 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -152,8 +152,6 @@ EXPORT_SYMBOL(drm_gem_object_init);
 void drm_gem_private_object_init(struct drm_device *dev,
 struct drm_gem_object *obj, size_t size)
 {
-   BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj->dev = dev;
obj->filp = NULL;
 
-- 
2.32.0



[PATCH v4 4/4] drm/amdgpu: Cleanup PAGE_SHIFT operation

2023-01-25 Thread Somalapuram Amaranath
Cleaning up page shift operations.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a97e8236bde9..ffe6a1ab7f9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -930,7 +930,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
-   bo->resource->start = addr >> PAGE_SHIFT;
+   bo->resource->start = addr;
return 0;
}
 
-- 
2.32.0



[PATCH v4 3/4] drm/amdgpu: Move the amdgpu_gtt_mgr start and size from pages to bytes

2023-01-25 Thread Somalapuram Amaranath
Switch the GTT manager helpers amdgpu_res_first and amdgpu_res_next
from pages to bytes and clean up the PAGE_SHIFT operations.
Change the GTT manager init and allocation from pages to bytes.
v1 -> v2: reorder patch sequence
v3 -> v4: reorder patch sequence

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c| 13 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h |  8 
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..a1fbfc5984d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -116,7 +116,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
  struct ttm_resource **res)
 {
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-   uint32_t num_pages = PFN_UP(tbo->base.size);
struct ttm_range_mgr_node *node;
int r;
 
@@ -134,8 +133,10 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
if (place->lpfn) {
spin_lock(>lock);
r = drm_mm_insert_node_in_range(>mm, >mm_nodes[0],
-   num_pages, tbo->page_alignment,
-   0, place->fpfn, place->lpfn,
+   tbo->base.size,
+   tbo->page_alignment << 
PAGE_SHIFT, 0,
+   place->fpfn << PAGE_SHIFT,
+   place->lpfn << PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(>lock);
if (unlikely(r))
@@ -144,7 +145,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = PFN_UP(node->base.size);
+   node->mm_nodes[0].size = node->base.size;
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
@@ -285,8 +286,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, 
uint64_t gtt_size)
 
ttm_resource_manager_init(man, >mman.bdev, gtt_size);
 
-   start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-   size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+   start = (AMDGPU_GTT_MAX_TRANSFER_SIZE * 
AMDGPU_GTT_NUM_TRANSFER_WINDOWS) << PAGE_SHIFT;
+   size = adev->gmc.gart_size - start;
drm_mm_init(>mm, start, size);
spin_lock_init(>lock);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 5c4f93ee0c57..5c78f0b09351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -94,8 +94,8 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
 
-   cur->start = (node->start << PAGE_SHIFT) + start;
-   cur->size = min((node->size << PAGE_SHIFT) - start, size);
+   cur->start = node->start + start;
+   cur->size = min(node->size - start, size);
cur->remaining = size;
cur->node = node;
break;
@@ -155,8 +155,8 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor 
*cur, uint64_t size)
node = cur->node;
 
cur->node = ++node;
-   cur->start = node->start << PAGE_SHIFT;
-   cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+   cur->start = node->start;
+   cur->size = min(node->size, cur->remaining);
break;
default:
return;
-- 
2.32.0



[PATCH v4 2/4] drm/amdkfd: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
Clean up the PAGE_SHIFT operations and replace
ttm_resource resource->start with the cursor start
obtained via the amdgpu_res_first API.
v1 -> v2: reorder patch sequence
v2 -> v3: addressing review comment v2

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c06ada0844ba..9114393d2ee6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -200,8 +200,12 @@ static int add_queue_mes(struct device_queue_manager *dqm, 
struct queue *q,
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 
if (q->wptr_bo) {
+   struct amdgpu_res_cursor cursor;
+
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE 
- 1);
-   queue_input.wptr_mc_addr = 
((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+   amdgpu_res_first(q->wptr_bo->tbo.resource, 0,
+q->wptr_bo->tbo.resource->size, );
+   queue_input.wptr_mc_addr = cursor.start + wptr_addr_off;
}
 
queue_input.is_kfd_process = 1;
-- 
2.32.0



[PATCH v4 1/4] drm/amdgpu: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
Clean up the PAGE_SHIFT operations and replace
ttm_resource resource->start with the cursor start
obtained via the amdgpu_res_first API.
v1 -> v2: reorder patch sequence
v2 -> v3: addressing review comment v2
v3 -> v4: addressing review comment v3

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c| 8 ++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25a68de0..2a74039c82eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1491,9 +1491,11 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+   struct amdgpu_res_cursor cursor;
uint64_t offset;
 
-   offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+   amdgpu_res_first(bo->tbo.resource, 0, bo->tbo.resource->size, );
+   offset = cursor.start +
 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
return amdgpu_gmc_sign_extend(offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c5ef7f7bdc15..a97e8236bde9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -849,6 +849,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+   struct amdgpu_res_cursor cursor;
uint64_t flags;
int r;
 
@@ -896,7 +897,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 
/* bind pages into GART page tables */
-   gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+   amdgpu_res_first(bo_mem, 0, bo_mem->size, );
+   gtt->offset = cursor.start;
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 gtt->ttm.dma_address, flags);
gtt->bound = true;
@@ -916,6 +918,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
+   struct amdgpu_res_cursor cursor;
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -949,7 +952,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
 
/* Bind pages */
-   gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+   amdgpu_res_first(tmp, 0, tmp->size, );
+   gtt->offset = cursor.start;
amdgpu_ttm_gart_bind(adev, bo, flags);
amdgpu_gart_invalidate_tlb(adev);
ttm_resource_free(bo, >resource);
-- 
2.32.0



[PATCH v3 4/4] drm/amdgpu: Support allocation of amdgpu_gtt_mgr from pages to bytes

2023-01-25 Thread Somalapuram Amaranath
Change the GTT manager init and allocate from pages to bytes
v1 -> v2: reorder patch sequence

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..a1fbfc5984d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -116,7 +116,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
  struct ttm_resource **res)
 {
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-   uint32_t num_pages = PFN_UP(tbo->base.size);
struct ttm_range_mgr_node *node;
int r;
 
@@ -134,8 +133,10 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
if (place->lpfn) {
spin_lock(>lock);
r = drm_mm_insert_node_in_range(>mm, >mm_nodes[0],
-   num_pages, tbo->page_alignment,
-   0, place->fpfn, place->lpfn,
+   tbo->base.size,
+   tbo->page_alignment << 
PAGE_SHIFT, 0,
+   place->fpfn << PAGE_SHIFT,
+   place->lpfn << PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(>lock);
if (unlikely(r))
@@ -144,7 +145,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = PFN_UP(node->base.size);
+   node->mm_nodes[0].size = node->base.size;
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
@@ -285,8 +286,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, 
uint64_t gtt_size)
 
ttm_resource_manager_init(man, >mman.bdev, gtt_size);
 
-   start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-   size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+   start = (AMDGPU_GTT_MAX_TRANSFER_SIZE * 
AMDGPU_GTT_NUM_TRANSFER_WINDOWS) << PAGE_SHIFT;
+   size = adev->gmc.gart_size - start;
drm_mm_init(>mm, start, size);
spin_lock_init(>lock);
 
-- 
2.32.0



[PATCH v3 3/4] drm/amdgpu: Move the amdgpu_gtt_mgr start and size from pages to bytes

2023-01-25 Thread Somalapuram Amaranath
To support GTT manager amdgpu_res_first, amdgpu_res_next
from pages to bytes and clean up PAGE_SHIFT operation.
v1 -> v2: reorder patch sequence

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 5c4f93ee0c57..5c78f0b09351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -94,8 +94,8 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
 
-   cur->start = (node->start << PAGE_SHIFT) + start;
-   cur->size = min((node->size << PAGE_SHIFT) - start, size);
+   cur->start = node->start + start;
+   cur->size = min(node->size - start, size);
cur->remaining = size;
cur->node = node;
break;
@@ -155,8 +155,8 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor 
*cur, uint64_t size)
node = cur->node;
 
cur->node = ++node;
-   cur->start = node->start << PAGE_SHIFT;
-   cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+   cur->start = node->start;
+   cur->size = min(node->size, cur->remaining);
break;
default:
return;
-- 
2.32.0



[PATCH v3 2/4] drm/amdkfd: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
cleanup PAGE_SHIFT operation and replacing
ttm_resource resource->start with cursor start
using amdgpu_res_first API.
v1 -> v2: reorder patch sequence
v2 -> v3: addressing review comment v2

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c06ada0844ba..9114393d2ee6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -200,8 +200,12 @@ static int add_queue_mes(struct device_queue_manager *dqm, 
struct queue *q,
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 
if (q->wptr_bo) {
+   struct amdgpu_res_cursor cursor;
+
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE 
- 1);
-   queue_input.wptr_mc_addr = 
((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+   amdgpu_res_first(q->wptr_bo->tbo.resource, 0,
+q->wptr_bo->tbo.resource->size, );
+   queue_input.wptr_mc_addr = cursor.start + wptr_addr_off;
}
 
queue_input.is_kfd_process = 1;
-- 
2.32.0



[PATCH v3 1/4] drm/amdgpu: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
cleanup PAGE_SHIFT operation and replacing
ttm_resource resource->start with cursor start
using amdgpu_res_first API.
v1 -> v2: reorder patch sequence
v2 -> v3: addressing review comment v2

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  4 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c| 10 +++---
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25a68de0..2a74039c82eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1491,9 +1491,11 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+   struct amdgpu_res_cursor cursor;
uint64_t offset;
 
-   offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+   amdgpu_res_first(bo->tbo.resource, 0, bo->tbo.resource->size, );
+   offset = cursor.start +
 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
return amdgpu_gmc_sign_extend(offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c5ef7f7bdc15..ffe6a1ab7f9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -849,6 +849,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+   struct amdgpu_res_cursor cursor;
uint64_t flags;
int r;
 
@@ -896,7 +897,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 
/* bind pages into GART page tables */
-   gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+   amdgpu_res_first(bo_mem, 0, bo_mem->size, );
+   gtt->offset = cursor.start;
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 gtt->ttm.dma_address, flags);
gtt->bound = true;
@@ -916,6 +918,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
+   struct amdgpu_res_cursor cursor;
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -927,7 +930,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
-   bo->resource->start = addr >> PAGE_SHIFT;
+   bo->resource->start = addr;
return 0;
}
 
@@ -949,7 +952,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
 
/* Bind pages */
-   gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+   amdgpu_res_first(tmp, 0, tmp->size, );
+   gtt->offset = cursor.start;
amdgpu_ttm_gart_bind(adev, bo, flags);
amdgpu_gart_invalidate_tlb(adev);
ttm_resource_free(bo, >resource);
-- 
2.32.0



[PATCH v2 4/4] drm/amdgpu: Support allocation of amdgpu_gtt_mgr from pages to bytes

2023-01-25 Thread Somalapuram Amaranath
Change the GTT manager init and allocate from pages to bytes
v1 -> v2: reorder patch sequence

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..a1fbfc5984d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -116,7 +116,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
  struct ttm_resource **res)
 {
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-   uint32_t num_pages = PFN_UP(tbo->base.size);
struct ttm_range_mgr_node *node;
int r;
 
@@ -134,8 +133,10 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
if (place->lpfn) {
spin_lock(>lock);
r = drm_mm_insert_node_in_range(>mm, >mm_nodes[0],
-   num_pages, tbo->page_alignment,
-   0, place->fpfn, place->lpfn,
+   tbo->base.size,
+   tbo->page_alignment << 
PAGE_SHIFT, 0,
+   place->fpfn << PAGE_SHIFT,
+   place->lpfn << PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(>lock);
if (unlikely(r))
@@ -144,7 +145,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = PFN_UP(node->base.size);
+   node->mm_nodes[0].size = node->base.size;
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
@@ -285,8 +286,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, 
uint64_t gtt_size)
 
ttm_resource_manager_init(man, >mman.bdev, gtt_size);
 
-   start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-   size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+   start = (AMDGPU_GTT_MAX_TRANSFER_SIZE * 
AMDGPU_GTT_NUM_TRANSFER_WINDOWS) << PAGE_SHIFT;
+   size = adev->gmc.gart_size - start;
drm_mm_init(>mm, start, size);
spin_lock_init(>lock);
 
-- 
2.32.0



[PATCH v2 2/4] drm/amdkfd: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
cleanup PAGE_SHIFT operation and replacing
ttm_resource resource->start with cursor start
using amdgpu_res_first API
v1 -> v2: reorder patch sequence

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c06ada0844ba..f87ce4f1cb93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -200,8 +200,11 @@ static int add_queue_mes(struct device_queue_manager *dqm, 
struct queue *q,
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 
if (q->wptr_bo) {
+   struct amdgpu_res_cursor cursor;
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE 
- 1);
-   queue_input.wptr_mc_addr = 
((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+   amdgpu_res_first(q->wptr_bo->tbo.resource, 0,
+q->wptr_bo->tbo.resource->size, );
+   queue_input.wptr_mc_addr = cursor.start + wptr_addr_off;
}
 
queue_input.is_kfd_process = 1;
-- 
2.32.0



[PATCH v2 3/4] drm/amdgpu: Move the amdgpu_gtt_mgr start and size from pages to bytes

2023-01-25 Thread Somalapuram Amaranath
To support GTT manager amdgpu_res_first, amdgpu_res_next
from pages to bytes and clean up PAGE_SHIFT operation.
v1 -> v2: reorder patch sequence

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 5c4f93ee0c57..5c78f0b09351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -94,8 +94,8 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
 
-   cur->start = (node->start << PAGE_SHIFT) + start;
-   cur->size = min((node->size << PAGE_SHIFT) - start, size);
+   cur->start = node->start + start;
+   cur->size = min(node->size - start, size);
cur->remaining = size;
cur->node = node;
break;
@@ -155,8 +155,8 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor 
*cur, uint64_t size)
node = cur->node;
 
cur->node = ++node;
-   cur->start = node->start << PAGE_SHIFT;
-   cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+   cur->start = node->start;
+   cur->size = min(node->size, cur->remaining);
break;
default:
return;
-- 
2.32.0



[PATCH v2 1/4] drm/amdgpu: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
cleanup PAGE_SHIFT operation and replacing
ttm_resource resource->start with cursor start
using amdgpu_res_first API.
v1 -> v2: reorder patch sequence

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c| 10 +++---
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25a68de0..2ab67ab204df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1346,6 +1346,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+   struct amdgpu_res_cursor cursor;
unsigned long offset;
int r;
 
@@ -1355,7 +1356,8 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
 
-   offset = bo->resource->start << PAGE_SHIFT;
+   amdgpu_res_first(bo->resource, 0, bo->resource->size, );
+   offset = cursor.start;
if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
return 0;
 
@@ -1378,7 +1380,8 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
 
-   offset = bo->resource->start << PAGE_SHIFT;
+   amdgpu_res_first(bo->resource, 0, bo->resource->size, );
+   offset = cursor.start;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
(offset + bo->base.size) > adev->gmc.visible_vram_size)
@@ -1491,9 +1494,11 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+   struct amdgpu_res_cursor cursor;
uint64_t offset;
 
-   offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+   amdgpu_res_first(bo->tbo.resource, 0, bo->tbo.resource->size, );
+   offset = cursor.start +
 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
return amdgpu_gmc_sign_extend(offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c5ef7f7bdc15..ffe6a1ab7f9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -849,6 +849,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+   struct amdgpu_res_cursor cursor;
uint64_t flags;
int r;
 
@@ -896,7 +897,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 
/* bind pages into GART page tables */
-   gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+   amdgpu_res_first(bo_mem, 0, bo_mem->size, );
+   gtt->offset = cursor.start;
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 gtt->ttm.dma_address, flags);
gtt->bound = true;
@@ -916,6 +918,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
+   struct amdgpu_res_cursor cursor;
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -927,7 +930,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
-   bo->resource->start = addr >> PAGE_SHIFT;
+   bo->resource->start = addr;
return 0;
}
 
@@ -949,7 +952,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
 
/* Bind pages */
-   gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+   amdgpu_res_first(tmp, 0, tmp->size, );
+   gtt->offset = cursor.start;
amdgpu_ttm_gart_bind(adev, bo, flags);
amdgpu_gart_invalidate_tlb(adev);
ttm_resource_free(bo, >resource);
-- 
2.32.0



[PATCH v4 4/4] drm/i915: Clean up page shift operation

2023-01-25 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/i915/i915_scatterlist.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c 
b/drivers/gpu/drm/i915/i915_scatterlist.c
index 114e5e39aa72..bd7aaf7738f4 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -94,7 +94,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct 
drm_mm_node *node,
if (!rsgt)
return ERR_PTR(-ENOMEM);
 
-   i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
+   i915_refct_sgt_init(rsgt, node->size);
st = >table;
if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
   GFP_KERNEL)) {
@@ -105,8 +105,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct 
drm_mm_node *node,
sg = st->sgl;
st->nents = 0;
prev_end = (resource_size_t)-1;
-   block_size = node->size << PAGE_SHIFT;
-   offset = node->start << PAGE_SHIFT;
+   block_size = node->size;
+   offset = node->start;
 
while (block_size) {
u64 len;
-- 
2.32.0



[PATCH v4 3/4] drm/amdgpu: GDS/GWS/OA cleanup the page shift operation

2023-01-25 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.
v1 -> v4: add missing changes related to amdgpu_ttm_init_on_chip

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c| 12 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c|  6 +++---
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9e549923622b..2732d89c8468 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -142,16 +142,16 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, 
struct amdgpu_bo *gds,
  struct amdgpu_bo *gws, struct amdgpu_bo *oa)
 {
if (gds) {
-   job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
-   job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+   job->gds_base = amdgpu_bo_gpu_offset(gds);
+   job->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
-   job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
-   job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+   job->gws_base = amdgpu_bo_gpu_offset(gws);
+   job->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
-   job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
-   job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+   job->oa_base = amdgpu_bo_gpu_offset(oa);
+   job->oa_size = amdgpu_bo_size(oa);
}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2ab67ab204df..bbd0a4550fbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -541,12 +541,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
-   size <<= PAGE_SHIFT;
 
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
-   size = ALIGN(size, 4) << PAGE_SHIFT;
+   size = ALIGN(size, 4);
} else {
/* Memory should be aligned at least to a page size. */
page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ffe6a1ab7f9a..c1500875b4ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1849,19 +1849,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
 
/* Initialize various on-chip memory pools */
-   r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
+   r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size << 
PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GDS heap.\n");
return r;
}
 
-   r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
+   r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size << 
PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing gws heap.\n");
return r;
}
 
-   r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
+   r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size << 
PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing oa heap.\n");
return r;
-- 
2.32.0



[PATCH v4 2/4] drm/ttm: Clean up page shift operation

2023-01-25 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.
v1 -> v2: fix missing page shift to fpfn and lpfn
v2 -> v3: separate patches based on driver module

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/ttm/ttm_range_manager.c | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c 
b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..3703cbc6d368 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,9 +83,10 @@ static int ttm_range_man_alloc(struct ttm_resource_manager 
*man,
 
spin_lock(>lock);
ret = drm_mm_insert_node_in_range(mm, >mm_nodes[0],
- PFN_UP(node->base.size),
+ node->base.size,
  bo->page_alignment, 0,
- place->fpfn, lpfn, mode);
+ place->fpfn << PAGE_SHIFT,
+ lpfn << PAGE_SHIFT, mode);
spin_unlock(>lock);
 
if (unlikely(ret)) {
@@ -119,11 +120,10 @@ static bool ttm_range_man_intersects(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
/* Don't evict BOs outside of the requested placement range */
-   if (place->fpfn >= (node->start + num_pages) ||
-   (place->lpfn && place->lpfn <= node->start))
+   if ((place->fpfn << PAGE_SHIFT) >= (node->start + size) ||
+   (place->lpfn && (place->lpfn << PAGE_SHIFT) <= node->start))
return false;
 
return true;
@@ -135,10 +135,9 @@ static bool ttm_range_man_compatible(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
if (node->start < place->fpfn ||
-   (place->lpfn && (node->start + num_pages) > place->lpfn))
+   (place->lpfn && (node->start + size) > place->lpfn << PAGE_SHIFT))
return false;
 
return true;
-- 
2.32.0



[PATCH v4 1/4] drm/gem: Remove BUG_ON in drm_gem_private_object_init

2023-01-25 Thread Somalapuram Amaranath
ttm_resource can allocate size in bytes to support less than page size

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/drm_gem.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 59a0bb5ebd85..ee8b5c2b6c60 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -152,8 +152,6 @@ EXPORT_SYMBOL(drm_gem_object_init);
 void drm_gem_private_object_init(struct drm_device *dev,
 struct drm_gem_object *obj, size_t size)
 {
-   BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj->dev = dev;
obj->filp = NULL;
 
-- 
2.32.0



[PATCH 4/4] drm/amdkfd: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
cleanup PAGE_SHIFT operation and replacing
ttm_resource resource->start with cursor start
using amdgpu_res_first API

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c06ada0844ba..f87ce4f1cb93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -200,8 +200,11 @@ static int add_queue_mes(struct device_queue_manager *dqm, 
struct queue *q,
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 
if (q->wptr_bo) {
+   struct amdgpu_res_cursor cursor;
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE 
- 1);
-   queue_input.wptr_mc_addr = 
((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+   amdgpu_res_first(q->wptr_bo->tbo.resource, 0,
+q->wptr_bo->tbo.resource->size, );
+   queue_input.wptr_mc_addr = cursor.start + wptr_addr_off;
}
 
queue_input.is_kfd_process = 1;
-- 
2.32.0



[PATCH 3/4] drm/amdgpu: Use cursor start instead of ttm resource start

2023-01-25 Thread Somalapuram Amaranath
cleanup PAGE_SHIFT operation and replacing
ttm_resource resource->start with cursor start
using amdgpu_res_first API.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c| 10 +++---
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25a68de0..2ab67ab204df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1346,6 +1346,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+   struct amdgpu_res_cursor cursor;
unsigned long offset;
int r;
 
@@ -1355,7 +1356,8 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
 
-   offset = bo->resource->start << PAGE_SHIFT;
+   amdgpu_res_first(bo->resource, 0, bo->resource->size, );
+   offset = cursor.start;
if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
return 0;
 
@@ -1378,7 +1380,8 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
 
-   offset = bo->resource->start << PAGE_SHIFT;
+   amdgpu_res_first(bo->resource, 0, bo->resource->size, );
+   offset = cursor.start;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
(offset + bo->base.size) > adev->gmc.visible_vram_size)
@@ -1491,9 +1494,11 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+   struct amdgpu_res_cursor cursor;
uint64_t offset;
 
-   offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+   amdgpu_res_first(bo->tbo.resource, 0, bo->tbo.resource->size, );
+   offset = cursor.start +
 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
return amdgpu_gmc_sign_extend(offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c5ef7f7bdc15..ffe6a1ab7f9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -849,6 +849,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 {
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+   struct amdgpu_res_cursor cursor;
uint64_t flags;
int r;
 
@@ -896,7 +897,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 
/* bind pages into GART page tables */
-   gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+   amdgpu_res_first(bo_mem, 0, bo_mem->size, );
+   gtt->offset = cursor.start;
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 gtt->ttm.dma_address, flags);
gtt->bound = true;
@@ -916,6 +918,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
+   struct amdgpu_res_cursor cursor;
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -927,7 +930,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
-   bo->resource->start = addr >> PAGE_SHIFT;
+   bo->resource->start = addr;
return 0;
}
 
@@ -949,7 +952,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
 
/* Bind pages */
-   gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+   amdgpu_res_first(tmp, 0, tmp->size, );
+   gtt->offset = cursor.start;
amdgpu_ttm_gart_bind(adev, bo, flags);
amdgpu_gart_invalidate_tlb(adev);
ttm_resource_free(bo, >resource);
-- 
2.32.0



[PATCH 2/4] drm/amdgpu: Support allocation of amdgpu_gtt_mgr from pages to bytes

2023-01-25 Thread Somalapuram Amaranath
Change the GTT manager init and allocate from pages to bytes

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..a1fbfc5984d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -116,7 +116,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
  struct ttm_resource **res)
 {
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-   uint32_t num_pages = PFN_UP(tbo->base.size);
struct ttm_range_mgr_node *node;
int r;
 
@@ -134,8 +133,10 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
if (place->lpfn) {
spin_lock(>lock);
r = drm_mm_insert_node_in_range(>mm, >mm_nodes[0],
-   num_pages, tbo->page_alignment,
-   0, place->fpfn, place->lpfn,
+   tbo->base.size,
+   tbo->page_alignment << 
PAGE_SHIFT, 0,
+   place->fpfn << PAGE_SHIFT,
+   place->lpfn << PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(>lock);
if (unlikely(r))
@@ -144,7 +145,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = PFN_UP(node->base.size);
+   node->mm_nodes[0].size = node->base.size;
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
@@ -285,8 +286,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, 
uint64_t gtt_size)
 
ttm_resource_manager_init(man, >mman.bdev, gtt_size);
 
-   start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-   size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+   start = (AMDGPU_GTT_MAX_TRANSFER_SIZE * 
AMDGPU_GTT_NUM_TRANSFER_WINDOWS) << PAGE_SHIFT;
+   size = adev->gmc.gart_size - start;
drm_mm_init(>mm, start, size);
spin_lock_init(>lock);
 
-- 
2.32.0



[PATCH 1/4] drm/amdgpu: Move the amdgpu_gtt_mgr start and size from pages to bytes

2023-01-25 Thread Somalapuram Amaranath
Convert amdgpu_res_first and amdgpu_res_next from pages to bytes
to support the GTT manager, and clean up the PAGE_SHIFT operations.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 5c4f93ee0c57..5c78f0b09351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -94,8 +94,8 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
 
-   cur->start = (node->start << PAGE_SHIFT) + start;
-   cur->size = min((node->size << PAGE_SHIFT) - start, size);
+   cur->start = node->start + start;
+   cur->size = min(node->size - start, size);
cur->remaining = size;
cur->node = node;
break;
@@ -155,8 +155,8 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor 
*cur, uint64_t size)
node = cur->node;
 
cur->node = ++node;
-   cur->start = node->start << PAGE_SHIFT;
-   cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+   cur->start = node->start;
+   cur->size = min(node->size, cur->remaining);
break;
default:
return;
-- 
2.32.0



[PATCH v3 4/4] drm/i915: Clean up page shift operation

2023-01-10 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/i915/i915_scatterlist.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c 
b/drivers/gpu/drm/i915/i915_scatterlist.c
index 114e5e39aa72..bd7aaf7738f4 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -94,7 +94,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct 
drm_mm_node *node,
if (!rsgt)
return ERR_PTR(-ENOMEM);
 
-   i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
+   i915_refct_sgt_init(rsgt, node->size);
st = >table;
if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
   GFP_KERNEL)) {
@@ -105,8 +105,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct 
drm_mm_node *node,
sg = st->sgl;
st->nents = 0;
prev_end = (resource_size_t)-1;
-   block_size = node->size << PAGE_SHIFT;
-   offset = node->start << PAGE_SHIFT;
+   block_size = node->size;
+   offset = node->start;
 
while (block_size) {
u64 len;
-- 
2.32.0



[PATCH v3 3/4] drm/amdgpu: Clean up page shift operation and GWS and OA

2023-01-10 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  4 +---
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 12 ++--
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 974e85d8b6cc..19ad365dc159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -541,12 +541,10 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
-   size <<= PAGE_SHIFT;
-
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
-   size = ALIGN(size, 4) << PAGE_SHIFT;
+   size = ALIGN(size, 4);
} else {
/* Memory should be aligned at least to a page size. */
page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 5c4f93ee0c57..f92b61350efe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -91,11 +91,11 @@ static inline void amdgpu_res_first(struct ttm_resource 
*res,
break;
case TTM_PL_TT:
node = to_ttm_range_mgr_node(res)->mm_nodes;
-   while (start >= node->size << PAGE_SHIFT)
-   start -= node++->size << PAGE_SHIFT;
+   while (start >= node->size)
+   start -= node++->size;
 
-   cur->start = (node->start << PAGE_SHIFT) + start;
-   cur->size = min((node->size << PAGE_SHIFT) - start, size);
+   cur->start = (node->start) + start;
+   cur->size = min(node->size - start, size);
cur->remaining = size;
cur->node = node;
break;
@@ -155,8 +155,8 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor 
*cur, uint64_t size)
node = cur->node;
 
cur->node = ++node;
-   cur->start = node->start << PAGE_SHIFT;
-   cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+   cur->start = node->start;
+   cur->size = min(node->size, cur->remaining);
break;
default:
return;
-- 
2.32.0



[PATCH v3 1/4] drm/ttm: Clean up page shift operation

2023-01-10 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.
v1 -> v2: fix missing page shift to fpfn and lpfn
v2 -> v3: separate patches based on driver module

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/ttm/ttm_range_manager.c | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c 
b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..3703cbc6d368 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,9 +83,10 @@ static int ttm_range_man_alloc(struct ttm_resource_manager 
*man,
 
spin_lock(>lock);
ret = drm_mm_insert_node_in_range(mm, >mm_nodes[0],
- PFN_UP(node->base.size),
+ node->base.size,
  bo->page_alignment, 0,
- place->fpfn, lpfn, mode);
+ place->fpfn << PAGE_SHIFT,
+ lpfn << PAGE_SHIFT, mode);
spin_unlock(>lock);
 
if (unlikely(ret)) {
@@ -119,11 +120,10 @@ static bool ttm_range_man_intersects(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
/* Don't evict BOs outside of the requested placement range */
-   if (place->fpfn >= (node->start + num_pages) ||
-   (place->lpfn && place->lpfn <= node->start))
+   if ((place->fpfn << PAGE_SHIFT) >= (node->start + size) ||
+   (place->lpfn && (place->lpfn << PAGE_SHIFT) <= node->start))
return false;
 
return true;
@@ -135,10 +135,9 @@ static bool ttm_range_man_compatible(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
if (node->start < place->fpfn ||
-   (place->lpfn && (node->start + num_pages) > place->lpfn))
+   (place->lpfn && (node->start + size) > place->lpfn << PAGE_SHIFT))
return false;
 
return true;
-- 
2.32.0



[PATCH v3 2/4] drm/gem: Remove BUG_ON in drm_gem_private_object_init

2023-01-10 Thread Somalapuram Amaranath
ttm_resource now allocates size in bytes, to support sizes smaller than the page size

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/drm_gem.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 59a0bb5ebd85..ee8b5c2b6c60 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -152,8 +152,6 @@ EXPORT_SYMBOL(drm_gem_object_init);
 void drm_gem_private_object_init(struct drm_device *dev,
 struct drm_gem_object *obj, size_t size)
 {
-   BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj->dev = dev;
obj->filp = NULL;
 
-- 
2.32.0



[PATCH v1] drm/ttm: Clean up page shift operation

2022-11-21 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.
v1 -> v2: fix missing page shift to fpfn and lpfn

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  4 +---
 drivers/gpu/drm/ttm/ttm_range_manager.c| 13 ++---
 2 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 974e85d8b6cc..19ad365dc159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -541,12 +541,10 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
-   size <<= PAGE_SHIFT;
-
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
-   size = ALIGN(size, 4) << PAGE_SHIFT;
+   size = ALIGN(size, 4);
} else {
/* Memory should be aligned at least to a page size. */
page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c 
b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 0a8bc0b7f380..6ac38092dd2a 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,9 +83,10 @@ static int ttm_range_man_alloc(struct ttm_resource_manager 
*man,
 
spin_lock(>lock);
ret = drm_mm_insert_node_in_range(mm, >mm_nodes[0],
- PFN_UP(node->base.size),
+ node->base.size,
  bo->page_alignment, 0,
- place->fpfn, lpfn, mode);
+ place->fpfn << PAGE_SHIFT,
+ lpfn << PAGE_SHIFT, mode);
spin_unlock(>lock);
 
if (unlikely(ret)) {
@@ -119,11 +120,10 @@ static bool ttm_range_man_intersects(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
/* Don't evict BOs outside of the requested placement range */
-   if (place->fpfn >= (node->start + num_pages) ||
-   (place->lpfn && place->lpfn <= node->start))
+   if ((place->fpfn << PAGE_SHIFT) >= (node->start + size) ||
+   (place->lpfn && (place->lpfn << PAGE_SHIFT) <= node->start))
return false;
 
return true;
@@ -135,10 +135,9 @@ static bool ttm_range_man_compatible(struct 
ttm_resource_manager *man,
 size_t size)
 {
struct drm_mm_node *node = _ttm_range_mgr_node(res)->mm_nodes[0];
-   u32 num_pages = PFN_UP(size);
 
if (node->start < place->fpfn ||
-   (place->lpfn && (node->start + num_pages) > place->lpfn))
+   (place->lpfn && (node->start + size) > place->lpfn << PAGE_SHIFT))
return false;
 
return true;
-- 
2.32.0



Re: [PATCH 2/2] drm/gem: Remove BUG_ON in drm_gem_private_object_init

2022-11-21 Thread Somalapuram, Amaranath



On 11/16/2022 2:50 PM, Arunpravin Paneer Selvam wrote:

Hi Amar,

On 11/16/2022 2:20 PM, Somalapuram Amaranath wrote:

ttm_resource now allocates size in bytes, i.e. the size can be less than the page size.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/drm_gem.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b8db675e7fb5..a346e3b7f9a8 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(drm_gem_object_init);
  void drm_gem_private_object_init(struct drm_device *dev,
   struct drm_gem_object *obj, size_t size)
  {
-    BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+    //BUG_ON((size & (PAGE_SIZE - 1)) != 0);

This line is added by mistake?


No this breaks when the size is less than page size.

Now we support size in bytes.



Regards,
Arun

    obj->dev = dev;
  obj->filp = NULL;




[PATCH 2/2] drm/gem: Remove BUG_ON in drm_gem_private_object_init

2022-11-16 Thread Somalapuram Amaranath
ttm_resource now allocates size in bytes, i.e. the size can be less than the page size.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/drm_gem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b8db675e7fb5..a346e3b7f9a8 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(drm_gem_object_init);
 void drm_gem_private_object_init(struct drm_device *dev,
 struct drm_gem_object *obj, size_t size)
 {
-   BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+   //BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
obj->dev = dev;
obj->filp = NULL;
-- 
2.32.0



[PATCH 1/2] drm/ttm: Clean up page shift operation

2022-11-16 Thread Somalapuram Amaranath
Remove page shift operations as ttm_resource moved
from num_pages to size_t size in bytes.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +---
 drivers/gpu/drm/ttm/ttm_range_manager.c| 2 +-
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 974e85d8b6cc..19ad365dc159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -541,12 +541,10 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
-   size <<= PAGE_SHIFT;
-
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
-   size = ALIGN(size, 4) << PAGE_SHIFT;
+   size = ALIGN(size, 4);
} else {
/* Memory should be aligned at least to a page size. */
page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c 
b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 0a8bc0b7f380..4c7cba4ffdbf 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,7 +83,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager 
*man,
 
spin_lock(>lock);
ret = drm_mm_insert_node_in_range(mm, >mm_nodes[0],
- PFN_UP(node->base.size),
+ node->base.size,
  bo->page_alignment, 0,
  place->fpfn, lpfn, mode);
spin_unlock(>lock);
-- 
2.32.0



[PATCH v3] drm/ttm: rework on ttm_resource to use size_t type

2022-10-27 Thread Somalapuram Amaranath
Change ttm_resource structure from num_pages to size_t size in bytes.
v1 -> v2: change PFN_UP(dst_mem->size) to ttm->num_pages
v1 -> v2: change bo->resource->size to bo->base.size at some places
v1 -> v2: remove the local variable
v1 -> v2: cleanup cmp_size_smaller_first()
v2 -> v3: adding missing PFN_UP in ttm_bo_vm_fault_reserved

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c|  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c|  6 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c   |  8 
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c|  2 +-
 drivers/gpu/drm/i915/i915_scatterlist.c|  4 ++--
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c  | 12 ++--
 drivers/gpu/drm/i915/intel_region_ttm.c|  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo0039.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo5039.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo74c1.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo85b5.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo9039.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo90b5.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_boa0b5.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c  |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_mem.c  |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_ttm.c  |  2 +-
 drivers/gpu/drm/radeon/radeon_cs.c |  7 +--
 drivers/gpu/drm/radeon/radeon_object.c |  4 ++--
 drivers/gpu/drm/radeon/radeon_trace.h  |  2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c|  4 ++--
 drivers/gpu/drm/ttm/ttm_bo.c   |  3 ---
 drivers/gpu/drm/ttm/ttm_bo_util.c  |  6 +++---
 drivers/gpu/drm/ttm/ttm_bo_vm.c|  4 ++--
 drivers/gpu/drm/ttm/ttm_range_manager.c|  2 +-
 drivers/gpu/drm/ttm/ttm_resource.c | 14 ++
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c   |  4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c |  6 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c|  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c|  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  |  6 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c|  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c |  6 +++---
 include/drm/ttm/ttm_resource.h |  4 ++--
 38 files changed, 79 insertions(+), 81 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 1f3302aebeff..44367f03316f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -144,7 +144,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = node->base.num_pages;
+   node->mm_nodes[0].size = PFN_UP(node->base.size);
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2e8f6cd7a729..974e85d8b6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -542,6 +542,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
size <<= PAGE_SHIFT;
+
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
@@ -776,7 +777,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
 
-   r = ttm_bo_kmap(>tbo, 0, bo->tbo.resource->num_pages, >kmap);
+   r = ttm_bo_kmap(>tbo, 0, PFN_UP(bo->tbo.base.size), >kmap);
if (r)
return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 6546552e596c..5c4f93ee0c57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -62,7 +62,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
if (!res)
goto fallback;
 
-   BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+   BUG_ON(start + size > res->size);
 
cur->mem_type = res->mem_type;
 
@@ -110,7 +110,7 @@ static inline void amdgpu_res_first(struct ttm_resource 
*res,
cur->size = size;
  

[PATCH v2] drm/ttm: rework on ttm_resource to use size_t type

2022-10-25 Thread Somalapuram Amaranath
Change ttm_resource structure from num_pages to size_t size in bytes.
v1 -> v2: change PFN_UP(dst_mem->size) to ttm->num_pages
v1 -> v2: change bo->resource->size to bo->base.size at some places
v1 -> v2: remove the local variable
v1 -> v2: cleanup cmp_size_smaller_first()

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c|  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c|  6 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c   |  8 
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c|  2 +-
 drivers/gpu/drm/i915/i915_scatterlist.c|  4 ++--
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c  | 12 ++--
 drivers/gpu/drm/i915/intel_region_ttm.c|  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo0039.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo5039.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo74c1.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo85b5.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo9039.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo90b5.c   |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_boa0b5.c   |  2 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c  |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_mem.c  |  4 ++--
 drivers/gpu/drm/nouveau/nouveau_ttm.c  |  2 +-
 drivers/gpu/drm/radeon/radeon_cs.c |  7 +--
 drivers/gpu/drm/radeon/radeon_object.c |  4 ++--
 drivers/gpu/drm/radeon/radeon_trace.h  |  2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c|  4 ++--
 drivers/gpu/drm/ttm/ttm_bo.c   |  3 ---
 drivers/gpu/drm/ttm/ttm_bo_util.c  |  6 +++---
 drivers/gpu/drm/ttm/ttm_bo_vm.c|  4 ++--
 drivers/gpu/drm/ttm/ttm_range_manager.c|  2 +-
 drivers/gpu/drm/ttm/ttm_resource.c | 14 ++
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c   |  4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c |  6 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c|  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c|  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  |  6 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c|  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c |  6 +++---
 include/drm/ttm/ttm_resource.h |  4 ++--
 38 files changed, 79 insertions(+), 81 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 1f3302aebeff..44367f03316f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -144,7 +144,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = node->base.num_pages;
+   node->mm_nodes[0].size = PFN_UP(node->base.size);
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2e8f6cd7a729..974e85d8b6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -542,6 +542,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
size <<= PAGE_SHIFT;
+
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
@@ -776,7 +777,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
 
-   r = ttm_bo_kmap(>tbo, 0, bo->tbo.resource->num_pages, >kmap);
+   r = ttm_bo_kmap(>tbo, 0, PFN_UP(bo->tbo.base.size), >kmap);
if (r)
return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 6546552e596c..5c4f93ee0c57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -62,7 +62,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
if (!res)
goto fallback;
 
-   BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+   BUG_ON(start + size > res->size);
 
cur->mem_type = res->mem_type;
 
@@ -110,7 +110,7 @@ static inline void amdgpu_res_first(struct ttm_resource 
*res,
cur->size = size;
cur->remaining = size;
cur->node = NULL;
-   W

[PATCH 6/6] drm/vmwgfx: fixes on ttm_resource rework to use size_t type

2022-10-19 Thread Somalapuram Amaranath
Fix the ttm_resource users to use size_t size instead of num_pages.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c  | 4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c| 6 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c   | 2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c   | 2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 6 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c   | 2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c| 6 +++---
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 09fe20e918f9..c52c7bf1485b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,8 +483,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
-   d.dst_num_pages = dst->resource->num_pages;
-   d.src_num_pages = src->resource->num_pages;
+   d.dst_num_pages = PFN_UP(dst->resource->size);
+   d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
d.diff = diff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 822251aaab0a..134a13a50861 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -194,7 +194,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private 
*dev_priv,
int ret = 0;
 
place = vmw_vram_placement.placement[0];
-   place.lpfn = bo->resource->num_pages;
+   place.lpfn = PFN_UP(bo->resource->size);
placement.num_placement = 1;
placement.placement = 
placement.num_busy_placement = 1;
@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private 
*dev_priv,
 * that situation.
 */
if (bo->resource->mem_type == TTM_PL_VRAM &&
-   bo->resource->start < bo->resource->num_pages &&
+   bo->resource->start < PFN_UP(bo->resource->size) &&
bo->resource->start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
@@ -352,7 +352,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
 
-   ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, >map);
+   ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->resource->size), >map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 79b30dc9d825..c5ed49241f9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -427,7 +427,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, 
size_t new_size)
 * Do a page by page copy of COTables. This eliminates slow vmap()s.
 * This should really be a TTM utility.
 */
-   for (i = 0; i < old_bo->resource->num_pages; ++i) {
+   for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
bool dummy;
 
ret = ttm_bo_kmap(old_bo, i, 1, _map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f085dbd4736d..2fb53aa57a56 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1047,7 +1047,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private 
*dev_priv,
 
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-   if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
+   if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 60e3cc537f36..abd5e3323ebf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -71,7 +71,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager 
*man,
spin_lock(>lock);
 
if (gman->max_gmr_pages > 0) {
-   gman->used_gmr_pages += (*res)->num_pages;
+   gman->used_gmr_pages += PFN_UP((*res)->size);
/*
 * Because the graphics memory is a soft limit we can try to
 * expand it instead of letting the userspace apps crash.
@@ -114,7 +114,7 @@ static int vmw_gmrid_man_get_node(struct 
ttm_resource_manage

[PATCH 5/6] drm/radeon: fixes on ttm_resource rework to use size_t type

2022-10-19 Thread Somalapuram Amaranath
Fix the ttm_resource users to use size_t size instead of num_pages.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/radeon/radeon_cs.c | 4 ++--
 drivers/gpu/drm/radeon/radeon_object.c | 4 ++--
 drivers/gpu/drm/radeon/radeon_trace.h  | 2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c| 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_cs.c 
b/drivers/gpu/drm/radeon/radeon_cs.c
index 446f7bae54c4..4c930f0cf132 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -400,8 +400,8 @@ static int cmp_size_smaller_first(void *priv, const struct 
list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, 
tv.head);
 
/* Sort A before B if A is smaller. */
-   return (int)la->robj->tbo.resource->num_pages -
-   (int)lb->robj->tbo.resource->num_pages;
+   return (int)PFN_UP(la->robj->tbo.resource->size) -
+   (int)PFN_UP(lb->robj->tbo.resource->size);
 }
 
 /**
diff --git a/drivers/gpu/drm/radeon/radeon_object.c 
b/drivers/gpu/drm/radeon/radeon_object.c
index 00c33b24d5d3..710d04fcbea6 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -232,7 +232,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
-   r = ttm_bo_kmap(>tbo, 0, bo->tbo.resource->num_pages, >kmap);
+   r = ttm_bo_kmap(>tbo, 0, PFN_UP(bo->tbo.resource->size), >kmap);
if (r) {
return r;
}
@@ -737,7 +737,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct 
ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
 
-   size = bo->resource->num_pages << PAGE_SHIFT;
+   size = bo->resource->size;
offset = bo->resource->start << PAGE_SHIFT;
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h 
b/drivers/gpu/drm/radeon/radeon_trace.h
index c9fed5f2b870..22676617e1a5 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
 
TP_fast_assign(
   __entry->bo = bo;
-  __entry->pages = bo->tbo.resource->num_pages;
+  __entry->pages = PFN_UP(bo->tbo.resource->size);
   ),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c 
b/drivers/gpu/drm/radeon/radeon_ttm.c
index d33fec488713..fff48306c05f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -181,7 +181,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
-   num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+   num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
fence = radeon_copy(rdev, old_start, new_start, num_pages, 
bo->base.resv);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -268,7 +268,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, 
bool evict,
 static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct 
ttm_resource *mem)
 {
struct radeon_device *rdev = radeon_get_rdev(bdev);
-   size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+   size_t bus_size = (size_t)mem->size;
 
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
-- 
2.32.0



[PATCH 4/6] drm/nouveau: fixes on ttm_resource rework to use size_t type

2022-10-19 Thread Somalapuram Amaranath
Fix the ttm_resource users to use size_t size instead of num_pages.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/nouveau/nouveau_bo.c | 4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo0039.c | 4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo5039.c | 2 +-
 drivers/gpu/drm/nouveau/nouveau_bo74c1.c | 2 +-
 drivers/gpu/drm/nouveau/nouveau_bo85b5.c | 4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo9039.c | 4 ++--
 drivers/gpu/drm/nouveau/nouveau_bo90b5.c | 4 ++--
 drivers/gpu/drm/nouveau/nouveau_boa0b5.c | 2 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c| 5 ++---
 drivers/gpu/drm/nouveau/nouveau_mem.c| 4 ++--
 drivers/gpu/drm/nouveau/nouveau_ttm.c| 2 +-
 11 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c 
b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 126b3c6e12f9..16ca4a141866 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -532,7 +532,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
 
-   ret = ttm_bo_kmap(>bo, 0, nvbo->bo.resource->num_pages, 
>kmap);
+   ret = ttm_bo_kmap(>bo, 0, PFN_UP(nvbo->bo.resource->size), 
>kmap);
 
ttm_bo_unreserve(>bo);
return ret;
@@ -1236,7 +1236,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct 
ttm_buffer_object *bo)
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-   bo->resource->start + bo->resource->num_pages < mappable)
+   bo->resource->start + PFN_UP(bo->resource->size) < mappable)
return 0;
 
for (i = 0; i < nvbo->placement.num_placement; ++i) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c 
b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
index 7390132129fe..e2ce44adaa5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo0039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -52,7 +52,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct 
ttm_buffer_object *bo,
u32 src_offset = old_reg->start << PAGE_SHIFT;
u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
u32 dst_offset = new_reg->start << PAGE_SHIFT;
-   u32 page_count = new_reg->num_pages;
+   u32 page_count = PFN_UP(new_reg->size);
int ret;
 
ret = PUSH_WAIT(push, 3);
@@ -62,7 +62,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct 
ttm_buffer_object *bo,
PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
   SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
 
-   page_count = new_reg->num_pages;
+   page_count = PFN_UP(new_reg->size);
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c 
b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
index 4c75c7b3804c..c6cf3629a9f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo5039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -41,7 +41,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct 
ttm_buffer_object *bo,
 {
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
-   u64 length = (new_reg->num_pages << PAGE_SHIFT);
+   u64 length = new_reg->size;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
int src_tiled = !!mem->kind;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c 
b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
index ed6c09d67840..9b7ba31fae13 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -44,7 +44,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct 
ttm_buffer_object *bo,
if (ret)
return ret;
 
-   PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+   PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size,
0x0308, upper_32_bits(mem->vma[0].addr),
0x030c, lower_32_bits(mem->vma[0].addr),
0x0310, upper_32_bits(mem->vma[1].addr),
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c 
b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
index dec29b2d8bb2..a15a38a87a95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -44,10 +44,10 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct 
ttm_buffer_object *bo,
struct nvif_push *push = chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
-   u32 page_count = new_reg->num_pages;
+   u32 page_count = PFN_UP(new_reg->size);
int ret;
 
-   page_count = new_reg->num_pages;
+   

[PATCH 3/6] drm/i915: fixes on ttm_resource rework to use size_t type

2022-10-19 Thread Somalapuram Amaranath
Convert ttm_resource users from the num_pages field to the size_t size field (in bytes).

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c   |  2 +-
 drivers/gpu/drm/i915/i915_scatterlist.c   |  4 ++--
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 12 ++--
 drivers/gpu/drm/i915/intel_region_ttm.c   |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c 
b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 4f861782c3e8..7a1e92c11946 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -649,7 +649,7 @@ bool i915_ttm_resource_mappable(struct ttm_resource *res)
if (!i915_ttm_cpu_maps_iomem(res))
return true;
 
-   return bman_res->used_visible_size == bman_res->base.num_pages;
+   return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
 }
 
 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct 
ttm_resource *mem)
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c 
b/drivers/gpu/drm/i915/i915_scatterlist.c
index dcc081874ec8..114e5e39aa72 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -158,7 +158,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct 
ttm_resource *res,
 u32 page_alignment)
 {
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
-   const u64 size = res->num_pages << PAGE_SHIFT;
+   const u64 size = res->size;
const u32 max_segment = round_down(UINT_MAX, page_alignment);
struct drm_buddy *mm = bman_res->mm;
struct list_head *blocks = _res->blocks;
@@ -177,7 +177,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct 
ttm_resource *res,
 
i915_refct_sgt_init(rsgt, size);
st = >table;
-   if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+   if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c 
b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index e19452f0e100..7e611476c7a4 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -62,8 +62,8 @@ static int i915_ttm_buddy_man_alloc(struct 
ttm_resource_manager *man,
if (place->fpfn || lpfn != man->size)
bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
-   GEM_BUG_ON(!bman_res->base.num_pages);
-   size = bman_res->base.num_pages << PAGE_SHIFT;
+   GEM_BUG_ON(!bman_res->base.size);
+   size = bman_res->base.size;
 
min_page_size = bman->default_page_size;
if (bo->page_alignment)
@@ -72,7 +72,7 @@ static int i915_ttm_buddy_man_alloc(struct 
ttm_resource_manager *man,
GEM_BUG_ON(min_page_size < mm->chunk_size);
GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
 
-   if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+   if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
place->flags & TTM_PL_FLAG_CONTIGUOUS) {
unsigned long pages;
 
@@ -108,7 +108,7 @@ static int i915_ttm_buddy_man_alloc(struct 
ttm_resource_manager *man,
goto err_free_blocks;
 
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-   u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
+   u64 original_size = (u64)bman_res->base.size;
 
drm_buddy_block_trim(mm,
 original_size,
@@ -116,7 +116,7 @@ static int i915_ttm_buddy_man_alloc(struct 
ttm_resource_manager *man,
}
 
if (lpfn <= bman->visible_size) {
-   bman_res->used_visible_size = bman_res->base.num_pages;
+   bman_res->used_visible_size = PFN_UP(bman_res->base.size);
} else {
struct drm_buddy_block *block;
 
@@ -228,7 +228,7 @@ static bool i915_ttm_buddy_man_compatible(struct 
ttm_resource_manager *man,
 
if (!place->fpfn &&
place->lpfn == i915_ttm_buddy_man_visible_size(man))
-   return bman_res->used_visible_size == res->num_pages;
+   return bman_res->used_visible_size == PFN_UP(res->size);
 
/* Check each drm buddy block individually */
list_for_each_entry(block, _res->blocks, link) {
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c 
b/drivers/gpu/drm/i915/intel_region_ttm.c
index 575d67bc6ffe..cf89d0c2a2d9 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -244,7 +244,7 @@ void intel_region_ttm_resource_free(struct 
intel_memo

[PATCH 2/6] drm/amd: fixes on ttm_resource rework to use size_t type

2022-10-19 Thread Somalapuram Amaranath
Convert ttm_resource users from the num_pages field to the size_t size field (in bytes).

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c| 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c| 6 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c   | 8 
 6 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 1f3302aebeff..44367f03316f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -144,7 +144,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager 
*man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
-   node->mm_nodes[0].size = node->base.num_pages;
+   node->mm_nodes[0].size = PFN_UP(node->base.size);
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2e8f6cd7a729..e51f80bb1d07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -542,6 +542,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
size <<= PAGE_SHIFT;
+
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
@@ -776,7 +777,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
 
-   r = ttm_bo_kmap(>tbo, 0, bo->tbo.resource->num_pages, >kmap);
+   r = ttm_bo_kmap(>tbo, 0, PFN_UP(bo->tbo.resource->size), >kmap);
if (r)
return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 6546552e596c..5c4f93ee0c57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -62,7 +62,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
if (!res)
goto fallback;
 
-   BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+   BUG_ON(start + size > res->size);
 
cur->mem_type = res->mem_type;
 
@@ -110,7 +110,7 @@ static inline void amdgpu_res_first(struct ttm_resource 
*res,
cur->size = size;
cur->remaining = size;
cur->node = NULL;
-   WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+   WARN_ON(res && start + size > res->size);
return;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 5e6ddc7e101c..677ad2016976 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
 
TP_fast_assign(
   __entry->bo = bo;
-  __entry->pages = bo->tbo.resource->num_pages;
+  __entry->pages = PFN_UP(bo->tbo.resource->size);
   __entry->type = bo->tbo.resource->mem_type;
   __entry->prefer = bo->preferred_domains;
   __entry->allow = bo->allowed_domains;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dc262d2c2925..36066965346f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -381,7 +381,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
dst.offset = 0;
 
r = amdgpu_ttm_copy_mem_to_mem(adev, , ,
-  new_mem->num_pages << PAGE_SHIFT,
+  new_mem->size,
   amdgpu_bo_encrypted(abo),
   bo->base.resv, );
if (r)
@@ -424,7 +424,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
   struct ttm_resource *mem)
 {
-   u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+   u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
u64 end;
 
@@ -568,7 +568,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device 
*bdev,
 struct ttm_resource *mem)
 {
struc

[PATCH 1/6] drm/ttm: rework on ttm_resource to use size_t type

2022-10-19 Thread Somalapuram Amaranath
Change ttm_resource structure from num_pages to size_t size in bytes.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/ttm/ttm_bo.c| 4 ++--
 drivers/gpu/drm/ttm/ttm_bo_util.c   | 6 +++---
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 4 ++--
 drivers/gpu/drm/ttm/ttm_range_manager.c | 2 +-
 drivers/gpu/drm/ttm/ttm_resource.c  | 8 
 include/drm/ttm/ttm_resource.h  | 2 +-
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7c8e8be774f1..394ccb13eaed 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,8 +51,8 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object 
*bo,
struct ttm_resource_manager *man;
int i, mem_type;
 
-   drm_printf(, "No space for %p (%lu pages, %zuK, %zuM)\n",
-  bo, bo->resource->num_pages, bo->base.size >> 10,
+   drm_printf(, "No space for %p (%lu size, %zuK, %zuM)\n",
+  bo, bo->resource->size, bo->base.size >> 10,
   bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c 
b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fa04e62202c1..da5493f789df 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-   ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+   ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, 
src_iter);
 
if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
@@ -357,9 +357,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 
map->virtual = NULL;
map->bo = bo;
-   if (num_pages > bo->resource->num_pages)
+   if (num_pages > PFN_UP(bo->resource->size))
return -EINVAL;
-   if ((start_page + num_pages) > bo->resource->num_pages)
+   if ((start_page + num_pages) > PFN_UP(bo->resource->size))
return -EINVAL;
 
ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 38119311284d..876e7d07273c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -217,7 +217,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(>base.vma_node);
 
-   if (unlikely(page_offset >= bo->resource->num_pages))
+   if (unlikely(page_offset >= PFN_UP(bo->resource->size)))
return VM_FAULT_SIGBUS;
 
prot = ttm_io_prot(bo, bo->resource, prot);
@@ -412,7 +412,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned 
long addr,
 << PAGE_SHIFT);
int ret;
 
-   if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
+   if (len < 1 || (offset + len) > bo->resource->size)
return -EIO;
 
ret = ttm_bo_reserve(bo, true, false, NULL);
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c 
b/drivers/gpu/drm/ttm/ttm_range_manager.c
index f7c16c46cfbc..0a8bc0b7f380 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,7 +83,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager 
*man,
 
spin_lock(>lock);
ret = drm_mm_insert_node_in_range(mm, >mm_nodes[0],
- node->base.num_pages,
+ PFN_UP(node->base.size),
  bo->page_alignment, 0,
  place->fpfn, lpfn, mode);
spin_unlock(>lock);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c 
b/drivers/gpu/drm/ttm/ttm_resource.c
index a729c32a1e48..f9cce0727d40 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -177,7 +177,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
 
res->start = 0;
-   res->num_pages = PFN_UP(bo->base.size);
+   res->size = bo->base.size;
res->mem_type = place->mem_type;
res->placement = place->flags;
res->bus.addr = NULL;
@@ -192,7 +192,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
list_add

Re: [PATCH] amdgpu: add context creation flags in CS IOCTL

2022-08-08 Thread Somalapuram, Amaranath



On 8/2/2022 7:25 PM, Shashank Sharma wrote:

This patch adds:
- A new input parameter "flags" in the amdgpu_ctx_create2 call.
- Some new flags defining workload type hints.
- Some change in the caller function of amdgpu_ctx_create2, to
   accomodate this new parameter.

The idea is to pass the workload hints while context creation, so
that kernel GPU scheduler can pass this information to GPU FW, which in
turn can adjust the GPU characterstics as per the workload type.

Signed-off-by: Shashank Sharma 
Cc: Alex Deucher 
Cc: Marek Olsak 
Cc: Christian Koenig 
Cc: Amarnath Somalapuram 
---
  amdgpu/amdgpu.h  |  2 ++
  amdgpu/amdgpu_cs.c   |  5 -
  include/drm/amdgpu_drm.h | 10 +-
  3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index b118dd48..1ebb46e6 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -874,6 +874,7 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
   *
   * \param   dev  - \c [in] Device handle. See #amdgpu_device_initialize()
   * \param   priority - \c [in] Context creation flags. See 
AMDGPU_CTX_PRIORITY_*
+ * \param   flags- \c [in] Context flags. See AMDGPU_CTX_FLAGS_*
   * \param   context  - \c [out] GPU Context handle
   *
   * \return   0 on success\n
@@ -884,6 +885,7 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
  */
  int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
 uint32_t priority,
+uint32_t flags,
 amdgpu_context_handle *context);
  /**
   * Create GPU execution Context
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index fad484bf..d4723ea5 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -44,12 +44,14 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
   *
   * \param   dev  - \c [in] Device handle. See #amdgpu_device_initialize()
   * \param   priority - \c [in] Context creation flags. See 
AMDGPU_CTX_PRIORITY_*
+ * \param   flags- \c [in] Context flags. See AMDGPU_CTX_FLAGS_*
   * \param   context  - \c [out] GPU Context handle
   *
   * \return  0 on success otherwise POSIX Error code
  */
  drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
 uint32_t priority,
+uint32_t flags,
 amdgpu_context_handle *context)
  {
struct amdgpu_context *gpu_context;
@@ -74,6 +76,7 @@ drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
memset(, 0, sizeof(args));
args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
args.in.priority = priority;
+   args.in.flags = flags;
  
  	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, , sizeof(args));

if (r)
@@ -97,7 +100,7 @@ error:
  drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
amdgpu_context_handle *context)
  {
-   return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
+   return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, 0, 
context);


How do we set the workload hint from an application? Should amdgpu_cs_ctx_create
also take a flags parameter?


Regards,
S.Amarnath

  }
  
  /**

diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index 0cbd1540..d9fb1f20 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -238,10 +238,18 @@ union drm_amdgpu_bo_list {
  #define AMDGPU_CTX_PRIORITY_HIGH512
  #define AMDGPU_CTX_PRIORITY_VERY_HIGH   1023
  
+/* GPU context workload hint bitmask */

+#define AMDGPU_CTX_FLAGS_WORKLOAD_HINT_MASK0xFF
+#define AMDGPU_CTX_FLAGS_WORKLOAD_HINT_NONE0
+#define AMDGPU_CTX_FLAGS_WORKLOAD_HINT_3D  (1 << 1)
+#define AMDGPU_CTX_FLAGS_WORKLOAD_HINT_VIDEO   (1 << 2)
+#define AMDGPU_CTX_FLAGS_WORKLOAD_HINT_VR  (1 << 3)
+#define AMDGPU_CTX_FLAGS_WORKLOAD_HINT_COMPUTE (1 << 4)
+
  struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32   op;
-   /** For future use, no flags defined so far */
+   /** AMDGPU_CTX_FLAGS_* */
__u32   flags;
__u32   ctx_id;
/** AMDGPU_CTX_PRIORITY_* */


Re: [PATCH -next] drm/amdgpu: double free error and freeing uninitialized null pointer

2022-07-15 Thread Somalapuram, Amaranath


On 7/14/2022 9:13 PM, André Almeida wrote:

Às 12:06 de 14/07/22, Sebin Sebastian escreveu:

On Tue, Jul 12, 2022 at 12:14:27PM -0300, André Almeida wrote:

Hi Sebin,

Às 10:29 de 10/07/22, Sebin Sebastian escreveu:

Fix two Coverity warnings: a double free and an uninitialized pointer
read. Both tmp and new are pointing at the same address and both are freed,
which leads to double free. Freeing tmp in the condition after new is
assigned with new address fixes the double free issue. new is not
initialized to null which also leads to a free on an uninitialized
pointer.
Coverity issue: 1518665 (uninitialized pointer read)
1518679 (double free)

What are those numbers?


These numbers are the issue ID's for the errors that are being reported
by the coverity static analyzer tool.


I see, but I don't know which tool was used, so those look like random
numbers to me. I would just remove this part of your commit message, but
if you want to keep it, you need to at least mention what the tool is.


The new variable does not need to be initialized.

The only condition double free happens is:

tmp = new;
    if (sscanf(reg_offset, "%X %n", [i], ) != 1) {
    ret = -EINVAL;
    goto error_free; *//    if it hits this*
    }/
/

and can be avoided like:

 error_free:
-   kfree(tmp);
+   if (tmp != new)
+   kfree(tmp);
    kfree(new);
    return ret;
 }


Regards,

S.Amarnath


Signed-off-by: Sebin Sebastian
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 8 +---
  1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f3b3c688e4e7..d82fe0e1b06b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1660,7 +1660,7 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,
  {
struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
char reg_offset[11];
-   uint32_t *new, *tmp = NULL;
+   uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;
  
  	do {

@@ -1692,17 +1692,19 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,
goto error_free;
}

If the `if (!new) {` above this line is true, will be tmp freed?


Yes, it doesn't seem to free tmp here. Should I free tmp immediately
after the do-while loop and remove `kfree(tmp)` from the `if (ret)`
block? Thanks for pointing out the errors.

If you free immediately after the while loop, then you would risk a use
after free here:

swap(adev->reset_dump_reg_list, tmp);

So this isn't the solution either.


ret = down_write_killable(>reset_domain->sem);
-   if (ret)
+   if (ret) {
+   kfree(tmp);
goto error_free;
+   }
  
  	swap(adev->reset_dump_reg_list, tmp);

swap(adev->reset_dump_reg_value, new);
adev->num_regs = i;
up_write(>reset_domain->sem);
+   kfree(tmp);
ret = size;
  
  error_free:

-   kfree(tmp);
kfree(new);
return ret;
  }

[PATCH v3 2/2] drm/amdgpu: adding device coredump support

2022-06-02 Thread Somalapuram Amaranath
Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps
v1 -> v2: Variable name change
v1 -> v2: NULL check
v1 -> v2: Code alignment
v1 -> v2: Adding dummy amdgpu_devcoredump_free
v1 -> v2: memset reset_task_info to zero
v2 -> v3: add CONFIG_DEV_COREDUMP for variables
v2 -> v3: remove NULL check on amdgpu_devcoredump_read

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  5 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 64 ++
 2 files changed, 69 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c79d9992b113..1bfbaf65d414 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,11 @@ struct amdgpu_device {
uint32_t*reset_dump_reg_list;
uint32_t*reset_dump_reg_value;
int num_regs;
+#ifdef CONFIG_DEV_COREDUMP
+   struct amdgpu_task_info reset_task_info;
+   boolreset_vram_lost;
+   struct timespec64   reset_time;
+#endif
 
struct amdgpu_reset_domain  *reset_domain;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 89c6db03e84b..f1def74aaad0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include 
 #include 
@@ -4734,6 +4736,59 @@ static int amdgpu_reset_reg_dumps(struct amdgpu_device 
*adev)
return 0;
 }
 
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
+   size_t count, void *data, size_t datalen)
+{
+   struct drm_printer p;
+   struct amdgpu_device *adev = data;
+   struct drm_print_iterator iter;
+   int i;
+
+   iter.data = buffer;
+   iter.offset = 0;
+   iter.start = offset;
+   iter.remain = count;
+
+   p = drm_coredump_printer();
+
+   drm_printf(, " AMDGPU Device Coredump \n");
+   drm_printf(, "kernel: " UTS_RELEASE "\n");
+   drm_printf(, "module: " KBUILD_MODNAME "\n");
+   drm_printf(, "time: %lld.%09ld\n", adev->reset_time.tv_sec, 
adev->reset_time.tv_nsec);
+   if (adev->reset_task_info.pid)
+   drm_printf(, "process_name: %s PID: %d\n",
+  adev->reset_task_info.process_name,
+  adev->reset_task_info.pid);
+
+   if (adev->reset_vram_lost)
+   drm_printf(, "VRAM is lost due to GPU reset!\n");
+   if (adev->num_regs) {
+   drm_printf(, "AMDGPU register dumps:\nOffset: Value:\n");
+
+   for (i = 0; i < adev->num_regs; i++)
+   drm_printf(, "0x%08x: 0x%08x\n",
+  adev->reset_dump_reg_list[i],
+  adev->reset_dump_reg_value[i]);
+   }
+
+   return count - iter.remain;
+}
+
+static void amdgpu_devcoredump_free(void *data)
+{
+}
+
+static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
+{
+   struct drm_device *dev = adev_to_drm(adev);
+
+   ktime_get_ts64(>reset_time);
+   dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+ amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+}
+#endif
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4818,6 +4873,15 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
goto out;
 
vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);
+#ifdef CONFIG_DEV_COREDUMP
+   tmp_adev->reset_vram_lost = vram_lost;
+   memset(_adev->reset_task_info, 0,
+   
sizeof(tmp_adev->reset_task_info));
+   if (reset_context->job && 
reset_context->job->vm)
+   tmp_adev->reset_task_info =
+   
reset_context->job->vm->task_info;
+   amdgpu_reset_capture_coredumpm(tmp_adev);
+#endif
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU 
reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
-- 
2.32.0



[PATCH v3 1/2] drm/amdgpu: save the reset dump register value for devcoredump

2022-06-02 Thread Somalapuram Amaranath
Allocate memory for register value and use the same values for devcoredump.
v1 -> v2: Change krealloc_array() to kmalloc_array()
v2 -> v3: Fix alignment

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 7 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 6 +++---
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 76df583663c7..c79d9992b113 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1042,6 +1042,7 @@ struct amdgpu_device {
 
/* reset dump register */
uint32_t*reset_dump_reg_list;
+   uint32_t*reset_dump_reg_value;
int num_regs;
 
struct amdgpu_reset_domain  *reset_domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index eedb12f6b8a3..f3ac7912c29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1709,17 +1709,24 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,
i++;
} while (len < size);
 
+   new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
+   if (!new) {
+   ret = -ENOMEM;
+   goto error_free;
+   }
ret = down_write_killable(>reset_domain->sem);
if (ret)
goto error_free;
 
swap(adev->reset_dump_reg_list, tmp);
+   swap(adev->reset_dump_reg_value, new);
adev->num_regs = i;
up_write(>reset_domain->sem);
ret = size;
 
 error_free:
kfree(tmp);
+   kfree(new);
return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4daa0e893965..89c6db03e84b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4720,15 +4720,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
 
 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
 {
-   uint32_t reg_value;
int i;
 
lockdep_assert_held(>reset_domain->sem);
dump_stack();
 
for (i = 0; i < adev->num_regs; i++) {
-   reg_value = RREG32(adev->reset_dump_reg_list[i]);
-   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   adev->reset_dump_reg_value[i] = 
RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
+adev->reset_dump_reg_value[i]);
}
 
return 0;
-- 
2.32.0



Re: [PATCH v2 2/2] drm/amdgpu: adding device coredump support

2022-05-31 Thread Somalapuram, Amaranath


On 5/26/2022 3:56 PM, Wang, Yang(Kevin) wrote:


[AMD Official Use Only - General]





*From:* amd-gfx  on behalf of 
Somalapuram Amaranath 

*Sent:* Thursday, May 26, 2022 5:48 PM
*To:* amd-gfx@lists.freedesktop.org 
*Cc:* Deucher, Alexander ; Somalapuram, 
Amaranath ; Koenig, Christian 
; Sharma, Shashank 

*Subject:* [PATCH v2 2/2] drm/amdgpu: adding device coredump support
Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps
v1 -> v2: Variable name change
v1 -> v2: NULL check
v1 -> v2: Code alignment
v1 -> v2: Adding dummy amdgpu_devcoredump_free
v1 -> v2: memset reset_task_info to zero

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 67 ++
 2 files changed, 70 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index c79d9992b113..25a7b2c74928 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,9 @@ struct amdgpu_device {
 uint32_t *reset_dump_reg_list;
 uint32_t *reset_dump_reg_value;
 int num_regs;
+   struct amdgpu_task_info reset_task_info;
+   bool    reset_vram_lost;
+   struct timespec64   reset_time;

[kevin]:
the CONFIG_DEV_COREDUMP check is needed for above variable to avoid 
compiler warning when coredump feautre is not enabled.



Agreed.

 struct amdgpu_reset_domain  *reset_domain;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 866b4980a6fa..ca97afe5be63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 

 #include 
 #include 
@@ -4734,6 +4736,62 @@ static int amdgpu_reset_reg_dumps(struct 
amdgpu_device *adev)

 return 0;
 }

+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
+   size_t count, void *data, size_t datalen)
+{
+   struct drm_printer p;
+   struct amdgpu_device *adev = data;
+   struct drm_print_iterator iter;
+   int i;
+
+   if (adev == NULL)
+   return 0;
[kevin]:
 this check is not needed, because this private data is passed by our 
driver as below:


In my testing if the reset is unsuccessful amdgpu_devcoredump_read will 
not be called.


Shashank: Any inputs on this.


Regards,

S.Amarnath


 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+
+   iter.data = buffer;
+   iter.offset = 0;
+   iter.start = offset;
+   iter.remain = count;
+
+   p = drm_coredump_printer();
+
+   drm_printf(, " AMDGPU Device Coredump \n");
+   drm_printf(, "kernel: " UTS_RELEASE "\n");
+   drm_printf(, "module: " KBUILD_MODNAME "\n");
+   drm_printf(, "time: %lld.%09ld\n", adev->reset_time.tv_sec, 
adev->reset_time.tv_nsec);

+   if (adev->reset_task_info.pid)
+   drm_printf(, "process_name: %s PID: %d\n",
+ adev->reset_task_info.process_name,
+ adev->reset_task_info.pid);
+
+   if (adev->reset_vram_lost)
+   drm_printf(, "VRAM is lost due to GPU reset!\n");
+   if (adev->num_regs) {
+   drm_printf(, "AMDGPU register dumps:\nOffset: 
Value:\n");

+
+   for (i = 0; i < adev->num_regs; i++)
+   drm_printf(, "0x%08x: 0x%08x\n",
+ adev->reset_dump_reg_list[i],
+ adev->reset_dump_reg_value[i]);
+   }
+
+   return count - iter.remain;
+}
+
+static void amdgpu_devcoredump_free(void *data)
+{
+}
+
+static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
+{
+   struct drm_device *dev = adev_to_drm(adev);
+
+   ktime_get_ts64(>reset_time);
+   dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+   amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+}
+#endif
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
  struct amdgpu_reset_context *reset_context)
 {
@@ -4818,6 +4876,15 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,

 goto out;

 vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);

+#ifdef CONFIG_DEV_COREDUMP
+ tmp_adev->reset_vram_lost = vram_lost;
+ memset(_adev->reset_task_info, 0,
+ sizeof(tmp_adev->reset_task_info));
+   if (

Re: [PATCH v2 1/2] drm/amdgpu: save the reset dump register value for devcoredump

2022-05-31 Thread Somalapuram, Amaranath


On 5/26/2022 3:24 PM, Sharma, Shashank wrote:

Hey Amar,

On 5/26/2022 11:48 AM, Somalapuram Amaranath wrote:
Allocate memory for register value and use the same values for 
devcoredump.

v1 -> V2: Change krealloc_array() to kmalloc_array()

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 7 +++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 6 +++---
  3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index 76df583663c7..c79d9992b113 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1042,6 +1042,7 @@ struct amdgpu_device {
    /* reset dump register */
  uint32_t    *reset_dump_reg_list;
+    uint32_t    *reset_dump_reg_value;
  int num_regs;
    struct amdgpu_reset_domain    *reset_domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index eedb12f6b8a3..f3ac7912c29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1709,17 +1709,24 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,

  i++;
  } while (len < size);
  +    new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
+    if (!new) {
+    ret = -ENOMEM;
+    goto error_free;
+    }
  ret = down_write_killable(>reset_domain->sem);
  if (ret)
  goto error_free;
    swap(adev->reset_dump_reg_list, tmp);
+    swap(adev->reset_dump_reg_value, new);
  adev->num_regs = i;
  up_write(>reset_domain->sem);
  ret = size;
    error_free:
  kfree(tmp);
+    kfree(new);
  return ret;
  }
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 4daa0e893965..866b4980a6fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4720,15 +4720,15 @@ int amdgpu_device_pre_asic_reset(struct 
amdgpu_device *adev,

    static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
  {
-    uint32_t reg_value;
  int i;
    lockdep_assert_held(>reset_domain->sem);
  dump_stack();
    for (i = 0; i < adev->num_regs; i++) {
-    reg_value = RREG32(adev->reset_dump_reg_list[i]);
- trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], reg_value);
+    adev->reset_dump_reg_value[i] = 
RREG32(adev->reset_dump_reg_list[i]);

+ trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
+    adev->reset_dump_reg_value[i]);


Alignment is showing spaces in email. But its 2 tabs in code.


Regards,
S.Amarnath


Please fix the alignment with the line above, after that, please feel 
free to use:

Reviewed-by: Shashank Sharma 

- Shashank

  }
    return 0;

[PATCH v2 2/2] drm/amdgpu: adding device coredump support

2022-05-26 Thread Somalapuram Amaranath
Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps
v1 -> v2: Variable name change
v1 -> v2: NULL check
v1 -> v2: Code alignment
v1 -> v2: Adding dummy amdgpu_devcoredump_free
v1 -> v2: memset reset_task_info to zero

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 67 ++
 2 files changed, 70 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c79d9992b113..25a7b2c74928 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,9 @@ struct amdgpu_device {
uint32_t*reset_dump_reg_list;
uint32_t*reset_dump_reg_value;
int num_regs;
+   struct amdgpu_task_info reset_task_info;
+   boolreset_vram_lost;
+   struct timespec64   reset_time;
 
struct amdgpu_reset_domain  *reset_domain;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 866b4980a6fa..ca97afe5be63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include 
 #include 
@@ -4734,6 +4736,62 @@ static int amdgpu_reset_reg_dumps(struct amdgpu_device 
*adev)
return 0;
 }
 
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
+   size_t count, void *data, size_t datalen)
+{
+   struct drm_printer p;
+   struct amdgpu_device *adev = data;
+   struct drm_print_iterator iter;
+   int i;
+
+   if (adev == NULL)
+   return 0;
+
+   iter.data = buffer;
+   iter.offset = 0;
+   iter.start = offset;
+   iter.remain = count;
+
+   p = drm_coredump_printer(&iter);
+
+   drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
+   drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+   drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+   drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, 
adev->reset_time.tv_nsec);
+   if (adev->reset_task_info.pid)
+   drm_printf(&p, "process_name: %s PID: %d\n",
+  adev->reset_task_info.process_name,
+  adev->reset_task_info.pid);
+
+   if (adev->reset_vram_lost)
+   drm_printf(&p, "VRAM is lost due to GPU reset!\n");
+   if (adev->num_regs) {
+   drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
+
+   for (i = 0; i < adev->num_regs; i++)
+   drm_printf(&p, "0x%08x: 0x%08x\n",
+  adev->reset_dump_reg_list[i],
+  adev->reset_dump_reg_value[i]);
+   }
+
+   return count - iter.remain;
+}
+
+static void amdgpu_devcoredump_free(void *data)
+{
+}
+
+static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
+{
+   struct drm_device *dev = adev_to_drm(adev);
+
+   ktime_get_ts64(&adev->reset_time);
+   dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+   amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+}
+#endif
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4818,6 +4876,15 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
goto out;
 
vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);
+#ifdef CONFIG_DEV_COREDUMP
+   tmp_adev->reset_vram_lost = vram_lost;
+   memset(&tmp_adev->reset_task_info, 0,
+   
sizeof(tmp_adev->reset_task_info));
+   if (reset_context->job && 
reset_context->job->vm)
+   tmp_adev->reset_task_info =
+   
reset_context->job->vm->task_info;
+   amdgpu_reset_capture_coredumpm(tmp_adev);
+#endif
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU 
reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
-- 
2.32.0



[PATCH v2 1/2] drm/amdgpu: save the reset dump register value for devcoredump

2022-05-26 Thread Somalapuram Amaranath
Allocate memory for register value and use the same values for devcoredump.
v1 -> V2: Change krealloc_array() to kmalloc_array()

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 7 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 6 +++---
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 76df583663c7..c79d9992b113 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1042,6 +1042,7 @@ struct amdgpu_device {
 
/* reset dump register */
uint32_t*reset_dump_reg_list;
+   uint32_t*reset_dump_reg_value;
int num_regs;
 
struct amdgpu_reset_domain  *reset_domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index eedb12f6b8a3..f3ac7912c29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1709,17 +1709,24 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,
i++;
} while (len < size);
 
+   new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
+   if (!new) {
+   ret = -ENOMEM;
+   goto error_free;
+   }
	ret = down_write_killable(&adev->reset_domain->sem);
if (ret)
goto error_free;
 
swap(adev->reset_dump_reg_list, tmp);
+   swap(adev->reset_dump_reg_value, new);
adev->num_regs = i;
	up_write(&adev->reset_domain->sem);
ret = size;
 
 error_free:
kfree(tmp);
+   kfree(new);
return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4daa0e893965..866b4980a6fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4720,15 +4720,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
 
 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
 {
-   uint32_t reg_value;
int i;
 
	lockdep_assert_held(&adev->reset_domain->sem);
dump_stack();
 
for (i = 0; i < adev->num_regs; i++) {
-   reg_value = RREG32(adev->reset_dump_reg_list[i]);
-   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   adev->reset_dump_reg_value[i] = 
RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
+   adev->reset_dump_reg_value[i]);
}
 
return 0;
-- 
2.32.0



Re: [PATCH v1 2/2] drm/amdgpu: adding device coredump support

2022-05-24 Thread Somalapuram, Amaranath



On 5/24/2022 8:34 PM, Sharma, Shashank wrote:



On 5/24/2022 3:18 PM, Somalapuram, Amaranath wrote:


On 5/24/2022 6:20 PM, Sharma, Shashank wrote:



On 5/24/2022 2:10 PM, Somalapuram, Amaranath wrote:


On 5/24/2022 3:23 PM, Sharma, Shashank wrote:



On 5/24/2022 8:42 AM, Somalapuram, Amaranath wrote:


On 5/20/2022 7:52 PM, Sharma, Shashank wrote:



On 5/20/2022 3:49 PM, Somalapuram Amaranath wrote:

Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps

Signed-off-by: Somalapuram Amaranath 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  3 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 59 
++

  2 files changed, 62 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index c79d9992b113..f28d9c563f74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,9 @@ struct amdgpu_device {
  uint32_t *reset_dump_reg_list;
  uint32_t    *reset_dump_reg_value;
  int num_regs;
+    struct amdgpu_task_info reset_context_task_info;
+    bool reset_context_vram_lost;


How about drop the 'context' from name and just reset_task_info 
and reset_vram_lost ?

OK.



+    struct timespec64 reset_time;
    struct amdgpu_reset_domain    *reset_domain;
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 963c897a76e6..f9b710e741a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
  #include 
  #include 
  #include 
+#include 
+#include 
    #include 
  #include 
@@ -4733,6 +4735,55 @@ static int amdgpu_reset_reg_dumps(struct 
amdgpu_device *adev)

  return 0;
  }
  +#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t 
offset,

+    size_t count, void *data, size_t datalen)
+{
+    struct drm_printer p;
+    struct amdgpu_device *adev = data;
+    struct drm_print_iterator iter;
+    int i;
+


A NULL check for 'buffer' here could prevent a segfault later.


Agreed.

+    iter.data = buffer;
+    iter.offset = 0;
+    iter.start = offset;
+    iter.remain = count;
+
+    p = drm_coredump_printer();
+
+    drm_printf(, " AMDGPU Device Coredump \n");
+    drm_printf(, "kernel: " UTS_RELEASE "\n");
+    drm_printf(, "module: " KBUILD_MODNAME "\n");
+    drm_printf(, "time: %lld.%09ld\n", 
adev->reset_time.tv_sec, adev->reset_time.tv_nsec);

+    if (adev->reset_context_task_info.pid)
+    drm_printf(, "process_name: %s PID: %d\n",
+ adev->reset_context_task_info.process_name,
+ adev->reset_context_task_info.pid);

Please fix the alignment of print variables.


I will cross check this.

+
+    if (adev->reset_context_vram_lost)
+    drm_printf(, "VRAM is lost due to GPU reset!\n");
+    if (adev->num_regs) {
+    drm_printf(, "AMDGPU register dumps:\nOffset: 
Value:\n");

+
+    for (i = 0; i < adev->num_regs; i++)
+    drm_printf(, "0x%08x: 0x%08x\n",
+ adev->reset_dump_reg_list[i],
+ adev->reset_dump_reg_value[i]);
+    }
+
+    return count - iter.remain;
+}
+
+static void amdgpu_reset_capture_coredumpm(struct 
amdgpu_device *adev)

+{
+    struct drm_device *dev = adev_to_drm(adev);
+
+    ktime_get_ts64(>reset_time);
+    dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+    amdgpu_devcoredump_read, NULL);
instead of registering NULL as free function, I would prefer you 
to have a dummy no_op free function registered, which we can 
consume if something changes.

you mean something like this (function without any code):
staticvoidamdgpu_devcoredump_free(void*data)
{
}


Yes, precisely.


+}
+#endif
+
  int amdgpu_do_asic_reset(struct list_head *device_list_handle,
   struct amdgpu_reset_context *reset_context)
  {
@@ -4817,6 +4868,14 @@ int amdgpu_do_asic_reset(struct 
list_head *device_list_handle,

  goto out;
    vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);

+#ifdef CONFIG_DEV_COREDUMP
+ tmp_adev->reset_context_vram_lost = vram_lost;
+ tmp_adev->reset_context_task_info.pid = 0;

why is the PID hardcoded to 0 ?
in case of reset context reset_context->job->vm is null 
(possibility that reset can be non VM related).
If we don't set tmp_adev->reset_context_task_info.pid = 0, it 
will show previous reset valid PID.




But when the VM is not NULL, are we updating this PID somewhere ? 
I did not see that happening in this series.

This is the only place where PID get updated.
For example sequence of operation like:
1st reset:
-valid VM and tmp_adev->reset_context_task_info.pid is set to some 
valid PID

2nd reset:
-invalid VM
-tm

Re: [PATCH v1 2/2] drm/amdgpu: adding device coredump support

2022-05-24 Thread Somalapuram, Amaranath



On 5/24/2022 6:20 PM, Sharma, Shashank wrote:



On 5/24/2022 2:10 PM, Somalapuram, Amaranath wrote:


On 5/24/2022 3:23 PM, Sharma, Shashank wrote:



On 5/24/2022 8:42 AM, Somalapuram, Amaranath wrote:


On 5/20/2022 7:52 PM, Sharma, Shashank wrote:



On 5/20/2022 3:49 PM, Somalapuram Amaranath wrote:

Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  3 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 59 
++

  2 files changed, 62 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index c79d9992b113..f28d9c563f74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,9 @@ struct amdgpu_device {
  uint32_t *reset_dump_reg_list;
  uint32_t    *reset_dump_reg_value;
  int num_regs;
+    struct amdgpu_task_info reset_context_task_info;
+    bool reset_context_vram_lost;


How about drop the 'context' from name and just reset_task_info 
and reset_vram_lost ?

OK.



+    struct timespec64 reset_time;
    struct amdgpu_reset_domain    *reset_domain;
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 963c897a76e6..f9b710e741a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
  #include 
  #include 
  #include 
+#include 
+#include 
    #include 
  #include 
@@ -4733,6 +4735,55 @@ static int amdgpu_reset_reg_dumps(struct 
amdgpu_device *adev)

  return 0;
  }
  +#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
+    size_t count, void *data, size_t datalen)
+{
+    struct drm_printer p;
+    struct amdgpu_device *adev = data;
+    struct drm_print_iterator iter;
+    int i;
+


A NULL check for 'buffer' here could prevent a segfault later.


Agreed.

+    iter.data = buffer;
+    iter.offset = 0;
+    iter.start = offset;
+    iter.remain = count;
+
+    p = drm_coredump_printer();
+
+    drm_printf(, " AMDGPU Device Coredump \n");
+    drm_printf(, "kernel: " UTS_RELEASE "\n");
+    drm_printf(, "module: " KBUILD_MODNAME "\n");
+    drm_printf(, "time: %lld.%09ld\n", 
adev->reset_time.tv_sec, adev->reset_time.tv_nsec);

+    if (adev->reset_context_task_info.pid)
+    drm_printf(, "process_name: %s PID: %d\n",
+ adev->reset_context_task_info.process_name,
+ adev->reset_context_task_info.pid);

Please fix the alignment of print variables.


I will cross check this.

+
+    if (adev->reset_context_vram_lost)
+    drm_printf(, "VRAM is lost due to GPU reset!\n");
+    if (adev->num_regs) {
+    drm_printf(, "AMDGPU register dumps:\nOffset: Value:\n");
+
+    for (i = 0; i < adev->num_regs; i++)
+    drm_printf(, "0x%08x: 0x%08x\n",
+    adev->reset_dump_reg_list[i],
+    adev->reset_dump_reg_value[i]);
+    }
+
+    return count - iter.remain;
+}
+
+static void amdgpu_reset_capture_coredumpm(struct amdgpu_device 
*adev)

+{
+    struct drm_device *dev = adev_to_drm(adev);
+
+    ktime_get_ts64(>reset_time);
+    dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+    amdgpu_devcoredump_read, NULL);
instead of registering NULL as free function, I would prefer you 
to have a dummy no_op free function registered, which we can 
consume if something changes.

you mean something like this (function without any code):
staticvoidamdgpu_devcoredump_free(void*data)
{
}


Yes, precisely.


+}
+#endif
+
  int amdgpu_do_asic_reset(struct list_head *device_list_handle,
   struct amdgpu_reset_context *reset_context)
  {
@@ -4817,6 +4868,14 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,

  goto out;
    vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);

+#ifdef CONFIG_DEV_COREDUMP
+    tmp_adev->reset_context_vram_lost = vram_lost;
+ tmp_adev->reset_context_task_info.pid = 0;

why is the PID hardcoded to 0 ?
in case of reset context reset_context->job->vm  is null 
(possibility that reset can be non VM related).
If we don't set tmp_adev->reset_context_task_info.pid = 0, it will 
show previous reset valid PID.




But when the VM is not NULL, are we updating this PID somewhere ? I 
did not see that happening in this series.

This is the only place where PID get updated.
For example sequence of operation like:
1st reset:
-valid VM and tmp_adev->reset_context_task_info.pid is set to some 
valid PID

2nd reset:
-invalid VM
-tmp_adev context will remain same (adev context will be same 

Re: [PATCH v1 2/2] drm/amdgpu: adding device coredump support

2022-05-24 Thread Somalapuram, Amaranath



On 5/24/2022 3:23 PM, Sharma, Shashank wrote:



On 5/24/2022 8:42 AM, Somalapuram, Amaranath wrote:


On 5/20/2022 7:52 PM, Sharma, Shashank wrote:



On 5/20/2022 3:49 PM, Somalapuram Amaranath wrote:

Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  3 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 59 
++

  2 files changed, 62 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index c79d9992b113..f28d9c563f74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,9 @@ struct amdgpu_device {
  uint32_t    *reset_dump_reg_list;
  uint32_t    *reset_dump_reg_value;
  int num_regs;
+    struct amdgpu_task_info reset_context_task_info;
+    bool reset_context_vram_lost;


How about drop the 'context' from name and just reset_task_info and 
reset_vram_lost ?

OK.



+    struct timespec64 reset_time;
    struct amdgpu_reset_domain    *reset_domain;
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 963c897a76e6..f9b710e741a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
  #include 
  #include 
  #include 
+#include 
+#include 
    #include 
  #include 
@@ -4733,6 +4735,55 @@ static int amdgpu_reset_reg_dumps(struct 
amdgpu_device *adev)

  return 0;
  }
  +#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
+    size_t count, void *data, size_t datalen)
+{
+    struct drm_printer p;
+    struct amdgpu_device *adev = data;
+    struct drm_print_iterator iter;
+    int i;
+


A NULL check for 'buffer' here could prevent a segfault later.


Agreed.

+    iter.data = buffer;
+    iter.offset = 0;
+    iter.start = offset;
+    iter.remain = count;
+
+    p = drm_coredump_printer();
+
+    drm_printf(, " AMDGPU Device Coredump \n");
+    drm_printf(, "kernel: " UTS_RELEASE "\n");
+    drm_printf(, "module: " KBUILD_MODNAME "\n");
+    drm_printf(, "time: %lld.%09ld\n", adev->reset_time.tv_sec, 
adev->reset_time.tv_nsec);

+    if (adev->reset_context_task_info.pid)
+    drm_printf(, "process_name: %s PID: %d\n",
+ adev->reset_context_task_info.process_name,
+ adev->reset_context_task_info.pid);

Please fix the alignment of print variables.


I will cross check this.

+
+    if (adev->reset_context_vram_lost)
+    drm_printf(, "VRAM is lost due to GPU reset!\n");
+    if (adev->num_regs) {
+    drm_printf(, "AMDGPU register dumps:\nOffset: Value:\n");
+
+    for (i = 0; i < adev->num_regs; i++)
+    drm_printf(, "0x%08x: 0x%08x\n",
+    adev->reset_dump_reg_list[i],
+    adev->reset_dump_reg_value[i]);
+    }
+
+    return count - iter.remain;
+}
+
+static void amdgpu_reset_capture_coredumpm(struct amdgpu_device 
*adev)

+{
+    struct drm_device *dev = adev_to_drm(adev);
+
+    ktime_get_ts64(>reset_time);
+    dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+    amdgpu_devcoredump_read, NULL);
instead of registering NULL as free function, I would prefer you to 
have a dummy no_op free function registered, which we can consume if 
something changes.

you mean something like this (function without any code):
staticvoidamdgpu_devcoredump_free(void*data)
{
}


Yes, precisely.


+}
+#endif
+
  int amdgpu_do_asic_reset(struct list_head *device_list_handle,
   struct amdgpu_reset_context *reset_context)
  {
@@ -4817,6 +4868,14 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,

  goto out;
    vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);

+#ifdef CONFIG_DEV_COREDUMP
+    tmp_adev->reset_context_vram_lost = vram_lost;
+    tmp_adev->reset_context_task_info.pid = 0;

why is the PID hardcoded to 0 ?
in case of reset context reset_context->job->vm  is null (possibility 
that reset can be non VM related).
If we don't set tmp_adev->reset_context_task_info.pid = 0, it will 
show previous reset valid PID.




But when the VM is not NULL, are we updating this PID somewhere ? I 
did not see that happening in this series.

This is the only place where PID get updated.
For example sequence of operation like:
1st reset:
-valid VM and tmp_adev->reset_context_task_info.pid is set to some valid PID
2nd reset:
-invalid VM
-tmp_adev context will remain same (adev context will be same after 
successful  GPU reset sequence).

-tmp_adev->reset_

Re: [PATCH v1 1/2] drm/amdgpu: save the reset dump register value for devcoredump

2022-05-24 Thread Somalapuram, Amaranath



On 5/24/2022 3:25 PM, Sharma, Shashank wrote:



On 5/24/2022 8:12 AM, Somalapuram, Amaranath wrote:


On 5/20/2022 7:36 PM, Sharma, Shashank wrote:

Hey Amar,

On 5/20/2022 3:49 PM, Somalapuram Amaranath wrote:
Allocate memory for register value and use the same values for 
devcoredump.

Remove dump_stack reset register dumps.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 9 -
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 7 +++
  3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index 76df583663c7..c79d9992b113 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1042,6 +1042,7 @@ struct amdgpu_device {
    /* reset dump register */
  uint32_t    *reset_dump_reg_list;
+    uint32_t    *reset_dump_reg_value;
  int num_regs;
    struct amdgpu_reset_domain    *reset_domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index eedb12f6b8a3..942fdbd316f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1683,7 +1683,7 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,

  {
  struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

  char reg_offset[11];
-    uint32_t *new, *tmp = NULL;
+    uint32_t *new, *tmp = NULL, *tmp_value = NULL;
  int ret, i = 0, len = 0;
    do {
@@ -1709,17 +1709,24 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,

  i++;
  } while (len < size);
  +    new = krealloc_array(tmp_value, i, sizeof(uint32_t), 
GFP_KERNEL);


tmp_value is initialized to NULL, which means krealloc_array() will 
behave like kmalloc_array(), is there any particular reason we are 
adding this variable at all just to use krealloc_array(), and why 
not use kmalloc_array() directly ?


I thought of using kmalloc_array() (got little confused on next write 
cycle), I agree to use kmalloc_array().


Regards,
S.Amarnath



+    if (!new) {
+    ret = -ENOMEM;
+    goto error_free;
+    }
  ret = down_write_killable(>reset_domain->sem);
  if (ret)
  goto error_free;
    swap(adev->reset_dump_reg_list, tmp);
+    swap(adev->reset_dump_reg_value, new);
  adev->num_regs = i;
  up_write(>reset_domain->sem);
  ret = size;
    error_free:
  kfree(tmp);
+    kfree(new);
  return ret;
  }
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 4daa0e893965..963c897a76e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4720,15 +4720,14 @@ int amdgpu_device_pre_asic_reset(struct 
amdgpu_device *adev,

    static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
  {
-    uint32_t reg_value;
  int i;
lockdep_assert_held(>reset_domain->sem);
-    dump_stack();
This should be a part of different patch, where you can give some 
background on why are we removing this.



Will make different patch for this.

You missed this comment.
- Shashank


    for (i = 0; i < adev->num_regs; i++) {
-    reg_value = RREG32(adev->reset_dump_reg_list[i]);
- trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+    adev->reset_dump_reg_value[i] = 
RREG32(adev->reset_dump_reg_list[i]);

+ trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
+    adev->reset_dump_reg_value[i]);
  }
    return 0;


- Shashank


Re: [PATCH v1 2/2] drm/amdgpu: adding device coredump support

2022-05-24 Thread Somalapuram, Amaranath


On 5/20/2022 7:52 PM, Sharma, Shashank wrote:



On 5/20/2022 3:49 PM, Somalapuram Amaranath wrote:

Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  3 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 59 ++
  2 files changed, 62 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index c79d9992b113..f28d9c563f74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,9 @@ struct amdgpu_device {
  uint32_t    *reset_dump_reg_list;
  uint32_t    *reset_dump_reg_value;
  int num_regs;
+    struct amdgpu_task_info reset_context_task_info;
+    bool    reset_context_vram_lost;


How about drop the 'context' from name and just reset_task_info and 
reset_vram_lost ?

OK.



+    struct timespec64 reset_time;
    struct amdgpu_reset_domain    *reset_domain;
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 963c897a76e6..f9b710e741a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
  #include 
  #include 
  #include 
+#include 
+#include 
    #include 
  #include 
@@ -4733,6 +4735,55 @@ static int amdgpu_reset_reg_dumps(struct 
amdgpu_device *adev)

  return 0;
  }
  +#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
+    size_t count, void *data, size_t datalen)
+{
+    struct drm_printer p;
+    struct amdgpu_device *adev = data;
+    struct drm_print_iterator iter;
+    int i;
+


A NULL check for 'buffer' here could prevent a segfault later.


Agreed.

+    iter.data = buffer;
+    iter.offset = 0;
+    iter.start = offset;
+    iter.remain = count;
+
+    p = drm_coredump_printer();
+
+    drm_printf(, " AMDGPU Device Coredump \n");
+    drm_printf(, "kernel: " UTS_RELEASE "\n");
+    drm_printf(, "module: " KBUILD_MODNAME "\n");
+    drm_printf(, "time: %lld.%09ld\n", adev->reset_time.tv_sec, 
adev->reset_time.tv_nsec);

+    if (adev->reset_context_task_info.pid)
+    drm_printf(, "process_name: %s PID: %d\n",
+ adev->reset_context_task_info.process_name,
+ adev->reset_context_task_info.pid);

Please fix the alignment of print variables.


I will cross check this.

+
+    if (adev->reset_context_vram_lost)
+    drm_printf(, "VRAM is lost due to GPU reset!\n");
+    if (adev->num_regs) {
+    drm_printf(, "AMDGPU register dumps:\nOffset: Value:\n");
+
+    for (i = 0; i < adev->num_regs; i++)
+    drm_printf(, "0x%08x: 0x%08x\n",
+    adev->reset_dump_reg_list[i],
+    adev->reset_dump_reg_value[i]);
+    }
+
+    return count - iter.remain;
+}
+
+static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
+{
+    struct drm_device *dev = adev_to_drm(adev);
+
+    ktime_get_ts64(>reset_time);
+    dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+    amdgpu_devcoredump_read, NULL);
instead of registering NULL as free function, I would prefer you to 
have a dummy no_op free function registered, which we can consume if 
something changes.

you mean something like this (a function without any code):

static void amdgpu_devcoredump_free(void *data)
{
}

+}
+#endif
+
  int amdgpu_do_asic_reset(struct list_head *device_list_handle,
   struct amdgpu_reset_context *reset_context)
  {
@@ -4817,6 +4868,14 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,

  goto out;
    vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+#ifdef CONFIG_DEV_COREDUMP
+    tmp_adev->reset_context_vram_lost = vram_lost;
+    tmp_adev->reset_context_task_info.pid = 0;

why is the PID hardcoded to 0 ?
In the reset context, reset_context->job->vm can be NULL (there is a possibility
that the reset is not VM related).
If we don't set tmp_adev->reset_context_task_info.pid = 0, it will show the
valid PID from the previous reset.



Regards,
S.Amarnath

+    if (reset_context->job && reset_context->job->vm)
+    tmp_adev->reset_context_task_info =
+ reset_context->job->vm->task_info;
+    amdgpu_reset_capture_coredumpm(tmp_adev);
+#endif
  if (vram_lost) {
  DRM_INFO("VRAM is lost due to GPU reset!\n");


- Shashank
 amdgpu_inc_vram_lost(tmp_adev);

Re: [PATCH v1 1/2] drm/amdgpu: save the reset dump register value for devcoredump

2022-05-24 Thread Somalapuram, Amaranath



On 5/20/2022 7:36 PM, Sharma, Shashank wrote:

Hey Amar,

On 5/20/2022 3:49 PM, Somalapuram Amaranath wrote:
Allocate memory for register value and use the same values for 
devcoredump.

Remove dump_stack reset register dumps.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 9 -
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 7 +++
  3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index 76df583663c7..c79d9992b113 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1042,6 +1042,7 @@ struct amdgpu_device {
    /* reset dump register */
  uint32_t    *reset_dump_reg_list;
+    uint32_t    *reset_dump_reg_value;
  int num_regs;
    struct amdgpu_reset_domain    *reset_domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index eedb12f6b8a3..942fdbd316f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1683,7 +1683,7 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,

  {
  struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

  char reg_offset[11];
-    uint32_t *new, *tmp = NULL;
+    uint32_t *new, *tmp = NULL, *tmp_value = NULL;
  int ret, i = 0, len = 0;
    do {
@@ -1709,17 +1709,24 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,

  i++;
  } while (len < size);
  +    new = krealloc_array(tmp_value, i, sizeof(uint32_t), GFP_KERNEL);


tmp_value is initialized to NULL, which means krealloc_array() will 
behave like kmalloc_array(), is there any particular reason we are 
adding this variable at all just to use krealloc_array(), and why not 
use kmalloc_array() directly ?


I thought of using kmalloc_array() (got little confused on next write 
cycle), I agree to use kmalloc_array().


Regards,
S.Amarnath



+    if (!new) {
+    ret = -ENOMEM;
+    goto error_free;
+    }
  ret = down_write_killable(>reset_domain->sem);
  if (ret)
  goto error_free;
    swap(adev->reset_dump_reg_list, tmp);
+    swap(adev->reset_dump_reg_value, new);
  adev->num_regs = i;
  up_write(>reset_domain->sem);
  ret = size;
    error_free:
  kfree(tmp);
+    kfree(new);
  return ret;
  }
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 4daa0e893965..963c897a76e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4720,15 +4720,14 @@ int amdgpu_device_pre_asic_reset(struct 
amdgpu_device *adev,

    static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
  {
-    uint32_t reg_value;
  int i;
    lockdep_assert_held(>reset_domain->sem);
-    dump_stack();
This should be a part of different patch, where you can give some 
background on why are we removing this.

    for (i = 0; i < adev->num_regs; i++) {
-    reg_value = RREG32(adev->reset_dump_reg_list[i]);
- trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], reg_value);
+    adev->reset_dump_reg_value[i] = 
RREG32(adev->reset_dump_reg_list[i]);

+ trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
+    adev->reset_dump_reg_value[i]);
  }
    return 0;


- Shashank


[PATCH v1 2/2] drm/amdgpu: adding device coredump support

2022-05-20 Thread Somalapuram Amaranath
Added device coredump information:
- Kernel version
- Module
- Time
- VRAM status
- Guilty process name and PID
- GPU register dumps

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 59 ++
 2 files changed, 62 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c79d9992b113..f28d9c563f74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1044,6 +1044,9 @@ struct amdgpu_device {
uint32_t*reset_dump_reg_list;
uint32_t*reset_dump_reg_value;
int num_regs;
+   struct amdgpu_task_info reset_context_task_info;
+   boolreset_context_vram_lost;
+   struct timespec64   reset_time;
 
struct amdgpu_reset_domain  *reset_domain;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 963c897a76e6..f9b710e741a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -32,6 +32,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include 
 #include 
@@ -4733,6 +4735,55 @@ static int amdgpu_reset_reg_dumps(struct amdgpu_device 
*adev)
return 0;
 }
 
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
+   size_t count, void *data, size_t datalen)
+{
+   struct drm_printer p;
+   struct amdgpu_device *adev = data;
+   struct drm_print_iterator iter;
+   int i;
+
+   iter.data = buffer;
+   iter.offset = 0;
+   iter.start = offset;
+   iter.remain = count;
+
+   p = drm_coredump_printer();
+
+   drm_printf(, " AMDGPU Device Coredump \n");
+   drm_printf(, "kernel: " UTS_RELEASE "\n");
+   drm_printf(, "module: " KBUILD_MODNAME "\n");
+   drm_printf(, "time: %lld.%09ld\n", adev->reset_time.tv_sec, 
adev->reset_time.tv_nsec);
+   if (adev->reset_context_task_info.pid)
+   drm_printf(, "process_name: %s PID: %d\n",
+   
adev->reset_context_task_info.process_name,
+   
adev->reset_context_task_info.pid);
+
+   if (adev->reset_context_vram_lost)
+   drm_printf(, "VRAM is lost due to GPU reset!\n");
+   if (adev->num_regs) {
+   drm_printf(, "AMDGPU register dumps:\nOffset: Value:\n");
+
+   for (i = 0; i < adev->num_regs; i++)
+   drm_printf(, "0x%08x: 0x%08x\n",
+   adev->reset_dump_reg_list[i],
+   adev->reset_dump_reg_value[i]);
+   }
+
+   return count - iter.remain;
+}
+
+static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
+{
+   struct drm_device *dev = adev_to_drm(adev);
+
+   ktime_get_ts64(>reset_time);
+   dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+   amdgpu_devcoredump_read, NULL);
+}
+#endif
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4817,6 +4868,14 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
goto out;
 
vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);
+#ifdef CONFIG_DEV_COREDUMP
+   tmp_adev->reset_context_vram_lost = vram_lost;
+   tmp_adev->reset_context_task_info.pid = 0;
+   if (reset_context->job && 
reset_context->job->vm)
+   tmp_adev->reset_context_task_info =
+   
reset_context->job->vm->task_info;
+   amdgpu_reset_capture_coredumpm(tmp_adev);
+#endif
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU 
reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
-- 
2.32.0



[PATCH v1 1/2] drm/amdgpu: save the reset dump register value for devcoredump

2022-05-20 Thread Somalapuram Amaranath
Allocate memory for register value and use the same values for devcoredump.
Remove dump_stack reset register dumps.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 9 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 7 +++
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 76df583663c7..c79d9992b113 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1042,6 +1042,7 @@ struct amdgpu_device {
 
/* reset dump register */
uint32_t*reset_dump_reg_list;
+   uint32_t*reset_dump_reg_value;
int num_regs;
 
struct amdgpu_reset_domain  *reset_domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index eedb12f6b8a3..942fdbd316f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1683,7 +1683,7 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,
 {
struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
char reg_offset[11];
-   uint32_t *new, *tmp = NULL;
+   uint32_t *new, *tmp = NULL, *tmp_value = NULL;
int ret, i = 0, len = 0;
 
do {
@@ -1709,17 +1709,24 @@ static ssize_t 
amdgpu_reset_dump_register_list_write(struct file *f,
i++;
} while (len < size);
 
+   new = krealloc_array(tmp_value, i, sizeof(uint32_t), GFP_KERNEL);
+   if (!new) {
+   ret = -ENOMEM;
+   goto error_free;
+   }
ret = down_write_killable(>reset_domain->sem);
if (ret)
goto error_free;
 
swap(adev->reset_dump_reg_list, tmp);
+   swap(adev->reset_dump_reg_value, new);
adev->num_regs = i;
up_write(>reset_domain->sem);
ret = size;
 
 error_free:
kfree(tmp);
+   kfree(new);
return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4daa0e893965..963c897a76e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4720,15 +4720,14 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
 
 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
 {
-   uint32_t reg_value;
int i;
 
lockdep_assert_held(>reset_domain->sem);
-   dump_stack();
 
for (i = 0; i < adev->num_regs; i++) {
-   reg_value = RREG32(adev->reset_dump_reg_list[i]);
-   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   adev->reset_dump_reg_value[i] = 
RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
+   adev->reset_dump_reg_value[i]);
}
 
return 0;
-- 
2.32.0



[PATCH] drm/amdgpu: schedule GPU reset event work function

2022-03-10 Thread Somalapuram Amaranath
Schedule work function with valid PID, process name and
vram lost status during a GPU reset/recovery.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13 +
 1 file changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 02843479e1ef..7ca4f52e393f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4805,6 +4805,19 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
goto out;
 
vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);
+   if (reset_context->job && 
reset_context->job->vm) {
+   tmp_adev->reset_event_info.pid =
+   
reset_context->job->vm->task_info.pid;
+   
memset(tmp_adev->reset_event_info.pname, 0, TASK_COMM_LEN);
+   strcpy(tmp_adev->reset_event_info.pname,
+   
reset_context->job->vm->task_info.process_name);
+   } else {
+   tmp_adev->reset_event_info.pid = 0;
+   
memset(tmp_adev->reset_event_info.pname, 0, TASK_COMM_LEN);
+   }
+
+   tmp_adev->reset_event_info.flags = vram_lost;
+   schedule_work(_adev->gpu_reset_event_work);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU 
reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
-- 
2.32.0



Re: [PATCH 2/2] drm/amdgpu: add work function for GPU reset event

2022-03-08 Thread Somalapuram, Amaranath


On 3/8/2022 10:00 PM, Sharma, Shashank wrote:

Hello Andrey

On 3/8/2022 5:26 PM, Andrey Grodzovsky wrote:


On 2022-03-07 11:26, Shashank Sharma wrote:

From: Shashank Sharma 

This patch adds a work function, which will get scheduled
in event of a GPU reset, and will send a uevent to user with
some reset context information, like a PID and some flags.



Where is the actual scheduling of the work function ? Shouldn't
there be a patch for that too ?



Yes, Amar is working on that patch, on top of these patches. They 
should be out soon. I thought it was a good idea to get quick feedback 
on the basic patches before we build something on top of it.



schedule_work() will be called in the function amdgpu_do_asic_reset ()

after getting vram_lost info:

vram_lost = amdgpu_device_check_vram_lost(tmp_adev);

update  amdgpu_reset_event_ctx and call schedule_work()

 * vram_lost
 * reset_context->job->vm->task_info.process_name
 * reset_context->job->vm->task_info.pid

Regards,
S.Amarnath

- Shashank


Andrey




The userspace can do some recovery and post-processing work
based on this event.

V2:
- Changed the name of the work to gpu_reset_event_work
   (Christian)
- Added a structure to accommodate some additional information
   (like a PID and some flags)

Cc: Alexander Deucher 
Cc: Christian Koenig 
Signed-off-by: Shashank Sharma 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  7 +++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 +++
  2 files changed, 26 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index d8b854fcbffa..7df219fe363f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -813,6 +813,11 @@ struct amd_powerplay {
  #define AMDGPU_RESET_MAGIC_NUM 64
  #define AMDGPU_MAX_DF_PERFMONS 4
  #define AMDGPU_PRODUCT_NAME_LEN 64
+struct amdgpu_reset_event_ctx {
+    uint64_t pid;
+    uint32_t flags;
+};
+
  struct amdgpu_device {
  struct device    *dev;
  struct pci_dev    *pdev;
@@ -1063,6 +1068,7 @@ struct amdgpu_device {
  int asic_reset_res;
  struct work_struct    xgmi_reset_work;
+    struct work_struct    gpu_reset_event_work;
  struct list_head    reset_list;
  long    gfx_timeout;
@@ -1097,6 +1103,7 @@ struct amdgpu_device {
  pci_channel_state_t    pci_channel_state;
  struct amdgpu_reset_control *reset_cntl;
+    struct amdgpu_reset_event_ctx   reset_event_ctx;
  uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
  bool    ram_is_direct_mapped;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index ed077de426d9..c43d099da06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@
  #include 
  #include 
+#include 
  MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -3277,6 +3278,23 @@ bool amdgpu_device_has_dc_support(struct 
amdgpu_device *adev)

  return amdgpu_device_asic_has_dc_support(adev->asic_type);
  }
+static void amdgpu_device_reset_event_func(struct work_struct *__work)
+{
+    struct amdgpu_device *adev = container_of(__work, struct 
amdgpu_device,

+  gpu_reset_event_work);
+    struct amdgpu_reset_event_ctx *event_ctx = >reset_event_ctx;
+
+    /*
+ * A GPU reset has happened, indicate the userspace and pass the
+ * following information:
+ *    - pid of the process involved,
+ *    - if the VRAM is valid or not,
+ *    - indicate that userspace may want to collect the ftrace 
event

+ * data from the trace event.
+ */
+    drm_sysfs_reset_event(>ddev, event_ctx->pid, 
event_ctx->flags);

+}
+
  static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
  {
  struct amdgpu_device *adev =
@@ -3525,6 +3543,7 @@ int amdgpu_device_init(struct amdgpu_device 
*adev,

    amdgpu_device_delay_enable_gfx_off);
  INIT_WORK(>xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+    INIT_WORK(>gpu_reset_event_work, 
amdgpu_device_reset_event_func);

  adev->gfx.gfx_off_req_count = 1;
  adev->pm.ac_power = power_supply_is_system_supplied() > 0;

Re: [PATCH 1/2] drm: Add GPU reset sysfs event

2022-03-08 Thread Somalapuram, Amaranath



On 3/8/2022 10:27 PM, Sharma, Shashank wrote:



On 3/8/2022 5:55 PM, Andrey Grodzovsky wrote:
You can read on their side here - 
https://www.phoronix.com/scan.php?page=news_item=AMD-STB-Linux-5.17 
and see their patch. THey don't have as clean
interface as we do to retrieve the buffer and currently it's 
hard-coded for debugfs dump but it looks like pretty straight forward 
to expose their buffer to external

client like amdgpu.


The customer requirement is to get a reset notification for their daemon, along
with other info (like PID, process name, VRAM status).


Regards,
S.Amarnath

Noted, thanks for the pointer.
- Shashank


Andrey

On 2022-03-08 11:46, Sharma, Shashank wrote:
I have a very limited understanding of PMC driver and its 
interfaces, so I would just go ahead and rely on Andrey's 
judgement/recommendation on this :)


- Shashank

On 3/8/2022 5:39 PM, Andrey Grodzovsky wrote:
As long as PMC driver provides clear interface to retrieve the info 
there should be no issue to call either amdgpu interface or PMC 
interface using IS_APU (or something alike in the code)

We probably should add a wrapper function around this logic in amdgpu.

Andrey

On 2022-03-08 11:36, Lazar, Lijo wrote:


[AMD Official Use Only]


+Mario

I guess that means the functionality needs to be present in amdgpu 
for APUs also. Presently, this is taken care by PMC driver for APUs.


Thanks,
Lijo
 

*From:* amd-gfx  on behalf 
of Andrey Grodzovsky 

*Sent:* Tuesday, March 8, 2022 9:55:03 PM
*To:* Shashank Sharma ; 
amd-gfx@lists.freedesktop.org 
*Cc:* Deucher, Alexander ; Somalapuram, 
Amaranath ; Koenig, Christian 
; Sharma, Shashank 


*Subject:* Re: [PATCH 1/2] drm: Add GPU reset sysfs event

On 2022-03-07 11:26, Shashank Sharma wrote:
> From: Shashank Sharma 
>
> This patch adds a new sysfs event, which will indicate
> the userland about a GPU reset, and can also provide
> some information like:
> - which PID was involved in the GPU reset
> - what was the GPU status (using flags)
>
> This patch also introduces the first flag of the flags
> bitmap, which can be appended as and when required.


I am reminding again about another important piece of info which 
you can add

here and that is Smart Trace Buffer dump [1]. The buffer size is HW
specific but
from what I see there is no problem to just amend it as part of 
envp[]

initialization.
bellow.

The interface to get the buffer is smu_stb_collect_info and usage 
can be

seen from
frebugfs interface in smu_stb_debugfs_open

[1] - 
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fwww.spinics.net%2Flists%2Famd-gfx%2Fmsg70751.htmldata=04%7C01%7Clijo.lazar%40amd.com%7C80bc3f07e2d0441d44a108da012036dc%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637823535167679490%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000sdata=53l7KlTf%2BICKkZkLVwFh6nRTjkAh%2FDpOat5DRoyKIx0%3Dreserved=0 
<https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fwww.spinics.net%2Flists%2Famd-gfx%2Fmsg70751.htmldata=04%7C01%7Clijo.lazar%40amd.com%7C80bc3f07e2d0441d44a108da012036dc%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637823535167679490%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000sdata=53l7KlTf%2BICKkZkLVwFh6nRTjkAh%2FDpOat5DRoyKIx0%3Dreserved=0> 



Andrey


>
> Cc: Alexandar Deucher 
> Cc: Christian Koenig 
> Signed-off-by: Shashank Sharma 
> ---
>   drivers/gpu/drm/drm_sysfs.c | 24 
>   include/drm/drm_sysfs.h |  3 +++
>   2 files changed, 27 insertions(+)
>
> diff --git a/drivers/gpu/drm/drm_sysfs.c 
b/drivers/gpu/drm/drm_sysfs.c

> index 430e00b16eec..52a015161431 100644
> --- a/drivers/gpu/drm/drm_sysfs.c
> +++ b/drivers/gpu/drm/drm_sysfs.c
> @@ -409,6 +409,30 @@ void drm_sysfs_hotplug_event(struct 
drm_device *dev)

>   }
>   EXPORT_SYMBOL(drm_sysfs_hotplug_event);
>
> +/**
> + * drm_sysfs_reset_event - generate a DRM uevent to indicate 
GPU reset

> + * @dev: DRM device
> + * @pid: The process ID involve with the reset
> + * @flags: Any other information about the GPU status
> + *
> + * Send a uevent for the DRM device specified by @dev. This 
indicates
> + * user that a GPU reset has occurred, so that the interested 
client

> + * can take any recovery or profiling measure, when required.
> + */
> +void drm_sysfs_reset_event(struct drm_device *dev, uint64_t 
pid, uint32_t flags)

> +{
> + unsigned char pid_str[21], flags_str[15];
> + unsigned char reset_str[] = "RESET=1";
> + char *envp[] = { reset_str, pid_str, flags_str, NULL };
> +
> + DRM_DEBUG("generating reset event\n");
> +
> + snprintf(pid_str, ARRAY_SIZE(pid_str), "PID=%lu", pid);
> + 

Re: [PATCH 1/2] drm: Add GPU reset sysfs event

2022-03-08 Thread Somalapuram, Amaranath



On 3/8/2022 5:26 PM, Sharma, Shashank wrote:



On 3/8/2022 11:32 AM, Christian König wrote:

Am 08.03.22 um 10:31 schrieb Sharma, Shashank:



On 3/8/2022 8:06 AM, Christian König wrote:

Am 07.03.22 um 17:26 schrieb Shashank Sharma:

From: Shashank Sharma 

This patch adds a new sysfs event, which will indicate
the userland about a GPU reset, and can also provide
some information like:
- which PID was involved in the GPU reset
- what was the GPU status (using flags)

This patch also introduces the first flag of the flags
bitmap, which can be appended as and when required.


Make sure to CC the dri-devel mailing list when reviewing this.

Got it,

I was also curious if we want to move the reset_ctx structure itself 
to DRM layer, like

drm_reset_event_ctx {
u32 pid;
u32 flags;
char process_name[64];
};


I was entertaining that thought as well.

But if we do this I would go even a step further and also move the 
reset work item into the DRM layer as well.


You might also look like into migrating the exiting i915 code which 
uses udev to signal GPU resets to this function as well.



Hi Christian,

Can we access adev in common DRM code? (Even if we can access adev, it will not
be common code.)


Moving the work function to DRM needs to be protected (i.e. by
reset_domain->sem); should we add something like reset_sem to drm_device?


Regards,

S.Amarnath


Regards,
Christian.


That seems like a good idea, let me quickly dive into i915 and check 
this out.


Shashank




and then:
void drm_sysfs_reset_event(struct drm_device *dev, 
drm_reset_event_ctx *ctx);






Cc: Alexandar Deucher 
Cc: Christian Koenig 
Signed-off-by: Shashank Sharma 
---
  drivers/gpu/drm/drm_sysfs.c | 24 
  include/drm/drm_sysfs.h |  3 +++
  2 files changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/drm_sysfs.c 
b/drivers/gpu/drm/drm_sysfs.c

index 430e00b16eec..52a015161431 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -409,6 +409,30 @@ void drm_sysfs_hotplug_event(struct 
drm_device *dev)

  }
  EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+/**
+ * drm_sysfs_reset_event - generate a DRM uevent to indicate GPU 
reset

+ * @dev: DRM device
+ * @pid: The process ID involve with the reset
+ * @flags: Any other information about the GPU status
+ *
+ * Send a uevent for the DRM device specified by @dev. This 
indicates

+ * user that a GPU reset has occurred, so that the interested client
+ * can take any recovery or profiling measure, when required.
+ */
+void drm_sysfs_reset_event(struct drm_device *dev, uint64_t pid, 
uint32_t flags)


The PID is usually only 32bit, but even better would be to use pid_t.


+{
+    unsigned char pid_str[21], flags_str[15];
+    unsigned char reset_str[] = "RESET=1";
+    char *envp[] = { reset_str, pid_str, flags_str, NULL };
+
+    DRM_DEBUG("generating reset event\n");
+
+    snprintf(pid_str, ARRAY_SIZE(pid_str), "PID=%lu", pid);
+    snprintf(flags_str, ARRAY_SIZE(flags_str), "FLAGS=%u", flags);
+ kobject_uevent_env(>primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_reset_event);
+
  /**
   * drm_sysfs_connector_hotplug_event - generate a DRM uevent for 
any connector

   * change
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 6273cac44e47..63f00fe8054c 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -2,6 +2,8 @@
  #ifndef _DRM_SYSFS_H_
  #define _DRM_SYSFS_H_
+#define DRM_GPU_RESET_FLAG_VRAM_VALID (1 << 0)


Probably better to define that the other way around, e.g. 
DRM_GPU_RESET_FLAG_VRAM_LOST.


Apart from that looks good to me.


Got it, noted.
- Shashank


Christian.


+
  struct drm_device;
  struct device;
  struct drm_connector;
@@ -11,6 +13,7 @@ int drm_class_device_register(struct device *dev);
  void drm_class_device_unregister(struct device *dev);
  void drm_sysfs_hotplug_event(struct drm_device *dev);
+void drm_sysfs_reset_event(struct drm_device *dev, uint64_t pid, 
uint32_t reset_flags);
  void drm_sysfs_connector_hotplug_event(struct drm_connector 
*connector);
  void drm_sysfs_connector_status_event(struct drm_connector 
*connector,

    struct drm_property *property);






Re: [PATCH 1/2] drm: Add GPU reset sysfs event

2022-03-07 Thread Somalapuram, Amaranath


On 3/7/2022 9:56 PM, Shashank Sharma wrote:

From: Shashank Sharma

This patch adds a new sysfs event, which will indicate
the userland about a GPU reset, and can also provide
some information like:
- which PID was involved in the GPU reset
- what was the GPU status (using flags)

This patch also introduces the first flag of the flags
bitmap, which can be appended as and when required.

Cc: Alexandar Deucher
Cc: Christian Koenig
Signed-off-by: Shashank Sharma
---
  drivers/gpu/drm/drm_sysfs.c | 24 
  include/drm/drm_sysfs.h |  3 +++
  2 files changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 430e00b16eec..52a015161431 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -409,6 +409,30 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
  }
  EXPORT_SYMBOL(drm_sysfs_hotplug_event);
  
+/**

+ * drm_sysfs_reset_event - generate a DRM uevent to indicate GPU reset
+ * @dev: DRM device
+ * @pid: The process ID involve with the reset
+ * @flags: Any other information about the GPU status
+ *
+ * Send a uevent for the DRM device specified by @dev. This indicates
+ * user that a GPU reset has occurred, so that the interested client
+ * can take any recovery or profiling measure, when required.
+ */
+void drm_sysfs_reset_event(struct drm_device *dev, uint64_t pid, uint32_t 
flags)

We also need the process name:
char process_name[TASK_COMM_LEN];

+{
+   unsigned char pid_str[21], flags_str[15];
+   unsigned char reset_str[] = "RESET=1";
+   char *envp[] = { reset_str, pid_str, flags_str, NULL };
+
+   DRM_DEBUG("generating reset event\n");
+
+   snprintf(pid_str, ARRAY_SIZE(pid_str), "PID=%lu", pid);
+   snprintf(flags_str, ARRAY_SIZE(flags_str), "FLAGS=%u", flags);
+   kobject_uevent_env(>primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_reset_event);
+
  /**
   * drm_sysfs_connector_hotplug_event - generate a DRM uevent for any connector
   * change
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 6273cac44e47..63f00fe8054c 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -2,6 +2,8 @@
  #ifndef _DRM_SYSFS_H_
  #define _DRM_SYSFS_H_
  
+#define DRM_GPU_RESET_FLAG_VRAM_VALID (1 << 0)

+
  struct drm_device;
  struct device;
  struct drm_connector;
@@ -11,6 +13,7 @@ int drm_class_device_register(struct device *dev);
  void drm_class_device_unregister(struct device *dev);
  
  void drm_sysfs_hotplug_event(struct drm_device *dev);

+void drm_sysfs_reset_event(struct drm_device *dev, uint64_t pid, uint32_t 
reset_flags);
  void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
  void drm_sysfs_connector_status_event(struct drm_connector *connector,
  struct drm_property *property);

Re: [PATCH 2/2] drm/amdgpu: add work function for GPU reset event

2022-03-07 Thread Somalapuram, Amaranath



On 3/7/2022 9:56 PM, Shashank Sharma wrote:

From: Shashank Sharma 

This patch adds a work function, which will get scheduled
in event of a GPU reset, and will send a uevent to user with
some reset context information, like a PID and some flags.

The userspace can do some recovery and post-processing work
based on this event.

V2:
- Changed the name of the work to gpu_reset_event_work
   (Christian)
- Added a structure to accommodate some additional information
   (like a PID and some flags)

Cc: Alexander Deucher 
Cc: Christian Koenig 
Signed-off-by: Shashank Sharma 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  7 +++
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 +++
  2 files changed, 26 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d8b854fcbffa..7df219fe363f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -813,6 +813,11 @@ struct amd_powerplay {
  #define AMDGPU_RESET_MAGIC_NUM 64
  #define AMDGPU_MAX_DF_PERFMONS 4
  #define AMDGPU_PRODUCT_NAME_LEN 64
+struct amdgpu_reset_event_ctx {
+   uint64_t pid;
+   uint32_t flags;
+};
+
  struct amdgpu_device {
struct device   *dev;
struct pci_dev  *pdev;
@@ -1063,6 +1068,7 @@ struct amdgpu_device {
  
  	int asic_reset_res;

struct work_struct  xgmi_reset_work;
+   struct work_struct  gpu_reset_event_work;
struct list_headreset_list;
  
  	longgfx_timeout;

@@ -1097,6 +1103,7 @@ struct amdgpu_device {
pci_channel_state_t pci_channel_state;
  
  	struct amdgpu_reset_control *reset_cntl;

+   struct amdgpu_reset_event_ctx   reset_event_ctx;
uint32_t
ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
  
  	boolram_is_direct_mapped;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ed077de426d9..c43d099da06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@
  #include 
  
  #include 

+#include 
  
  MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");

  MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -3277,6 +3278,23 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device 
*adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
  }
  
+static void amdgpu_device_reset_event_func(struct work_struct *__work)

+{
+   struct amdgpu_device *adev = container_of(__work, struct amdgpu_device,
+ gpu_reset_event_work);


I am trying same thing but adev context is lost.

schedule_work() in amdgpu_do_asic_reset after getting/reading vram_lost 
= amdgpu_device_check_vram_lost(tmp_adev);


Regards,

S.Amarnath


+   struct amdgpu_reset_event_ctx *event_ctx = &adev->reset_event_ctx;
+
+   /*
+* A GPU reset has happened, indicate the userspace and pass the
+* following information:
+*  - pid of the process involved,
+*  - if the VRAM is valid or not,
+*  - indicate that userspace may want to collect the ftrace event
+* data from the trace event.
+*/
+   drm_sysfs_reset_event(&adev->ddev, event_ctx->pid, event_ctx->flags);
+}
+
  static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
  {
struct amdgpu_device *adev =
@@ -3525,6 +3543,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
  amdgpu_device_delay_enable_gfx_off);
  
  	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);

+   INIT_WORK(&adev->gpu_reset_event_work, amdgpu_device_reset_event_func);
  
  	adev->gfx.gfx_off_req_count = 1;

adev->pm.ac_power = power_supply_is_system_supplied() > 0;


[PATCH v13 2/2] drm/amdgpu: add reset register dump trace on GPU

2022-02-22 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 33 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..7c48fd716adb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,22 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   lockdep_assert_held(&adev->reset_sem);
+   dump_stack();
+
+   for (i = 0; i < adev->num_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4544,6 +4560,7 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v13 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-22 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 82 +
 2 files changed, 86 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int num_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..b91f21cec269 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,86 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[12];
+   int i, ret, len = 0;
+
+   if (*pos)
+   return 0;
+
+   memset(reg_offset, 0, 12);
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
+   up_read(&adev->reset_sem);
+   if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
+   return -EFAULT;
+
+   len += strlen(reg_offset);
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+   }
+
+   up_read(&adev->reset_sem);
+   *pos += len;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   uint32_t *tmp;
+   int ret, i = 0, len = 0;
+
+   do {
+   memset(reg_offset, 0, 11);
+   if (copy_from_user(reg_offset, buf + len,
+   min(10, ((int)size-len)))) {
+   ret = -EFAULT;
+   goto error_free;
+   }
+
+   tmp = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
+   if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
+   return -EINVAL;
+   goto error_free;
+   }
+
+   len += ret;
+   i++;
+   } while (len < size);
+
+   ret = down_write_killable(&adev->reset_sem);
+   if (ret)
+   goto error_free;
+
+   swap(adev->reset_dump_reg_list, tmp);
+   adev->num_regs = i;
+   up_write(&adev->reset_sem);
+   ret = size;
+
+error_free:
+   kfree(tmp);
+   return ret;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1752,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
			    &amdgpu_debugfs_test_ib_fops);
	debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
			    &amdgpu_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   &amdgpu_reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



Re: [PATCH v12 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-22 Thread Somalapuram, Amaranath



On 2/22/2022 9:08 PM, Christian König wrote:

Am 22.02.22 um 16:34 schrieb Somalapuram Amaranath:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 83 +
  2 files changed, 87 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int num_regs;
  };
    static inline struct amdgpu_device *drm_to_adev(struct drm_device 
*ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..0cc80aa1b5ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,87 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, ret, len = 0;
+
+    if (*pos)
+    return 0;
+
+    ret = down_read_killable(>reset_sem);
+    if (ret)
+    return ret;
+
+    for (i = 0; i < adev->num_regs; i++) {
+    sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
+    up_read(>reset_sem);
+    ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+    if (ret)
+    return -EFAULT;
+
+    len += strlen(reg_offset);
+    ret = down_read_killable(>reset_sem);
+    if (ret)
+    return ret;
+    }
+
+    up_read(>reset_sem);
+    if (ret)
+    return ret;


That if and return now looks superfluous.


+
+    *pos += len;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[12];
+    uint32_t *tmp;
+    int ret, i = 0, len = 0;
+
+    do {
+    memset(reg_offset, 0, 12);
+    if (copy_from_user(reg_offset, buf + len,
+    min(11, ((int)size-len {
+    ret = -EFAULT;
+    goto error_free;
+    }
+
+    tmp = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
+    if (sscanf(reg_offset, "%X %n", [i], ) != 1)


Does this also work when we write the registers separated with 
newlines into the debugfs file?

Yes. tested with with both.


Regards,
Christian.


+    goto error_free;
+
+    len += ret;
+    i++;
+    } while (len < size);
+
+    ret = down_write_killable(>reset_sem);
+    if (ret)
+    goto error_free;
+
+    swap(adev->reset_dump_reg_list, tmp);
+    adev->num_regs = i;
+    up_write(>reset_sem);
+    ret = size;
+
+error_free:
+    kfree(tmp);
+    return ret;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+    .owner = THIS_MODULE,
+    .read = amdgpu_reset_dump_register_list_read,
+    .write = amdgpu_reset_dump_register_list_write,
+    .llseek = default_llseek
+};
+
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
  struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1753,8 @@ int amdgpu_debugfs_init(struct amdgpu_device 
*adev)

  _debugfs_test_ib_fops);
  debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
  _debugfs_vm_info_fops);
+    debugfs_create_file("amdgpu_reset_dump_register_list", 0644, 
root, adev,

+    _reset_dump_register_list);
    adev->debugfs_vbios_blob.data = adev->bios;
  adev->debugfs_vbios_blob.size = adev->bios_size;




[PATCH v12 2/2] drm/amdgpu: add reset register dump trace on GPU

2022-02-22 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 33 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..7c48fd716adb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,22 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   lockdep_assert_held(>reset_sem);
+   dump_stack();
+
+   for (i = 0; i < adev->num_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4544,6 +4560,7 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v12 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-22 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 83 +
 2 files changed, 87 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int num_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..0cc80aa1b5ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,87 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, ret, len = 0;
+
+   if (*pos)
+   return 0;
+
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
+   up_read(&adev->reset_sem);
+   ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+   if (ret)
+   return -EFAULT;
+
+   len += strlen(reg_offset);
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+   }
+
+   up_read(&adev->reset_sem);
+   if (ret)
+   return ret;
+
+   *pos += len;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[12];
+   uint32_t *tmp;
+   int ret, i = 0, len = 0;
+
+   do {
+   memset(reg_offset, 0, 12);
+   if (copy_from_user(reg_offset, buf + len,
+   min(11, ((int)size-len)))) {
+   ret = -EFAULT;
+   goto error_free;
+   }
+
+   tmp = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
+   if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1)
+   goto error_free;
+
+   len += ret;
+   i++;
+   } while (len < size);
+
+   ret = down_write_killable(&adev->reset_sem);
+   if (ret)
+   goto error_free;
+
+   swap(adev->reset_dump_reg_list, tmp);
+   adev->num_regs = i;
+   up_write(&adev->reset_sem);
+   ret = size;
+
+error_free:
+   kfree(tmp);
+   return ret;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1753,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



[PATCH v11 2/2] drm/amdgpu: add reset register dump trace on GPU

2022-02-22 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 33 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..7c48fd716adb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,22 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   lockdep_assert_held(>reset_sem);
+   dump_stack();
+
+   for (i = 0; i < adev->num_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4544,6 +4560,7 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v11 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-22 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 81 +
 2 files changed, 85 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int num_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..df6d9fb69657 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,85 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, ret, len = 0;
+
+   if (*pos)
+   return 0;
+
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
+   up_read(&adev->reset_sem);
+   ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+   if (ret)
+   return ret;
+
+   len += strlen(reg_offset);
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+   }
+
+   up_read(&adev->reset_sem);
+   if (ret)
+   return ret;
+
+   *pos += len;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[12];
+   uint32_t *tmp;
+   int ret, i = 0, len = 0;
+
+   do {
+   memset(reg_offset, 0, 12);
+   ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));
+   if (ret)
+   goto error_free;
+
+   tmp = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
+   if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1)
+   goto error_free;
+
+   len += ret;
+   i++;
+   } while (len < size);
+
+   ret = down_write_killable(&adev->reset_sem);
+   if (ret)
+   goto error_free;
+
+   swap(adev->reset_dump_reg_list, tmp);
+   adev->num_regs = i;
+   up_write(&adev->reset_sem);
+   ret = size;
+
+error_free:
+   kfree(tmp);
+   return ret;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1751,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



[PATCH v10 2/2] drm/amdgpu: add reset register dump trace on GPU reset

2022-02-22 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16 
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 32 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..0eedcd4e2227 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,21 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   lockdep_assert_held(>reset_sem);
+
+   for (i = 0; i < adev->num_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4544,6 +4559,7 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v10 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-22 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 92 +
 2 files changed, 96 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int num_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..733ee54efa34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,96 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, ret, len = 0;
+
+   if (*pos)
+   return 0;
+
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+   up_read(&adev->reset_sem);
+   ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+   if (ret)
+   goto error;
+
+   len += strlen(reg_offset);
+   ret = down_read_killable(&adev->reset_sem);
+   if (ret)
+   return ret;
+   }
+
+   up_read(&adev->reset_sem);
+   ret = copy_to_user(buf + len, "\n", 1);
+   if (ret)
+   return -EFAULT;
+
+   len++;
+   *pos += len;
+
+   return len;
+error:
+   up_read(&adev->reset_sem);
+   return -EFAULT;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   uint32_t *tmp;
+   int ret, i = 0, len = 0;
+
+   do {
+   reg_offset = reg_temp;
+   memset(reg_offset, 0, 11);
+   ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));
+   if (ret)
+   goto error_free;
+
+   reg = strsep(&reg_offset, " ");
+   tmp = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
+   ret = kstrtouint(reg, 16, &tmp[i]);
+   if (ret)
+   goto error_free;
+
+   len += strlen(reg) + 1;
+   i++;
+
+   } while (len < size);
+
+   ret = down_write_killable(&adev->reset_sem);
+   if (ret)
+   goto error_free;
+
+   swap(adev->reset_dump_reg_list, tmp);
+   adev->num_regs = i;
+   up_write(&adev->reset_sem);
+   ret = size;
+
+error_free:
+   kfree(tmp);
+   return ret;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1762,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



Re: [PATCH v9 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-21 Thread Somalapuram, Amaranath
[AMD Official Use Only]



On 2/21/2022 7:58 PM, Christian König wrote:
Am 21.02.22 um 15:19 schrieb Somalapuram, Amaranath:

[AMD Official Use Only]


On 2/21/2022 7:09 PM, Christian König wrote:


Am 21.02.22 um 14:34 schrieb Somalapuram Amaranath:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
<mailto:amaranath.somalapu...@amd.com>
---
   drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
   drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 96 +
   2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 struct amdgpu_reset_control *reset_cntl;
   uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+/* reset dump register */
+uint32_t*reset_dump_reg_list;
+int num_regs;
   };
 static inline struct amdgpu_device *drm_to_adev(struct drm_device
*ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..69c0a28deeac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,100 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
   DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
   amdgpu_debugfs_sclk_set, "%llu\n");
   +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+char __user *buf, size_t size, loff_t *pos)
+{
+struct amdgpu_device *adev = (struct amdgpu_device
*)file_inode(f)->i_private;
+char reg_offset[11];
+int i, ret, len = 0;
+
+if (*pos)
+return 0;
+
+ret = down_read_killable(>reset_sem);
+
+if (ret)
+return ret;
We usually don't have an empty line between function call and checking
the return code.


+
+for (i = 0; i < adev->num_regs; i++) {
+down_read(>reset_sem);
That here will just crash because we have already locked the semaphore
before the loop.
unfortunately it did not crash. Sorry I misunderstood your earlier comments.

+sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+up_read(>reset_sem);
+ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+if (ret)
+goto error;
+
+len += strlen(reg_offset);
And here the down_read_killable() is missing.


+}
+
+up_read(>reset_sem);
+ret = copy_to_user(buf + len, "\n", 1);
+
+if (ret)
+return -EFAULT;
+
+len++;
+*pos += len;
+
+return len;
+error:
+up_read(>reset_sem);
+return -EFAULT;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+const char __user *buf, size_t size, loff_t *pos)
+{
+struct amdgpu_device *adev = (struct amdgpu_device
*)file_inode(f)->i_private;
+char *reg_offset, *reg, reg_temp[11];
+uint32_t *tmp;
+int ret, i = 0, len = 0;
+
+do {
+reg_offset = reg_temp;
I think you can just drop the reg_offset variable;
strsep takes only a pointer as input; this is a workaround.

Ah, now I see what you are doing here.

Please don't do it like that. Better use memchr() instead.

memchr will not work. I couldn't find any other string API I can use.

other references similar to code:
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;

if (amdgpu_virtual_display) {
const char *pci_address_name = pci_name(adev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
while ((pciaddname_tmp = strsep(_tmp, ";"))) {
pciaddname = strsep(_tmp, ",");


+memset(reg_offset, 0, 11);
+ret = copy_from_user(reg_offset, buf + len, min(11,
((int)size-len)));
+
+if (ret)
+goto error_free;
+
+reg = strsep(_offset, " ");
+tmp = krealloc_array(tmp, 1, sizeof(uint32_t), GFP_KERNEL);
That must be krealloc_array(tmp, i, ... not krealloc_array(tmp, 1, ... !
I thought it would append (if not, it should have crashed or produced a kernel dump)

No, krealloc_array works similar to realloc() in userspace.

You need to give it the full size of the necessary space.

Regards,
Christian.


Regards,
Christian.


+ret = kstrtouint(reg, 16, [i]);
+
+if (ret)
+goto error_free;
+
+len += strlen(reg) + 1;
+i++;
+
+} while (len < size);
+
+ret = down_write_killable(>reset_sem);
+
+if (ret)
+goto error_free;
+
+swap(adev->reset_dump_reg_list, tmp);
+

Re: [PATCH v9 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-21 Thread Somalapuram, Amaranath
[AMD Official Use Only]


On 2/21/2022 7:09 PM, Christian König wrote:
>
>
> Am 21.02.22 um 14:34 schrieb Somalapuram Amaranath:
>> List of register populated for dump collection during the GPU reset.
>>
>> Signed-off-by: Somalapuram Amaranath 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 96 +
>>   2 files changed, 100 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index b85b67a88a3d..6e35f2c4c869 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -1097,6 +1097,10 @@ struct amdgpu_device {
>>     struct amdgpu_reset_control *reset_cntl;
>>   uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
>> +
>> +    /* reset dump register */
>> +    uint32_t    *reset_dump_reg_list;
>> +    int num_regs;
>>   };
>>     static inline struct amdgpu_device *drm_to_adev(struct drm_device 
>> *ddev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>> index 164d6a9e9fbb..69c0a28deeac 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>> @@ -1609,6 +1609,100 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
>>   DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
>>   amdgpu_debugfs_sclk_set, "%llu\n");
>>   +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
>> +    char __user *buf, size_t size, loff_t *pos)
>> +{
>> +    struct amdgpu_device *adev = (struct amdgpu_device 
>> *)file_inode(f)->i_private;
>> +    char reg_offset[11];
>> +    int i, ret, len = 0;
>> +
>> +    if (*pos)
>> +    return 0;
>> +
>> +    ret = down_read_killable(>reset_sem);
>> +
>> +    if (ret)
>> +    return ret;
>
> We usually don't have an empty line between function call and checking 
> the return code.
>
>> +
>> +    for (i = 0; i < adev->num_regs; i++) {
>> +    down_read(>reset_sem);
>
> That here will just crash because we have already locked the semaphore 
> before the loop.
>
unfortunately it did not crash. Sorry I misunderstood your earlier comments.
>> +    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
>> +    up_read(>reset_sem);
>> +    ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
>> +
>> +    if (ret)
>> +    goto error;
>> +
>> +    len += strlen(reg_offset);
>
> And here the down_read_killable() is missing.
>
>> +    }
>> +
>> +    up_read(>reset_sem);
>> +    ret = copy_to_user(buf + len, "\n", 1);
>> +
>> +    if (ret)
>> +    return -EFAULT;
>> +
>> +    len++;
>> +    *pos += len;
>> +
>> +    return len;
>> +error:
>> +    up_read(>reset_sem);
>> +    return -EFAULT;
>> +}
>> +
>> +static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
>> +    const char __user *buf, size_t size, loff_t *pos)
>> +{
>> +    struct amdgpu_device *adev = (struct amdgpu_device 
>> *)file_inode(f)->i_private;
>> +    char *reg_offset, *reg, reg_temp[11];
>> +    uint32_t *tmp;
>> +    int ret, i = 0, len = 0;
>> +
>> +    do {
>> +    reg_offset = reg_temp;
>
> I think you can just drop the reg_offset variable;
>
strsep takes only pointer as input, this is workaround.
>> +    memset(reg_offset, 0, 11);
>> +    ret = copy_from_user(reg_offset, buf + len, min(11, 
>> ((int)size-len)));
>> +
>> +    if (ret)
>> +    goto error_free;
>> +
>> +    reg = strsep(_offset, " ");
>> +    tmp = krealloc_array(tmp, 1, sizeof(uint32_t), GFP_KERNEL);
>
> That must be krealloc_array(tmp, i, ... not krealloc_array(tmp, 1, ... !
I thought it will append (if not it should have crashed or some kernel dump)
> Regards,
> Christian.
>
>> +    ret = kstrtouint(reg, 16, [i]);
>> +
>> +    if (ret)
>> +    goto error_free;
>> +
>> +    len += strlen(reg) + 1;
>> +    i++;
>> +
>> +    } while (len < size);
>> +
>> +    ret = down_write_killable(>reset_sem);
>> +
>> +    if (ret)
>> +    goto error_free;
>>

[PATCH v9 2/2] drm/amdgpu: add reset register dump trace on GPU reset

2022-02-21 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16 
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 32 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..0eedcd4e2227 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,21 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   lockdep_assert_held(>reset_sem);
+
+   for (i = 0; i < adev->num_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4544,6 +4559,7 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v9 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-21 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 96 +
 2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int num_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..69c0a28deeac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,100 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, ret, len = 0;
+
+   if (*pos)
+   return 0;
+
+   ret = down_read_killable(>reset_sem);
+
+   if (ret)
+   return ret;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   down_read(>reset_sem);
+   sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+   up_read(>reset_sem);
+   ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+   if (ret)
+   goto error;
+
+   len += strlen(reg_offset);
+   }
+
+   up_read(>reset_sem);
+   ret = copy_to_user(buf + len, "\n", 1);
+
+   if (ret)
+   return -EFAULT;
+
+   len++;
+   *pos += len;
+
+   return len;
+error:
+   up_read(>reset_sem);
+   return -EFAULT;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   uint32_t *tmp;
+   int ret, i = 0, len = 0;
+
+   do {
+   reg_offset = reg_temp;
+   memset(reg_offset, 0, 11);
+   ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));
+
+   if (ret)
+   goto error_free;
+
+   reg = strsep(_offset, " ");
+   tmp = krealloc_array(tmp, 1, sizeof(uint32_t), GFP_KERNEL);
+   ret = kstrtouint(reg, 16, [i]);
+
+   if (ret)
+   goto error_free;
+
+   len += strlen(reg) + 1;
+   i++;
+
+   } while (len < size);
+
+   ret = down_write_killable(>reset_sem);
+
+   if (ret)
+   goto error_free;
+
+   swap(adev->reset_dump_reg_list, tmp);
+   adev->num_regs = i;
+   up_write(>reset_sem);
+   ret = size;
+
+error_free:
+   kfree(tmp);
+   return ret;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1766,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



Re: [PATCH v8 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-21 Thread Somalapuram, Amaranath
[AMD Official Use Only]


On 2/21/2022 2:45 PM, Christian König wrote:
>
>
> Am 21.02.22 um 08:15 schrieb Somalapuram Amaranath:
>> List of register populated for dump collection during the GPU reset.
>>
>> Signed-off-by: Somalapuram Amaranath 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h |   4 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 114 
>>   2 files changed, 118 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index b85b67a88a3d..6e35f2c4c869 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -1097,6 +1097,10 @@ struct amdgpu_device {
>>     struct amdgpu_reset_control *reset_cntl;
>>   uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
>> +
>> +    /* reset dump register */
>> +    uint32_t    *reset_dump_reg_list;
>> +    int num_regs;
>>   };
>>     static inline struct amdgpu_device *drm_to_adev(struct drm_device 
>> *ddev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>> index 164d6a9e9fbb..14ad9610f805 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>> @@ -1609,6 +1609,118 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
>>   DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
>>   amdgpu_debugfs_sclk_set, "%llu\n");
>>   +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
>> +    char __user *buf, size_t size, loff_t *pos)
>> +{
>> +    struct amdgpu_device *adev = (struct amdgpu_device 
>> *)file_inode(f)->i_private;
>> +    char reg_offset[11];
>> +    uint32_t num_regs;
>> +    int i, ret, len = 0;
>> +
>> +    if (*pos)
>> +    return 0;
>> +
>> +    ret = down_read_killable(>reset_sem);
>> +
>> +    if (ret)
>> +    return ret;
>> +
>> +    num_regs = adev->num_regs;
>> +
>> +    up_read(>reset_sem);
>> +
>> +    if (num_regs == 0)
>> +    return 0;
>
> I think we should drop that cause it just avoids the final \n.
>
ok.
>> +
>> +    for (i = 0; i < num_regs; i++) {
>
> That's pretty close, but one problem is still that it is possible that 
> the number of register becomes much smaller while this loop runs.
>
> Try it like this instead:
>
> down_read_killable(...)
> for (i = 0; i < adev->num_regs; ++i) {
>     sprintf(...)
>     up_read(...);
>
>     copy_to_user(
>
>     down_read_killable(...)
> }
> up_read().
>
I created a local num_regs to avoid holding the lock outside the loop. I guess you mean 
to remove it?

so we can hold up_read inside the loop ?

>> +
>> +    ret = down_read_killable(>reset_sem);
>> +
>> +    if (ret)
>> +    return ret;
>> +
>> +    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
>> +
>> +    up_read(>reset_sem);
>> +
>> +    ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
>> +
>> +    if (ret)
>> +    return -EFAULT;
>> +
>> +    len += strlen(reg_offset);
>> +    }
>> +
>> +    ret = copy_to_user(buf + len, "\n", 1);
>> +
>> +    if (ret)
>> +    return -EFAULT;
>> +
>> +    len++;
>> +    *pos += len;
>> +
>> +    return len;
>> +}
>> +
>> +static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
>> +    const char __user *buf, size_t size, loff_t *pos)
>> +{
>> +    struct amdgpu_device *adev = (struct amdgpu_device 
>> *)file_inode(f)->i_private;
>> +    char *reg_offset, *reg, reg_temp[11];
>> +    uint32_t *tmp_list;
>> +    int ret, i = 0, len = 0;
>> +
>> +    do {
>> +    reg_offset = reg_temp;
>> +    memset(reg_offset, 0, 11);
>> +    ret = copy_from_user(reg_offset, buf + len, min(11, 
>> ((int)size-len)));
>> +
>> +    if (ret) {
>> +    kfree(tmp_list);
>> +    return -EFAULT;
>> +    }
>> +
>> +    reg = strsep(_offset, " ");
>> +    tmp_list = krealloc_array(tmp_list,
>> +    1, sizeof(uint32_t), GFP_KERNEL);
>> +    ret = kstrtouint(reg, 16, _list[i]);
>> +
>> +    if (ret) {
>> +    kfree(tmp_list);
>>

[PATCH v8 2/2] drm/amdgpu: add reset register dump trace on GPU reset

2022-02-20 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 30 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..25c38a4e13ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,19 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4544,6 +4557,7 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v8 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-20 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |   4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 114 
 2 files changed, 118 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int num_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..14ad9610f805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,118 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   uint32_t num_regs;
+   int i, ret, len = 0;
+
+   if (*pos)
+   return 0;
+
+   ret = down_read_killable(>reset_sem);
+
+   if (ret)
+   return ret;
+
+   num_regs = adev->num_regs;
+
+   up_read(>reset_sem);
+
+   if (num_regs == 0)
+   return 0;
+
+   for (i = 0; i < num_regs; i++) {
+
+   ret = down_read_killable(>reset_sem);
+
+   if (ret)
+   return ret;
+
+   sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+
+   up_read(>reset_sem);
+
+   ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+   if (ret)
+   return -EFAULT;
+
+   len += strlen(reg_offset);
+   }
+
+   ret = copy_to_user(buf + len, "\n", 1);
+
+   if (ret)
+   return -EFAULT;
+
+   len++;
+   *pos += len;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   uint32_t *tmp_list;
+   int ret, i = 0, len = 0;
+
+   do {
+   reg_offset = reg_temp;
+   memset(reg_offset, 0, 11);
+   ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));
+
+   if (ret) {
+   kfree(tmp_list);
+   return -EFAULT;
+   }
+
+   reg = strsep(_offset, " ");
+   tmp_list = krealloc_array(tmp_list,
+   1, sizeof(uint32_t), GFP_KERNEL);
+   ret = kstrtouint(reg, 16, _list[i]);
+
+   if (ret) {
+   kfree(tmp_list);
+   return -EFAULT;
+   }
+
+   len += strlen(reg) + 1;
+   i++;
+
+   } while (len < size);
+
+   ret = down_read_killable(>reset_sem);
+
+   if (ret) {
+   kfree(tmp_list);
+   return ret;
+   }
+
+   kfree(adev->reset_dump_reg_list);
+
+   swap(adev->reset_dump_reg_list, tmp_list);
+   adev->num_regs = i;
+
+   up_read(>reset_sem);
+
+   return size;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1784,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



Re: [PATCH v7 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-17 Thread Somalapuram, Amaranath



On 2/17/2022 8:36 PM, Christian König wrote:

Am 17.02.22 um 15:29 schrieb Somalapuram Amaranath:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |   4 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 112 
  2 files changed, 116 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int num_regs;
  };
    static inline struct amdgpu_device *drm_to_adev(struct drm_device 
*ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..ad807350d13e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,116 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, ret, len = 0;
+
+    if (*pos)
+    return 0;
+
+    ret = down_read_killable(>reset_sem);


Using the _killable() variant is a really good idea here.


+
+    if (ret)
+    return ret;
+
+    if (adev->num_regs == 0)
+    return 0;
+
+    for (i = 0; i < adev->num_regs; i++) {
+    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+    ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));


Uff, I'm not 100% sure if we can do copy_to_user without dropping the 
lock.



then I need to use kmalloc or krealloc_array.

Regards,

S.Amarnath


+
+    if (ret)
+    return -EFAULT;


But returning here without dropping the lock is certainly incorrect.


+
+    len += strlen(reg_offset);
+    }
+
+    up_read(>reset_sem);
+
+    ret = copy_to_user(buf + len, "\n", 1);
+
+    if (ret)
+    return -EFAULT;
+
+    len++;
+    *pos += len;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg, reg_temp[11];
+    uint32_t reg_list[128];
+    int ret, i = 0, len = 0;
+
+    do {
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));

+
+    if (ret)
+    return -EFAULT;
+
+    if (strncmp(reg_offset, "clear", 5) == 0) {


Please completely drop the idea with the clear here, that should be 
unnecessary.



+
+    ret = down_read_killable(>reset_sem);
+
+    if (ret)
+    return ret;
+
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    adev->num_regs = 0;
+
+    up_read(>reset_sem);
+
+    return size;
+    }
+
+    reg = strsep(_offset, " ");
+    ret = kstrtouint(reg, 16, _list[i]);
+
+    if (ret)
+    return -EFAULT;
+
+    len += strlen(reg) + 1;
+    i++;
+
+    } while (len < size);
+
+    adev->reset_dump_reg_list = 
krealloc_array(adev->reset_dump_reg_list,

+    i, sizeof(uint32_t), GFP_KERNEL);


Well that still doesn't looks like what we need.

Here is once more the roughly what the code should do:

unsigned int i;
uint32_t *tmp;

i = 0;
do {
    tmp = krealloc_array(tmp, i, sizeof(uint32_t), GFP_KERNEL);
    copy_from_user().
    i++;
} while (len < size);

down_write_killable();
...

swap(adev->reset_dump_reg_list, tmp);
adev->num_regs = i;
up_write();

Regards,
Christian.


+
+    ret = down_read_killable(>reset_sem);
+
+    if (ret)
+    return ret;
+
+    adev->num_regs = i;
+    memcpy(adev->reset_dump_reg_list, reg_list,
+    sizeof(uint32_t) * adev->num_regs);
+
+    up_read(>reset_sem);
+
+    return size;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+    .owner = THIS_MODULE,
+    .read = amdgpu_reset_dump_register_list_read,
+    .write = amdgpu_reset_dump_register_list_write,
+    .llseek = default_llseek
+};
+
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
  struct dentry *

[PATCH v7 2/2] drm/amdgpu: add reset register dump trace on GPU reset

2022-02-17 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 30 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..25c38a4e13ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,19 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4544,6 +4557,7 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v7 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-17 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |   4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 112 
 2 files changed, 116 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..6e35f2c4c869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int num_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..ad807350d13e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,116 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, ret, len = 0;
+
+   if (*pos)
+   return 0;
+
+   ret = down_read_killable(>reset_sem);
+
+   if (ret)
+   return ret;
+
+   if (adev->num_regs == 0)
+   return 0;
+
+   for (i = 0; i < adev->num_regs; i++) {
+   sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+   ret = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+   if (ret)
+   return -EFAULT;
+
+   len += strlen(reg_offset);
+   }
+
+   up_read(>reset_sem);
+
+   ret = copy_to_user(buf + len, "\n", 1);
+
+   if (ret)
+   return -EFAULT;
+
+   len++;
+   *pos += len;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   uint32_t reg_list[128];
+   int ret, i = 0, len = 0;
+
+   do {
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));
+
+   if (ret)
+   return -EFAULT;
+
+   if (strncmp(reg_offset, "clear", 5) == 0) {
+
+   ret = down_read_killable(>reset_sem);
+
+   if (ret)
+   return ret;
+
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   adev->num_regs = 0;
+
+   up_read(>reset_sem);
+
+   return size;
+   }
+
+   reg = strsep(_offset, " ");
+   ret = kstrtouint(reg, 16, _list[i]);
+
+   if (ret)
+   return -EFAULT;
+
+   len += strlen(reg) + 1;
+   i++;
+
+   } while (len < size);
+
+   adev->reset_dump_reg_list =  krealloc_array(adev->reset_dump_reg_list,
+   i, sizeof(uint32_t), 
GFP_KERNEL);
+
+   ret = down_read_killable(>reset_sem);
+
+   if (ret)
+   return ret;
+
+   adev->num_regs = i;
+   memcpy(adev->reset_dump_reg_list, reg_list,
+   sizeof(uint32_t) * adev->num_regs);
+
+   up_read(>reset_sem);
+
+   return size;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1782,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+  

Re: [PATCH v6 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-16 Thread Somalapuram, Amaranath


On 2/16/2022 8:26 PM, Christian König wrote:

Am 16.02.22 um 14:11 schrieb Somalapuram, Amaranath:


On 2/16/2022 3:41 PM, Christian König wrote:


Am 16.02.22 um 10:49 schrieb Somalapuram Amaranath:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  5 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 
+

  2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..57965316873b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,11 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int n_regs;
+    struct mutex    reset_dump_mutex;


I think we should rather use the reset lock for this instead of 
introducing just another mutex.



  };
    static inline struct amdgpu_device *drm_to_adev(struct 
drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..faf985c7cb93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,98 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, r, len = 0;
+
+    if (*pos)
+    return 0;
+
+    if (adev->n_regs == 0)
+    return 0;
+
+    for (i = 0; i < adev->n_regs; i++) {
+    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+    r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+    if (r)
+    return -EFAULT;
+
+    len += strlen(reg_offset);
+    }


You need to hold the lock protecting adev->reset_dump_reg_list and 
adev->n_regs while accessing those.


(BTW: num_regs instead of n_regs would match more what we use 
elsewhere, but is not a must have).


This is read function for user and returns only list of reg offsets, 
I did not understand correctly !

+
+    r = copy_to_user(buf + len, "\n", 1);
+
+    if (r)
+    return -EFAULT;
+
+    len++;
+    *pos += len;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg, reg_temp[11];
+    static int alloc_count;
+    int ret, i = 0, len = 0;
+
+    do {
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));

+
+    if (ret)
+    goto failed;
+
+    reg = strsep(_offset, " ");
+
+    if (alloc_count <= i) {



+ adev->reset_dump_reg_list =  krealloc_array(
+    adev->reset_dump_reg_list, 1,
+    sizeof(uint32_t), GFP_KERNEL);
+    alloc_count++;
+    }
+
+    ret = kstrtouint(reg, 16, >reset_dump_reg_list[i]);


This here is modifying adev->reset_dump_reg_list as well and so must 
be protected by a lock as well.


The tricky part is that we can't allocate memory while holding this 
lock (because we need it during reset as well).


One solution for this is to read the register list into a local 
array first and when that's done swap the local array with the one 
in adev->reset_dump_reg_list while holding the lock.



krealloc_array should be inside lock or outside lock? this may be problem.

Regards,

S.Amarnath


Regards,
Christian.


There are 2 situations:
1st time creating list n_regs will be 0 and trace event will not be 
triggered
2nd time while updating list n_regs is already set and 
adev->reset_dump_reg_list will have some offsets address 
(hypothetically speaking *during reset + update* read values from 
RREG32 will mix up of old list and new list)

its only critical when its freed and n_regs is not 0


No, that won't work like this. See you *must* always hold a lock when 
reading or writing the array.


Otherwise it is perfectly possible that one thread sees only halve of 
the updates of another thread.


The only alternative would be RCU, atomic replace and manual barrier 
handling, but that would be complete overkill for that feature.


Regards,
Christian.



Regards,
S.Amarnath

+
+   

Re: [PATCH v6 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-16 Thread Somalapuram, Amaranath



On 2/16/2022 6:47 PM, Lazar, Lijo wrote:



On 2/16/2022 4:39 PM, Somalapuram, Amaranath wrote:


On 2/16/2022 4:13 PM, Lazar, Lijo wrote:



On 2/16/2022 4:04 PM, Somalapuram, Amaranath wrote:


On 2/16/2022 3:45 PM, Lazar, Lijo wrote:



On 2/16/2022 3:19 PM, Somalapuram Amaranath wrote:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  5 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 
+

  2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..57965316873b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,11 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int n_regs;
+    struct mutex    reset_dump_mutex;
  };
    static inline struct amdgpu_device *drm_to_adev(struct 
drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..faf985c7cb93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,98 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, 
NULL,

  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct 
file *f,

+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, r, len = 0;
+
+    if (*pos)
+    return 0;
+
+    if (adev->n_regs == 0)
+    return 0;
+
+    for (i = 0; i < adev->n_regs; i++) {
+    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+    r = copy_to_user(buf + len, reg_offset, 
strlen(reg_offset));

+
+    if (r)
+    return -EFAULT;
+
+    len += strlen(reg_offset);
+    }
+
+    r = copy_to_user(buf + len, "\n", 1);
+
+    if (r)
+    return -EFAULT;
+
+    len++;
+    *pos += len;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file 
*f,

+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg, reg_temp[11];
+    static int alloc_count;


This being static what happens when it is called on a second device?

Thanks,
Lijo


I tried to avoid adding to adev. It won't work for multiple devices.


Hmm. This is not friendly for a single device either. Someone could just 
parse a text file of reg offsets and do

sudo echo offset > file

This will overwrite whatever is there. Instead you may define a 
syntax like

sudo echo 0x000 > file =>  Clears all
sudo echo offset > file => Append to the existing set.

Taking all offsets in one go may not be needed.

Thanks,
Lijo



0x000 can be an offset for some registers!


It's indeed a valid register called MM_INDEX register. The register 
doesn't have any meaning in standalone.



This is the application's responsibility; any wrong data should clear the list.
The application can read back the list for confirmation.



It needs to be done by user app anyway. This is more about how 
convenient the interface is. Probably you could switch to a user 
standpoint and try to add some 20-30 registers to the list. Then steps 
needed to add a revised list.




For clear we can send text “clear”
On the next write, should we replace or append? (I think with "clear", append 
is the better option)


Christian which is better ?

Regards,
S.Amarnath

Thanks,
Lijo


Regards,
S.Amarnath

+    int ret, i = 0, len = 0;
+
+    do {
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));

+
+    if (ret)
+    goto failed;
+
+    reg = strsep(_offset, " ");
+
+    if (alloc_count <= i) {
+    adev->reset_dump_reg_list = krealloc_array(
+ adev->reset_dump_reg_list, 1,
+    sizeof(uint32_t), GFP_KERNEL);
+    alloc_count++;
+    }
+
+    ret = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+    if (ret)
+    goto failed;
+
+    len += strlen(reg) + 1;
+    i++;
+
+    } while (len < size);
+
+    adev->n_regs = i;
+
+    return size;
+
+failed:
+    mutex_lock(>reset_dump_mutex);
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    alloc_count = 0;
+    adev->n_r

Re: [PATCH v6 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-16 Thread Somalapuram, Amaranath


On 2/16/2022 3:41 PM, Christian König wrote:

Am 16.02.22 um 10:49 schrieb Somalapuram Amaranath:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  5 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 +
  2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..57965316873b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,11 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int n_regs;
+    struct mutex    reset_dump_mutex;


I think we should rather use the reset lock for this instead of 
introducing just another mutex.



  };
    static inline struct amdgpu_device *drm_to_adev(struct drm_device 
*ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..faf985c7cb93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,98 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, r, len = 0;
+
+    if (*pos)
+    return 0;
+
+    if (adev->n_regs == 0)
+    return 0;
+
+    for (i = 0; i < adev->n_regs; i++) {
+    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+    r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+    if (r)
+    return -EFAULT;
+
+    len += strlen(reg_offset);
+    }


You need to hold the lock protecting adev->reset_dump_reg_list and 
adev->n_regs while accessing those.


(BTW: num_regs instead of n_regs would match more what we use 
elsewhere, but is not a must have).


This is read function for user and returns only list of reg offsets, I 
did not understand correctly !

+
+    r = copy_to_user(buf + len, "\n", 1);
+
+    if (r)
+    return -EFAULT;
+
+    len++;
+    *pos += len;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg, reg_temp[11];
+    static int alloc_count;
+    int ret, i = 0, len = 0;
+
+    do {
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));

+
+    if (ret)
+    goto failed;
+
+    reg = strsep(_offset, " ");
+
+    if (alloc_count <= i) {



+    adev->reset_dump_reg_list =  krealloc_array(
+    adev->reset_dump_reg_list, 1,
+    sizeof(uint32_t), GFP_KERNEL);
+    alloc_count++;
+    }
+
+    ret = kstrtouint(reg, 16, >reset_dump_reg_list[i]);


This here is modifying adev->reset_dump_reg_list as well and so must 
be protected by a lock as well.


The tricky part is that we can't allocate memory while holding this 
lock (because we need it during reset as well).


One solution for this is to read the register list into a local array 
first and when that's done swap the local array with the one in 
adev->reset_dump_reg_list while holding the lock.


Regards,
Christian.


There are 2 situations:
1st time creating list n_regs will be 0 and trace event will not be 
triggered
2nd time while updating list n_regs is already set and 
adev->reset_dump_reg_list will have some offsets address (hypothetically 
speaking *during reset + update* read values from RREG32 will mix up of 
old list and new list)

its only critical when its freed and n_regs is not 0

Regards,
S.Amarnath

+
+    if (ret)
+    goto failed;
+
+    len += strlen(reg) + 1;
+    i++;
+
+    } while (len < size);
+
+    adev->n_regs = i;
+
+    return size;
+
+failed:
+    mutex_lock(>reset_dump_mutex);
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    alloc_count = 0;
+    adev->n_regs = 0;
+    mutex_unlock(>reset_dump_mutex);
+    return -EFAULT;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+    .owner = THIS_MODULE,
+    .read = amdgpu_reset_dump_register

Re: [PATCH v6 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-16 Thread Somalapuram, Amaranath



On 2/16/2022 4:13 PM, Lazar, Lijo wrote:



On 2/16/2022 4:04 PM, Somalapuram, Amaranath wrote:


On 2/16/2022 3:45 PM, Lazar, Lijo wrote:



On 2/16/2022 3:19 PM, Somalapuram Amaranath wrote:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  5 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 
+

  2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..57965316873b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,11 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int n_regs;
+    struct mutex    reset_dump_mutex;
  };
    static inline struct amdgpu_device *drm_to_adev(struct 
drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..faf985c7cb93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,98 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, r, len = 0;
+
+    if (*pos)
+    return 0;
+
+    if (adev->n_regs == 0)
+    return 0;
+
+    for (i = 0; i < adev->n_regs; i++) {
+    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+    r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+    if (r)
+    return -EFAULT;
+
+    len += strlen(reg_offset);
+    }
+
+    r = copy_to_user(buf + len, "\n", 1);
+
+    if (r)
+    return -EFAULT;
+
+    len++;
+    *pos += len;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg, reg_temp[11];
+    static int alloc_count;


This being static what happens when it is called on a second device?

Thanks,
Lijo


I tried to avoid adding to adev. It won't work for multiple devices.


Hmm. This is not friendly for a single device either. Someone could just 
parse a text file of reg offsets and do

sudo echo offset > file

This will overwrite whatever is there. Instead you may define a syntax 
like

sudo echo 0x000 > file =>  Clears all
sudo echo offset > file => Append to the existing set.

Taking all offsets in one go may not be needed.

Thanks,
Lijo



0x000 can be an offset for some registers!
This is the application's responsibility; any wrong data should clear the list.
The application can read back the list for confirmation.

Regards,
S.Amarnath

+    int ret, i = 0, len = 0;
+
+    do {
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));

+
+    if (ret)
+    goto failed;
+
+    reg = strsep(_offset, " ");
+
+    if (alloc_count <= i) {
+    adev->reset_dump_reg_list =  krealloc_array(
+    adev->reset_dump_reg_list, 1,
+    sizeof(uint32_t), GFP_KERNEL);
+    alloc_count++;
+    }
+
+    ret = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+    if (ret)
+    goto failed;
+
+    len += strlen(reg) + 1;
+    i++;
+
+    } while (len < size);
+
+    adev->n_regs = i;
+
+    return size;
+
+failed:
+    mutex_lock(>reset_dump_mutex);
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    alloc_count = 0;
+    adev->n_regs = 0;
+    mutex_unlock(>reset_dump_mutex);
+    return -EFAULT;
+}
+
+
+
+static const struct file_operations 
amdgpu_reset_dump_register_list = {

+    .owner = THIS_MODULE,
+    .read = amdgpu_reset_dump_register_list_read,
+    .write = amdgpu_reset_dump_register_list_write,
+    .llseek = default_llseek
+};
+
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
  struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1618,6 +1710,7 @@ int amdgpu_debugfs_init(struct amdgpu_device 
*adev)

  if (!debugfs_initialized())
  return 0;
  +    mutex_init(>reset_dump_mutex);
  ent = debugfs_create_file(&

Re: [PATCH v4 2/2] drm/amdgpu: add reset register dump trace on GPU reset

2022-02-16 Thread Somalapuram, Amaranath



On 2/15/2022 10:09 PM, Andrey Grodzovsky wrote:


On 2022-02-15 05:12, Somalapuram Amaranath wrote:

Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 -
  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
  2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 1e651b959141..ff21262c6fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,19 @@ int amdgpu_device_pre_asic_reset(struct 
amdgpu_device *adev,

  return r;
  }
  +static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+    uint32_t reg_value;
+    int i;
+
+    for (i = 0; i < adev->n_regs; i++) {
+    reg_value = RREG32(adev->reset_dump_reg_list[i]);
+ trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], reg_value);
+    }
+
+    return 0;
+}
+
  int amdgpu_do_asic_reset(struct list_head *device_list_handle,
   struct amdgpu_reset_context *reset_context)
  {
@@ -4567,8 +4580,10 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,

  tmp_adev->gmc.xgmi.pending_reset = false;
  if (!queue_work(system_unbound_wq, 
_adev->xgmi_reset_work))

  r = -EALREADY;
-    } else
+    } else {
+    amdgpu_reset_reg_dumps(tmp_adev);
  r = amdgpu_asic_reset(tmp_adev);
+    }



Is there any particular reason you only dump registers in single ASIC 
case and not for XGMI ?


Andrey


Not really, should I move it to the top of function?

Regards,

S.Amarnath




    if (r) {
  dev_err(tmp_adev->dev, "ASIC reset failed with 
error, %d for drm dev, %s",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
    __entry->seqno)
  );
  +TRACE_EVENT(amdgpu_reset_reg_dumps,
+    TP_PROTO(uint32_t address, uint32_t value),
+    TP_ARGS(address, value),
+    TP_STRUCT__entry(
+ __field(uint32_t, address)
+ __field(uint32_t, value)
+ ),
+    TP_fast_assign(
+   __entry->address = address;
+   __entry->value = value;
+   ),
+    TP_printk("amdgpu register dump 0x%x: 0x%x",
+  __entry->address,
+  __entry->value)
+);
+
  #undef AMDGPU_JOB_GET_TIMELINE_NAME
  #endif


Re: [PATCH v6 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-16 Thread Somalapuram, Amaranath



On 2/16/2022 3:45 PM, Lazar, Lijo wrote:



On 2/16/2022 3:19 PM, Somalapuram Amaranath wrote:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  5 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 +
  2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..57965316873b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,11 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int n_regs;
+    struct mutex    reset_dump_mutex;
  };
    static inline struct amdgpu_device *drm_to_adev(struct drm_device 
*ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..faf985c7cb93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,98 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, r, len = 0;
+
+    if (*pos)
+    return 0;
+
+    if (adev->n_regs == 0)
+    return 0;
+
+    for (i = 0; i < adev->n_regs; i++) {
+    sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+    r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+    if (r)
+    return -EFAULT;
+
+    len += strlen(reg_offset);
+    }
+
+    r = copy_to_user(buf + len, "\n", 1);
+
+    if (r)
+    return -EFAULT;
+
+    len++;
+    *pos += len;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg, reg_temp[11];
+    static int alloc_count;


This being static what happens when it is called on a second device?

Thanks,
Lijo


I tried to avoid adding to adev. It won't work for multiple devices.

+    int ret, i = 0, len = 0;
+
+    do {
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));

+
+    if (ret)
+    goto failed;
+
+    reg = strsep(_offset, " ");
+
+    if (alloc_count <= i) {
+    adev->reset_dump_reg_list =  krealloc_array(
+    adev->reset_dump_reg_list, 1,
+    sizeof(uint32_t), GFP_KERNEL);
+    alloc_count++;
+    }
+
+    ret = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+    if (ret)
+    goto failed;
+
+    len += strlen(reg) + 1;
+    i++;
+
+    } while (len < size);
+
+    adev->n_regs = i;
+
+    return size;
+
+failed:
+    mutex_lock(>reset_dump_mutex);
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    alloc_count = 0;
+    adev->n_regs = 0;
+    mutex_unlock(>reset_dump_mutex);
+    return -EFAULT;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+    .owner = THIS_MODULE,
+    .read = amdgpu_reset_dump_register_list_read,
+    .write = amdgpu_reset_dump_register_list_write,
+    .llseek = default_llseek
+};
+
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
  struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1618,6 +1710,7 @@ int amdgpu_debugfs_init(struct amdgpu_device 
*adev)

  if (!debugfs_initialized())
  return 0;
  +    mutex_init(>reset_dump_mutex);
  ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
    _ib_preempt);
  if (IS_ERR(ent)) {
@@ -1672,6 +1765,8 @@ int amdgpu_debugfs_init(struct amdgpu_device 
*adev)

  _debugfs_test_ib_fops);
  debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
  _debugfs_vm_info_fops);
+    debugfs_create_file("amdgpu_reset_dump_register_list", 0644, 
root, adev,

+    _reset_dump_register_list);
    adev->debugfs_vbios_blob.data = adev->bios;
  adev->debugfs_vbios_blob.size = adev->bios_size;



[PATCH v6 2/2] drm/amdgpu: add reset register dump trace on GPU reset

2022-02-16 Thread Somalapuram Amaranath
Dump the list of register values to trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..247e4f97de33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,21 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   mutex_lock(>reset_dump_mutex);
+   for (i = 0; i < adev->n_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+   mutex_unlock(>reset_dump_mutex);
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4567,8 +4582,10 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
tmp_adev->gmc.xgmi.pending_reset = false;
if (!queue_work(system_unbound_wq, 
_adev->xgmi_reset_work))
r = -EALREADY;
-   } else
+   } else {
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_asic_reset(tmp_adev);
+   }
 
if (r) {
dev_err(tmp_adev->dev, "ASIC reset failed with 
error, %d for drm dev, %s",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



[PATCH v6 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-16 Thread Somalapuram Amaranath
List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  5 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 +
 2 files changed, 100 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..57965316873b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,11 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int n_regs;
+   struct mutexreset_dump_mutex;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..faf985c7cb93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,98 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, r, len = 0;
+
+   if (*pos)
+   return 0;
+
+   if (adev->n_regs == 0)
+   return 0;
+
+   for (i = 0; i < adev->n_regs; i++) {
+   sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+   r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+
+   if (r)
+   return -EFAULT;
+
+   len += strlen(reg_offset);
+   }
+
+   r = copy_to_user(buf + len, "\n", 1);
+
+   if (r)
+   return -EFAULT;
+
+   len++;
+   *pos += len;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   static int alloc_count;
+   int ret, i = 0, len = 0;
+
+   do {
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf + len, min(11, 
((int)size-len)));
+
+   if (ret)
+   goto failed;
+
+   reg = strsep(_offset, " ");
+
+   if (alloc_count <= i) {
+   adev->reset_dump_reg_list =  krealloc_array(
+   
adev->reset_dump_reg_list, 1,
+   sizeof(uint32_t), 
GFP_KERNEL);
+   alloc_count++;
+   }
+
+   ret = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+   if (ret)
+   goto failed;
+
+   len += strlen(reg) + 1;
+   i++;
+
+   } while (len < size);
+
+   adev->n_regs = i;
+
+   return size;
+
+failed:
+   mutex_lock(>reset_dump_mutex);
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   alloc_count = 0;
+   adev->n_regs = 0;
+   mutex_unlock(>reset_dump_mutex);
+   return -EFAULT;
+}
+
+
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1618,6 +1710,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (!debugfs_initialized())
return 0;
 
+   mutex_init(>reset_dump_mutex);
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
  _ib_preempt);
if (IS_ERR(ent)) {
@@ -1672,6 +1765,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



[PATCH v5 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-15 Thread Somalapuram Amaranath
List of registers populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 +
 2 files changed, 99 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..2e8c2318276d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int n_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..be4336574fec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,99 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, r, len = 0;
+
+   if (adev->n_regs == 0)
+   return 0;
+
+   for (i = 0; i < adev->n_regs; i++) {
+   sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+   r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+   
+   if (r)
+   return -EFAULT;
+
+   len += strlen(reg_offset);
+   }
+
+   r = copy_to_user(buf + len, "\n", 1);
+
+   if (r)
+   return -EFAULT;
+
+   len++;
+
+   if (*pos >= len)
+   return 0;
+
+   *pos += len - r;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   int ret, i = 0, len = 0;
+
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf + len, (11 < (size) ? 11 : 
(size)));
+
+   if (ret)
+   return -EFAULT;
+
+   if (adev->n_regs > 0) {
+   adev->n_regs = 0;
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   }
+
+   while (((reg = strsep(_offset, " ")) != NULL) && len < size) {
+   adev->reset_dump_reg_list =  krealloc_array(
+   adev->reset_dump_reg_list, 1,
+   sizeof(uint32_t), GFP_KERNEL);
+   ret  = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+   if (ret) {
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   return -EINVAL;
+   }
+
+   len += strlen(reg) + 1;
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf + len, (11 < (size-len) ? 
11 : (size-len)));
+
+   if (ret) {
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   return -EFAULT;
+   }
+
+   i++;
+   }
+
+   adev->n_regs = i;
+
+   return size;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1765,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



[PATCH v5 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-15 Thread Somalapuram Amaranath
List of registers populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 +
 2 files changed, 99 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..2e8c2318276d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int n_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..be4336574fec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,99 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, r, len = 0;
+
+   if (adev->n_regs == 0)
+   return 0;
+
+   for (i = 0; i < adev->n_regs; i++) {
+   sprintf(reg_offset, "0x%x ", adev->reset_dump_reg_list[i]);
+   r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+   
+   if (r)
+   return -EFAULT;
+
+   len += strlen(reg_offset);
+   }
+
+   r = copy_to_user(buf + len, "\n", 1);
+
+   if (r)
+   return -EFAULT;
+
+   len++;
+
+   if (*pos >= len)
+   return 0;
+
+   *pos += len - r;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   int ret, i = 0, len = 0;
+
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf + len, (11 < (size) ? 11 : 
(size)));
+
+   if (ret)
+   return -EFAULT;
+
+   if (adev->n_regs > 0) {
+   adev->n_regs = 0;
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   }
+
+   while (((reg = strsep(_offset, " ")) != NULL) && len < size) {
+   adev->reset_dump_reg_list =  krealloc_array(
+   adev->reset_dump_reg_list, 1,
+   sizeof(uint32_t), GFP_KERNEL);
+   ret  = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+   if (ret) {
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   return -EINVAL;
+   }
+
+   len += strlen(reg) + 1;
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf + len, (11 < (size-len) ? 
11 : (size-len)));
+
+   if (ret) {
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   return -EFAULT;
+   }
+
+   i++;
+   }
+
+   adev->n_regs = i;
+
+   return size;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1765,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



Re: [PATCH v4 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-15 Thread Somalapuram, Amaranath



On 2/15/2022 3:46 PM, Christian König wrote:



Am 15.02.22 um 11:12 schrieb Somalapuram Amaranath:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 89 +
  2 files changed, 93 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..2e8c2318276d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int n_regs;
  };
    static inline struct amdgpu_device *drm_to_adev(struct drm_device 
*ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..edcb032bc1f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,93 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char reg_offset[11];
+    int i, r, len = 0;
+
+    if (adev->n_regs == 0)
+    return 0;
+
+    for (i = 0; i < adev->n_regs; i++) {
+    memset(reg_offset,  0, 11);
+    sprintf(reg_offset + strlen(reg_offset),


That here looks odd, probably just a leftover from the older code.


My mistake leftover code.

let me try to remove memset() also.


Without it you can also drop the memset().


+    "0x%x ", adev->reset_dump_reg_list[i]);
+    r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));


Whenever copy_to_user() returns a nonzero value the best practice is 
to return -EFAULT.



agreed.

+    len += strlen(reg_offset);
+    }
+
+    r = copy_to_user(buf + len, "\n", 1);


Same here.


agreed.

+    len++;
+
+    if (*pos >= len)
+    return 0;


What is that good for?

If I don't have this condition, the read operation will loop infinitely and 
the data keeps repeating.


I am not sure why. Maybe I need to set something during the write operation.


Regards,
Christian.


+
+    *pos += len - r;
+
+    return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg, reg_temp[11];
+    int ret, i = 0, len = 0;
+
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf, 11);
+
+    if (ret)
+    return -EFAULT;
+
+    if (adev->n_regs > 0) {
+    adev->n_regs = 0;
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    }
+
+    while (((reg = strsep(_offset, " ")) != NULL) && len < size) {
+    adev->reset_dump_reg_list =  krealloc_array(
+    adev->reset_dump_reg_list, 1,
+    sizeof(uint32_t), GFP_KERNEL);
+    ret  = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+    if (ret) {
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    return -EINVAL;
+    }
+
+    len += strlen(reg) + 1;
+    reg_offset = reg_temp;
+    memset(reg_offset,  0, 11);
+    ret = copy_from_user(reg_offset, buf + len, 11);
+
+    if (ret) {
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    return -EFAULT;
+    }
+
+    i++;
+    }
+
+    adev->n_regs = i;
+
+    return size;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+    .owner = THIS_MODULE,
+    .read = amdgpu_reset_dump_register_list_read,
+    .write = amdgpu_reset_dump_register_list_write,
+    .llseek = default_llseek
+};
+
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
  struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1759,8 @@ int amdgpu_debugfs_init(struct amdgpu_device 
*adev)

  _debugfs_test_ib_fops);
  debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
  _debugfs_vm_info_fops);
+    debugfs_create_file("amdgpu_reset_dump_register_list", 0644, 
root, adev,

+    _reset_dump_register_list);
    adev->debugfs_vbios_blob.data = adev->bios;
  adev->debugfs_vbios_blob.size = adev->bios_size;




[PATCH v4 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-15 Thread Somalapuram Amaranath
List of registers populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 89 +
 2 files changed, 93 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b85b67a88a3d..2e8c2318276d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
 
struct amdgpu_reset_control *reset_cntl;
uint32_t
ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+   /* reset dump register */
+   uint32_t*reset_dump_reg_list;
+   int n_regs;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..edcb032bc1f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,93 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
 
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+   char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char reg_offset[11];
+   int i, r, len = 0;
+
+   if (adev->n_regs == 0)
+   return 0;
+
+   for (i = 0; i < adev->n_regs; i++) {
+   memset(reg_offset,  0, 11);
+   sprintf(reg_offset + strlen(reg_offset),
+   "0x%x ", adev->reset_dump_reg_list[i]);
+   r = copy_to_user(buf + len, reg_offset, strlen(reg_offset));
+   len += strlen(reg_offset);
+   }
+
+   r = copy_to_user(buf + len, "\n", 1);
+   len++;
+
+   if (*pos >= len)
+   return 0;
+
+   *pos += len - r;
+
+   return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+   const char __user *buf, size_t size, loff_t *pos)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;
+   char *reg_offset, *reg, reg_temp[11];
+   int ret, i = 0, len = 0;
+
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf, 11);
+
+   if (ret)
+   return -EFAULT;
+
+   if (adev->n_regs > 0) {
+   adev->n_regs = 0;
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   }
+
+   while (((reg = strsep(_offset, " ")) != NULL) && len < size) {
+   adev->reset_dump_reg_list =  krealloc_array(
+   adev->reset_dump_reg_list, 1,
+   sizeof(uint32_t), GFP_KERNEL);
+   ret  = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+   if (ret) {
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   return -EINVAL;
+   }
+
+   len += strlen(reg) + 1;
+   reg_offset = reg_temp;
+   memset(reg_offset,  0, 11);
+   ret = copy_from_user(reg_offset, buf + len, 11);
+
+   if (ret) {
+   kfree(adev->reset_dump_reg_list);
+   adev->reset_dump_reg_list = NULL;
+   return -EFAULT;
+   }
+
+   i++;
+   }
+
+   adev->n_regs = i;
+
+   return size;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+   .owner = THIS_MODULE,
+   .read = amdgpu_reset_dump_register_list_read,
+   .write = amdgpu_reset_dump_register_list_write,
+   .llseek = default_llseek
+};
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1759,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
_debugfs_vm_info_fops);
+   debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+   _reset_dump_register_list);
 
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
-- 
2.25.1



[PATCH v4 2/2] drm/amdgpu: add reset register dump trace on GPU reset

2022-02-15 Thread Somalapuram Amaranath
Dump the list of register values to a trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..ff21262c6fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,19 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   uint32_t reg_value;
+   int i;
+
+   for (i = 0; i < adev->n_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4567,8 +4580,10 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
tmp_adev->gmc.xgmi.pending_reset = false;
if (!queue_work(system_unbound_wq, 
_adev->xgmi_reset_work))
r = -EALREADY;
-   } else
+   } else {
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_asic_reset(tmp_adev);
+   }
 
if (r) {
dev_err(tmp_adev->dev, "ASIC reset failed with 
error, %d for drm dev, %s",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



Re: [PATCH v3 1/2] drm/amdgpu: add debugfs for reset registers list

2022-02-14 Thread Somalapuram, Amaranath



On 2/14/2022 2:59 PM, Christian König wrote:



Am 14.02.22 um 10:16 schrieb Somalapuram Amaranath:

List of register populated for dump collection during the GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  4 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 80 +
  2 files changed, 84 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index b85b67a88a3d..2e8c2318276d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1097,6 +1097,10 @@ struct amdgpu_device {
    struct amdgpu_reset_control *reset_cntl;
  uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+
+    /* reset dump register */
+    uint32_t    *reset_dump_reg_list;
+    int n_regs;
  };
    static inline struct amdgpu_device *drm_to_adev(struct drm_device 
*ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

index 164d6a9e9fbb..6d49bed5b761 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1609,6 +1609,84 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
  DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
  amdgpu_debugfs_sclk_set, "%llu\n");
  +static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+    char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset;
+    int i, r, len;
+
+    if (adev->n_regs == 0)
+    return 0;
+
+    reg_offset = kmalloc((adev->n_regs * 11) + 1, GFP_KERNEL);
+    memset(reg_offset,  0, (adev->n_regs * 11) + 1);
+
+    for (i = 0; i < adev->n_regs; i++)
+    sprintf(reg_offset + strlen(reg_offset), "0x%x ", 
adev->reset_dump_reg_list[i]);

+
+    sprintf(reg_offset + strlen(reg_offset), "\n");
+    len = strlen(reg_offset);
+
+    if (*pos >= len)
+    return 0;
+
+    r = copy_to_user(buf, reg_offset, len);


Maybe better copy that to userspace one register at a time. This is 
not performance critical in any way.


Same for the write function.

Regards,
Christian.

I tried to push the whole list of registers in one go, so that the list can be 
overwritten/updated (every write creates a new list).
If we add one register at a time from user space, the design will change 
to appending to the list, and it can't be edited or updated — would that be OK?
Also, I don't see much use for the *_read API (since we can see the list in the trace), 
so I will remove it (the debugfs interface will change from string to uint32_t) — does that work?


Regards,
S.Amarnath

+    *pos += len - r;
+    kfree(reg_offset);
+
+    return len - r;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+    const char __user *buf, size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device 
*)file_inode(f)->i_private;

+    char *reg_offset, *reg;
+    int ret, i = 0;
+
+    reg_offset = kmalloc(size, GFP_KERNEL);
+    memset(reg_offset,  0, size);
+    ret = copy_from_user(reg_offset, buf, size);
+
+    if (ret)
+    return -EFAULT;
+
+    if (adev->n_regs > 0) {
+    adev->n_regs = 0;
+    kfree(adev->reset_dump_reg_list);
+    adev->reset_dump_reg_list = NULL;
+    }
+
+    while ((reg = strsep(_offset, " ")) != NULL) {
+    adev->reset_dump_reg_list =  krealloc_array(
+    adev->reset_dump_reg_list, 1,
+    sizeof(uint32_t), GFP_KERNEL);
+    ret  = kstrtouint(reg, 16, >reset_dump_reg_list[i]);
+
+    if (ret) {
+    kfree(adev->reset_dump_reg_list);
+    kfree(reg_offset);
+    adev->reset_dump_reg_list = NULL;
+    return -EINVAL;
+    }
+
+    i++;
+    }
+
+    adev->n_regs = i;
+    kfree(reg_offset);
+
+    return size;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+    .owner = THIS_MODULE,
+    .read = amdgpu_reset_dump_register_list_read,
+    .write = amdgpu_reset_dump_register_list_write,
+    .llseek = default_llseek
+};
+
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
  struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1672,6 +1750,8 @@ int amdgpu_debugfs_init(struct amdgpu_device 
*adev)

  _debugfs_test_ib_fops);
  debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
  _debugfs_vm_info_fops);
+    debugfs_create_file("amdgpu_reset_dump_register_list", 0644, 
root, adev,

+    _reset_dump_register_list);
    adev->debugfs_vbios_blob.data = adev->bios;
  adev->debugfs_vbios_blob.size = adev->bios_size;




[PATCH v3 2/2] drm/amdgpu: add reset register dump trace function on GPU reset

2022-02-14 Thread Somalapuram Amaranath
Dump the list of register values to a trace event on GPU reset.

Signed-off-by: Somalapuram Amaranath 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  | 16 
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..4e11a93134cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4534,6 +4534,19 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device 
*adev,
return r;
 }
 
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+   int i;
+   uint32_t reg_value;
+
+   for (i = 0; i < adev->n_regs; i++) {
+   reg_value = RREG32(adev->reset_dump_reg_list[i]);
+   trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 
reg_value);
+   }
+
+   return 0;
+}
+
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 struct amdgpu_reset_context *reset_context)
 {
@@ -4567,8 +4580,10 @@ int amdgpu_do_asic_reset(struct list_head 
*device_list_handle,
tmp_adev->gmc.xgmi.pending_reset = false;
if (!queue_work(system_unbound_wq, 
_adev->xgmi_reset_work))
r = -EALREADY;
-   } else
+   } else {
+   amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_asic_reset(tmp_adev);
+   }
 
if (r) {
dev_err(tmp_adev->dev, "ASIC reset failed with 
error, %d for drm dev, %s",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..b9637925e85c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -537,6 +537,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
  __entry->seqno)
 );
 
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+   TP_PROTO(uint32_t address, uint32_t value),
+   TP_ARGS(address, value),
+   TP_STRUCT__entry(
+__field(uint32_t, address)
+__field(uint32_t, value)
+),
+   TP_fast_assign(
+  __entry->address = address;
+  __entry->value = value;
+  ),
+   TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
-- 
2.25.1



  1   2   >