Re: [PATCH v4 4/6] drm/amdgpu: switch to amdgpu_bo_vm for vm code

2021-05-28 Thread Christian König



On 28.05.21 at 12:56, Nirmoy Das wrote:

The amdgpu_bo_vm subclass is intended for PT/PD BOs, which are also
shadowed, so switch PT/PD BOs over to amdgpu_bo_vm.

v4: update amdgpu_vm_update_funcs to accept amdgpu_bo_vm.
v3: simplify code.
    Also check whether the shadow BO exists instead of checking only the BO type.
v2: squash three related patches.

Signed-off-by: Nirmoy Das 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 123 
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |   5 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c  |  14 +--
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c |  19 +--
  4 files changed, 96 insertions(+), 65 deletions(-)
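
For reference, the diff relies on the amdgpu_bo_vm subclass and the
to_amdgpu_bo_vm()/amdgpu_bo_shadowed() helpers introduced earlier in this
series; the following is only a minimal sketch of how they hang together
(member list abbreviated, see amdgpu_object.h in the earlier patches for the
real definitions):

/* PT/PD BOs embed struct amdgpu_bo as their first member, so a plain
 * amdgpu_bo pointer can be converted back with container_of().
 * Member list abbreviated here. */
struct amdgpu_bo_vm {
	struct amdgpu_bo	bo;
	struct amdgpu_bo	*shadow;	/* shadow copy used for recovery */
	/* ... */
};

#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

/* Kernel (PT/PD) BOs are amdgpu_bo_vm and may carry a shadow; everything
 * else does not, hence the NULL return. */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
	if (bo->tbo.type == ttm_bo_type_kernel)
		return to_amdgpu_bo_vm(bo)->shadow;

	return NULL;
}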

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6bc7566cc193..223c63342ecd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -652,15 +652,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 	spin_lock(&adev->mman.bdev.lru_lock);
 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
+		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
 		if (!bo->parent)
 			continue;
 
 		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
 					&vm->lru_bulk_move);
-		if (bo->shadow)
-			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
-						&bo->shadow->tbo.mem,
+		if (shadow)
+			ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
@@ -692,12 +692,13 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
+		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
 		r = validate(param, bo);
 		if (r)
 			return r;
-		if (bo->shadow) {
-			r = validate(param, bo->shadow);
+		if (shadow) {
+			r = validate(param, shadow);
 			if (r)
 				return r;
 		}
@@ -705,7 +706,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			amdgpu_vm_bo_moved(bo_base);
 		} else {
-			vm->update_funcs->map_table(bo);
+			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
 			amdgpu_vm_bo_relocated(bo_base);
 		}
 	}
@@ -737,7 +738,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  *
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
- * @bo: BO to clear
+ * @vmbo: BO to clear
  * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
@@ -747,13 +748,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_vm *vm,
-			      struct amdgpu_bo *bo,
+			      struct amdgpu_bo_vm *vmbo,
 			      bool immediate)
 {
 	struct ttm_operation_ctx ctx = { true, false };
 	unsigned level = adev->vm_manager.root_level;
 	struct amdgpu_vm_update_params params;
-	struct amdgpu_bo *ancestor = bo;
+	struct amdgpu_bo *ancestor = &vmbo->bo;
+	struct amdgpu_bo *bo = &vmbo->bo;
 	unsigned entries, ats_entries;
 	uint64_t addr;
 	int r;
@@ -793,14 +795,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	if (bo->shadow) {
-		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
-				    &ctx);
+	if (vmbo->shadow) {
+		struct amdgpu_bo *shadow = vmbo->shadow;
+
+		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
 		if (r)
 			return r;
 	}
 
-	r = vm->update_funcs->map_table(bo);
+	r = vm->update_funcs->map_table(vmbo);
 	if (r)
 		return r;
 
@@ -824,7 +827,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
 		}
 
-		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+		r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
 					     value, flags);
 		if (r)
 			return r;
@@ -847,7 +850,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		}
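
The amdgpu_vm.h, amdgpu_vm_cpu.c and amdgpu_vm_sdma.c hunks listed in the
diffstat are not quoted above. Going by the v4 note ("update
amdgpu_vm_update_funcs to accept amdgpu_bo_vm"), the callback table
presumably ends up looking roughly like the sketch below; treat the exact
prototypes as an illustration, not a quote from the patch:

/* Sketch of the updated callback table in amdgpu_vm.h: map_table() and
 * update() now take the amdgpu_bo_vm subclass so the CPU and SDMA backends
 * can reach the shadow BO; prepare()/commit() are assumed unchanged. */
struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};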
   
