Am 16.09.22 um 17:05 schrieb Philip Yang:
The vm status_lock will be used to protect all vm status lists.

Signed-off-by: Philip Yang <philip.y...@amd.com>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 30 +++++++++++++-------------
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 +++-
  2 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 690fd4f639f1..596f1ea8babc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -225,9 +225,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
   */
  static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
  {
-       spin_lock(&vm_bo->vm->invalidated_lock);
+       spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
-       spin_unlock(&vm_bo->vm->invalidated_lock);
+       spin_unlock(&vm_bo->vm->status_lock);
  }
/**
@@ -256,9 +256,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
   */
  static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
  {
-       spin_lock(&vm_bo->vm->invalidated_lock);
+       spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->done);
-       spin_unlock(&vm_bo->vm->invalidated_lock);
+       spin_unlock(&vm_bo->vm->status_lock);
  }
/**
@@ -936,7 +936,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
                amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
                                gtt_mem, cpu_mem);
        }
-       spin_lock(&vm->invalidated_lock);
+       spin_lock(&vm->status_lock);
        list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
@@ -949,7 +949,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
                amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
                                gtt_mem, cpu_mem);
        }
-       spin_unlock(&vm->invalidated_lock);
+       spin_unlock(&vm->status_lock);
  }
  /**
   * amdgpu_vm_bo_update - update all BO mappings in the vm page table
@@ -1290,12 +1290,12 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                        return r;
        }
- spin_lock(&vm->invalidated_lock);
+       spin_lock(&vm->status_lock);
        while (!list_empty(&vm->invalidated)) {
                bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
                                         base.vm_status);
                resv = bo_va->base.bo->tbo.base.resv;
-               spin_unlock(&vm->invalidated_lock);
+               spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */
                if (!amdgpu_vm_debug && dma_resv_trylock(resv))
@@ -1310,9 +1310,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
if (!clear)
                        dma_resv_unlock(resv);
-               spin_lock(&vm->invalidated_lock);
+               spin_lock(&vm->status_lock);
        }
-       spin_unlock(&vm->invalidated_lock);
+       spin_unlock(&vm->status_lock);
return 0;
  }
@@ -1763,9 +1763,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
                }
        }
- spin_lock(&vm->invalidated_lock);
+       spin_lock(&vm->status_lock);
        list_del(&bo_va->base.vm_status);
-       spin_unlock(&vm->invalidated_lock);
+       spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
                list_del(&mapping->list);
@@ -2019,7 +2019,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        INIT_LIST_HEAD(&vm->moved);
        INIT_LIST_HEAD(&vm->idle);
        INIT_LIST_HEAD(&vm->invalidated);
-       spin_lock_init(&vm->invalidated_lock);
+       spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->freed);
        INIT_LIST_HEAD(&vm->done);
@@ -2584,7 +2584,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
        id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
-       spin_lock(&vm->invalidated_lock);
+       spin_lock(&vm->status_lock);
        list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
@@ -2599,7 +2599,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
                        continue;
                total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
        }
-       spin_unlock(&vm->invalidated_lock);
+       spin_unlock(&vm->status_lock);
        total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 9ecb7f663e19..98895c8fef6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -268,7 +268,9 @@ struct amdgpu_vm {
/* regular invalidated BOs, but not yet updated in the PT */
        struct list_head        invalidated;
-       spinlock_t              invalidated_lock;
+
+       /* Lock to protect vm_bo add/del/move on all lists of vm */
+       spinlock_t              status_lock;

Maybe move that field before all the list_head members. Otherwise somebody could think that the invalidated list is not protected.

Apart from that Reviewed-by: Christian König <christian.koe...@amd.com>

Thanks,
Christian.

/* BO mappings freed, but not yet updated in the PT */
        struct list_head        freed;

Reply via email to