Start using the new drm_exec component here as well, replacing the ttm_eu reservation helpers.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 86 +++++++++++--------------
 1 file changed, 39 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 82e27bd4f038..95292a65fd25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/firmware.h>
+#include <drm/drm_exec.h>
 
 #include "amdgpu_mes.h"
 #include "amdgpu.h"
@@ -1126,34 +1127,29 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
                                 struct amdgpu_mes_ctx_data *ctx_data)
 {
        struct amdgpu_bo_va *bo_va;
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
-       struct amdgpu_bo_list_entry pd;
-       struct ttm_validate_buffer csa_tv;
        struct amdgpu_sync sync;
+       struct drm_exec exec;
        int r;
 
        amdgpu_sync_create(&sync);
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&csa_tv.head);
 
-       csa_tv.bo = &ctx_data->meta_data_obj->tbo;
-       csa_tv.num_shared = 1;
-
-       list_add(&csa_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r) {
-               DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
-               return r;
+       drm_exec_init(&exec, false);
+       drm_exec_while_not_all_locked(&exec) {
+               r = drm_exec_prepare_obj(&exec,
+                                        &ctx_data->meta_data_obj->tbo.base,
+                                        0);
+               if (likely(!r))
+                       r = amdgpu_vm_lock_pd(vm, &exec);
+               drm_exec_continue_on_contention(&exec);
+               if (unlikely(r))
+                       goto error_fini_exec;
        }
 
        bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
        if (!bo_va) {
-               ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for meta data BO\n");
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto error_fini_exec;
        }
 
        r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
@@ -1163,33 +1159,35 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 
        if (r) {
                DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
-               goto error;
+               goto error_del_bo_va;
        }
 
        r = amdgpu_vm_bo_update(adev, bo_va, false);
        if (r) {
                DRM_ERROR("failed to do vm_bo_update on meta data\n");
-               goto error;
+               goto error_del_bo_va;
        }
        amdgpu_sync_fence(&sync, bo_va->last_pt_update);
 
        r = amdgpu_vm_update_pdes(adev, vm, false);
        if (r) {
                DRM_ERROR("failed to update pdes on meta data\n");
-               goto error;
+               goto error_del_bo_va;
        }
        amdgpu_sync_fence(&sync, vm->last_update);
 
        amdgpu_sync_wait(&sync, false);
-       ttm_eu_backoff_reservation(&ticket, &list);
+       drm_exec_fini(&exec);
 
        amdgpu_sync_free(&sync);
        ctx_data->meta_data_va = bo_va;
        return 0;
 
-error:
+error_del_bo_va:
        amdgpu_vm_bo_del(adev, bo_va);
-       ttm_eu_backoff_reservation(&ticket, &list);
+
+error_fini_exec:
+       drm_exec_fini(&exec);
        amdgpu_sync_free(&sync);
        return r;
 }
@@ -1200,34 +1198,28 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
        struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
        struct amdgpu_bo *bo = ctx_data->meta_data_obj;
        struct amdgpu_vm *vm = bo_va->base.vm;
-       struct amdgpu_bo_list_entry vm_pd;
-       struct list_head list, duplicates;
-       struct dma_fence *fence = NULL;
-       struct ttm_validate_buffer tv;
-       struct ww_acquire_ctx ticket;
-       long r = 0;
-
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&duplicates);
-
-       tv.bo = &bo->tbo;
-       tv.num_shared = 2;
-       list_add(&tv.head, &list);
-
-       amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-       if (r) {
-               dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%ld)\n", r);
-               return r;
+       struct dma_fence *fence;
+       struct drm_exec exec;
+       long r;
+
+       drm_exec_init(&exec, false);
+       drm_exec_while_not_all_locked(&exec) {
+               r = drm_exec_prepare_obj(&exec,
+                                        &ctx_data->meta_data_obj->tbo.base,
+                                        0);
+               if (likely(!r))
+                       r = amdgpu_vm_lock_pd(vm, &exec);
+               drm_exec_continue_on_contention(&exec);
+               if (unlikely(r))
+                       goto out_unlock;
        }
 
        amdgpu_vm_bo_del(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;
 
-       r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+       r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+                                  &fence);
        if (r)
                goto out_unlock;
        if (fence) {
@@ -1246,7 +1238,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 out_unlock:
        if (unlikely(r < 0))
                dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
-       ttm_eu_backoff_reservation(&ticket, &list);
+       drm_exec_fini(&exec);
 
        return r;
 }
-- 
2.34.1

Reply via email to