Setting the no_wait_gpu flag means that the allocated BO must be available
immediately and we can't wait for any GPU operation to finish. In that case
let ttm_bo_add_move_fence() return -EBUSY so that ttm_bo_mem_space() can try
the next placement instead of blocking.
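
For reference, a minimal caller-side sketch of how the flag is meant to be
used (not part of this patch; the bo and placement setup is assumed):

    struct ttm_operation_ctx ctx = {
            .interruptible = true,
            .no_wait_gpu = true,    /* fail instead of waiting for the GPU */
    };
    int ret;

    ret = ttm_bo_validate(bo, &placement, &ctx);
    if (ret) {
            /* Nothing was immediately available, fall back or retry later. */
            return ret;
    }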

Signed-off-by: Christian König <christian.koe...@amd.com>
Acked-by: Felix Kuehling <felix.kuehl...@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 43 +++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2070e8a57ed8..2899702139fb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -924,7 +924,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_mem_type_manager *man,
-                                struct ttm_mem_reg *mem)
+                                struct ttm_mem_reg *mem,
+                                bool no_wait_gpu)
 {
        struct dma_fence *fence;
        int ret;
@@ -933,19 +934,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);
 
-       if (fence) {
-               reservation_object_add_shared_fence(bo->resv, fence);
+       if (!fence)
+               return 0;
 
-               ret = reservation_object_reserve_shared(bo->resv, 1);
-               if (unlikely(ret)) {
-                       dma_fence_put(fence);
-                       return ret;
-               }
+       if (no_wait_gpu)
+               return -EBUSY;
+
+       reservation_object_add_shared_fence(bo->resv, fence);
 
-               dma_fence_put(bo->moving);
-               bo->moving = fence;
+       ret = reservation_object_reserve_shared(bo->resv, 1);
+       if (unlikely(ret)) {
+               dma_fence_put(fence);
+               return ret;
        }
 
+       dma_fence_put(bo->moving);
+       bo->moving = fence;
        return 0;
 }
 
@@ -974,7 +978,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
        } while (1);
 
-       return ttm_bo_add_move_fence(bo, man, mem);
+       return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1116,13 +1120,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                if (unlikely(ret))
                        goto error;
 
-               if (mem->mm_node) {
-                       ret = ttm_bo_add_move_fence(bo, man, mem);
-                       if (unlikely(ret)) {
-                               (*man->func->put_node)(man, mem);
-                               goto error;
-                       }
-                       return 0;
+               if (!mem->mm_node)
+                       continue;
+
+               ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+               if (unlikely(ret)) {
+                       (*man->func->put_node)(man, mem);
+                       if (ret == -EBUSY)
+                               continue;
+
+                       goto error;
                }
        }
 
-- 
2.17.1
