Looks good to me. Reviewed-by: Roger He <hongbo...@amd.com>

-----Original Message-----
From: dri-devel [mailto:dri-devel-boun...@lists.freedesktop.org] On Behalf Of Christian König
Sent: Tuesday, February 20, 2018 8:58 PM
To: amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org; 
linux-ker...@vger.kernel.org
Subject: [PATCH 4/4] drm/ttm: keep BOs reserved until end of eviction

This avoids the problem of BOs that were just evicted being immediately
moved back into the domain by other threads, before the thread that
triggered the eviction can make use of the freed space.
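
To illustrate the idea outside of TTM, here is a minimal userspace
sketch of the pattern (hypothetical stand-in types and names, with a
plain mutex standing in for the reservation object; not TTM's real
API): each evicted object stays locked on a local list until the
allocation that triggered the eviction has finished, so no other
thread can move it back into the domain in between.

#include <pthread.h>
#include <stdio.h>

struct bo {                     /* stand-in for struct ttm_buffer_object */
	pthread_mutex_t resv;   /* stand-in for the BO's reservation lock */
	struct bo *next;        /* stand-in for the lru list entry */
};

/* Evict one BO: take its reservation and park it on a local list. */
static void evict_one(struct bo *bo, struct bo **evicted)
{
	pthread_mutex_lock(&bo->resv); /* stays reserved ... */
	bo->next = *evicted;
	*evicted = bo;                 /* ... and is remembered locally */
}

/* Only after the new allocation is done are the BOs unreserved. */
static void evict_cleanup(struct bo **evicted)
{
	while (*evicted) {
		struct bo *bo = *evicted;

		*evicted = bo->next;
		bo->next = NULL;
		pthread_mutex_unlock(&bo->resv);
	}
}

int main(void)
{
	struct bo a = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct bo b = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct bo *evicted = NULL;

	evict_one(&a, &evicted);
	evict_one(&b, &evicted);
	/*
	 * ... place the new buffer in the freed space here; threads
	 * blocking on a.resv or b.resv cannot move a or b back in yet ...
	 */
	evict_cleanup(&evicted);
	printf("evicted BOs stayed reserved until after the allocation\n");
	return 0;
}

In the patch itself the same role is played by the on-stack "evicted"
list and ttm_mem_evict_cleanup() below.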

Signed-off-by: Christian König <christian.koe...@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 37 +++++++++++++++++++++++++++++--------
 1 file changed, 29 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3a44c2ee4155..593a0216faff 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -742,7 +742,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                               uint32_t mem_type,
                               const struct ttm_place *place,
-                              struct ttm_operation_ctx *ctx)
+                              struct ttm_operation_ctx *ctx,
+                              struct list_head *evicted)
 {
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -792,17 +793,28 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
        ret = ttm_bo_evict(bo, ctx);
        if (locked) {
-               ttm_bo_unreserve(bo);
+               list_add_tail(&bo->lru, evicted);
        } else {
                spin_lock(&glob->lru_lock);
                ttm_bo_add_to_lru(bo);
                spin_unlock(&glob->lru_lock);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
        }
 
-       kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
 }
 
+static void ttm_mem_evict_cleanup(struct list_head *evicted)
+{
+       struct ttm_buffer_object *bo, *tmp;
+
+       list_for_each_entry_safe(bo, tmp, evicted, lru) {
+               list_del_init(&bo->lru);
+               ttm_bo_unreserve(bo);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
+       }
+}
+
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
@@ -852,20 +864,26 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       struct list_head evicted;
        int ret;
 
+       INIT_LIST_HEAD(&evicted);
        do {
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
                        break;
-               ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
+               ret = ttm_mem_evict_first(bdev, mem_type, place, ctx, &evicted);
                if (unlikely(ret != 0))
-                       return ret;
+                       goto error;
        } while (1);
        mem->mem_type = mem_type;
-       return ttm_bo_add_move_fence(bo, man, mem);
+       ret = ttm_bo_add_move_fence(bo, man, mem);
+
+error:
+       ttm_mem_evict_cleanup(&evicted);
+       return ret;
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1345,6 +1363,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
+       struct list_head evicted;
        struct dma_fence *fence;
        int ret;
        unsigned i;
@@ -1352,18 +1371,20 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
        /*
         * Can't use standard list traversal since we're unlocking.
         */
-
+       INIT_LIST_HEAD(&evicted);
        spin_lock(&glob->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                while (!list_empty(&man->lru[i])) {
                        spin_unlock(&glob->lru_lock);
-                       ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+                       ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
+                                                 &evicted);
                        if (ret)
                                return ret;
                        spin_lock(&glob->lru_lock);
                }
        }
        spin_unlock(&glob->lru_lock);
+       ttm_mem_evict_cleanup(&evicted);
 
        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
--
2.14.1
