From: Michel Dänzer <[email protected]>

The radeon driver uses placement range restrictions for several reasons,
in particular to make sure BOs in VRAM can be accessed by the CPU, e.g.
during a page fault.

Without this change, TTM could evict other BOs while trying to satisfy
the requested placement, even if the evicted BOs were outside of the
requested placement range. Doing so didn't free up any space in the
requested placement range, so the (potentially high) eviction cost was
incurred for no benefit.

Nominating for stable because radeon driver changes in 3.17 made this
much more noticeable than before.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=84662
Signed-off-by: Michel Dänzer <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
---

This is a backport of commit e300180f71037fd9ed1ca967006fd9f3ee466bcd for
the 3.17 and older stable branches.

 drivers/gpu/drm/ttm/ttm_bo.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3da89d5..e25845e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -716,6 +716,7 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
+                               const struct ttm_placement *placement,
                                bool interruptible,
                                bool no_wait_gpu)
 {
@@ -727,8 +728,22 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &man->lru, lru) {
                ret = __ttm_bo_reserve(bo, false, true, false, NULL);
-               if (!ret)
+               if (!ret) {
+                       if (placement && (placement->fpfn || placement->lpfn)) {
+                               /* Don't evict this BO if it's outside of the
+                                * requested placement range
+                                */
+                       if (placement->fpfn >= (bo->mem.start + bo->mem.size) ||
+                                   (placement->lpfn &&
+                                    placement->lpfn <= bo->mem.start)) {
+                                       __ttm_bo_unreserve(bo);
+                                       ret = -EBUSY;
+                                       continue;
+                               }
+                       }
+
                        break;
+               }
        }
 
        if (ret) {
@@ -789,7 +804,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
                if (mem->mm_node)
                        break;
-               ret = ttm_mem_evict_first(bdev, mem_type,
+               ret = ttm_mem_evict_first(bdev, mem_type, placement,
                                          interruptible, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
@@ -1245,7 +1260,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+               ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
-- 
2.1.3

--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to