Wire xe_bo_move up to GPUSVM migration to SRAM, trylocking the mmap
lock.

v2:
 - Use xe_svm_bo_evict
 - Drop bo->range

Signed-off-by: Matthew Brost <[email protected]>
---
 drivers/gpu/drm/xe/xe_bo.c  | 20 ++++++++++++++++++++
 drivers/gpu/drm/xe/xe_svm.c |  5 +++++
 drivers/gpu/drm/xe/xe_svm.h |  3 +++
 3 files changed, 28 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index dbd03383878e..17d158762e03 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -25,6 +25,7 @@
 #include "xe_pm.h"
 #include "xe_preempt_fence.h"
 #include "xe_res_cursor.h"
+#include "xe_svm.h"
 #include "xe_trace_bo.h"
 #include "xe_ttm_stolen_mgr.h"
 #include "xe_vm.h"
@@ -250,6 +251,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 static void xe_evict_flags(struct ttm_buffer_object *tbo,
                           struct ttm_placement *placement)
 {
+       struct xe_bo *bo;
+
        if (!xe_bo_is_xe_bo(tbo)) {
                /* Don't handle scatter gather BOs */
                if (tbo->type == ttm_bo_type_sg) {
@@ -261,6 +264,12 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
                return;
        }
 
+       bo = ttm_to_xe_bo(tbo);
+       if (bo->flags & XE_BO_FLAG_SYSTEM_ALLOC) {
+               *placement = sys_placement;
+               return;
+       }
+
        /*
         * For xe, sg bos that are evicted to system just triggers a
         * rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -738,6 +747,17 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                }
        }
 
+       if (!move_lacks_source && (bo->flags & XE_BO_FLAG_SYSTEM_ALLOC) &&
+           new_mem->mem_type == XE_PL_SYSTEM) {
+               ret = xe_svm_bo_evict(bo);
+               if (!ret) {
+               drm_dbg(&xe->drm, "Evict system allocator BO success\n");
+                       ttm_bo_move_null(ttm_bo, new_mem);
+               }
+
+               goto out;
+       }
+
        if (!move_lacks_source &&
            ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
             (mem_type_is_vram(old_mem_type) &&
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 31b80cde15c4..555bc71ae523 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -752,3 +752,8 @@ int xe_devm_add(struct xe_tile *tile, struct xe_mem_region *mr)
                 tile->id, mr->io_start, mr->io_start + mr->usable_size, res);
        return 0;
 }
+
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+       return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 6893664dae70..5b9d5ac9ef72 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -11,6 +11,7 @@
 
 #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
 
+struct xe_bo;
 struct xe_mem_region;
 struct xe_tile;
 struct xe_vm;
@@ -35,6 +36,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                            bool atomic);
 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
 
+int xe_svm_bo_evict(struct xe_bo *bo);
+
 static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
 {
        return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
-- 
2.34.1

Reply via email to