This adds support for the new fault callback and the
infrastructure needed for unmappable VRAM.

V2 validate BO with no_wait = true
V3 don't dereference bo->mem.mm_node unconditionally, as it is
   non-NULL only for VRAM or GTT
V4 update to the split no_wait ttm change
V5 update to new balanced io_mem_reserve/free change
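
For reviewers, a rough sketch of how TTM's CPU fault handler is
expected to drive the two new hooks (simplified and not part of the
patch; exact names and ordering follow the TTM side of this series,
e.g. ttm_mem_io_reserve):

	/* give the driver a chance to migrate the BO somewhere
	 * CPU-visible, e.g. into the mappable part of VRAM */
	ret = bdev->driver->fault_reserve_notify(bo);
	if (unlikely(ret != 0))
		return VM_FAULT_SIGBUS;
	/* ask the driver to describe the io region backing the BO;
	 * this ends up in radeon_ttm_io_mem_reserve() below */
	ret = ttm_mem_io_reserve(bdev, &bo->mem);
	if (unlikely(ret != 0))
		return VM_FAULT_SIGBUS;
	/* mem->bus.{base,offset,is_iomem} now describe where the
	 * CPU mapping should point */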

Signed-off-by: Jerome Glisse <jgli...@redhat.com>
---
 drivers/gpu/drm/radeon/radeon_object.c |   26 ++++++++++++++++-
 drivers/gpu/drm/radeon/radeon_object.h |    2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c    |   47 +++++++++++++++++++++++++++++++-
 3 files changed, 71 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 3adfa88..aef44f3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -496,11 +496,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
        radeon_bo_check_tiling(rbo, 0, 1);
 }
 
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+       struct radeon_device *rdev;
        struct radeon_bo *rbo;
+       unsigned long offset, size;
+       int r;
+
        if (!radeon_ttm_bo_is_radeon_bo(bo))
-               return;
+               return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
+       rdev = rbo->rdev;
+       if (bo->mem.mem_type == TTM_PL_VRAM) {
+               size = bo->mem.num_pages << PAGE_SHIFT;
+               offset = bo->mem.mm_node->start << PAGE_SHIFT;
+               if ((offset + size) > rdev->mc.visible_vram_size) {
+                       /* hurrah the memory is not visible ! */
+                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+                       r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+                       if (unlikely(r != 0))
+                               return r;
+                       offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                       /* this should not happen */
+                       if ((offset + size) > rdev->mc.visible_vram_size)
+                               return -EINVAL;
+               }
+       }
+       return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de..353998d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                        struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index ba4724c..d2700b8 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -437,10 +437,53 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 memcpy:
                r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        }
-
        return r;
 }
 
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* system memory */
+               return 0;
+       case TTM_PL_TT:
+#if __OS_HAS_AGP
+               if (rdev->flags & RADEON_IS_AGP) {
+                       /* RADEON_IS_AGP is set only if AGP is active */
+                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.base = rdev->mc.agp_base;
+                       mem->bus.is_iomem = true;
+               }
+#endif
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               /* check if it's visible */
+       if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+                       return -EINVAL;
+               mem->bus.base = rdev->mc.aper_base;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       /* hopefully this will be useful soon */
+}
+
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
 {
@@ -481,6 +524,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
        .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+       .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+       .io_mem_free = &radeon_ttm_io_mem_free,
 };
 
 int radeon_ttm_init(struct radeon_device *rdev)
-- 
1.6.6.1

