Use the new vma-manager infrastructure. This doesn't change any
implementation details, as the vma-offset-manager is nearly a 1-to-1
copy of the existing TTM code.

Even though the vma-manager uses its own locks, we still need
bdev->vm_lock to prevent bos from being destroyed before we can take a
reference on them during lookup. This lock is not needed during vm setup,
however, as we still hold a reference to the bo there.
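
For reference, the lookup path now follows roughly this pattern (a
minimal sketch; the full helper, ttm_bo_vm_lookup(), is in the
ttm_bo_vm.c hunk below):

        /* Hold bdev->vm_lock so the bo cannot be destroyed between the
         * offset lookup and taking our own reference. */
        read_lock(&bdev->vm_lock);
        node = drm_vma_offset_lookup(&bdev->vma_manager, offset, pages);
        if (node) {
                bo = container_of(node, struct ttm_buffer_object, vma_node);
                if (!kref_get_unless_zero(&bo->kref))
                        bo = NULL; /* bo is already being destroyed */
        }
        read_unlock(&bdev->vm_lock);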

This also drops the addr_space_offset member as it is a copy of vm_start
in the vma_node object. Callers now use the accessor functions
(drm_vma_node_offset_addr() and drm_vma_node_start()) instead.
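
Drivers that used to read bo->addr_space_offset directly now go through
the accessors; a minimal sketch of the two common conversions (mirroring
the driver hunks below):

        /* byte-based mmap offset, replaces bo->addr_space_offset */
        u64 offset = drm_vma_node_offset_addr(&bo->vma_node);

        /* page-based start of the node, replaces bo->vm_node->start */
        unsigned long start = drm_vma_node_start(&bo->vma_node);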

Signed-off-by: David Herrmann <dh.herrmann at gmail.com>
---
 drivers/gpu/drm/ast/ast_main.c            |  2 +-
 drivers/gpu/drm/cirrus/cirrus_main.c      |  2 +-
 drivers/gpu/drm/mgag200/mgag200_main.c    |  2 +-
 drivers/gpu/drm/nouveau/nouveau_display.c |  2 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c     |  2 +-
 drivers/gpu/drm/qxl/qxl_object.h          |  2 +-
 drivers/gpu/drm/qxl/qxl_release.c         |  2 +-
 drivers/gpu/drm/radeon/radeon_object.h    |  5 +-
 drivers/gpu/drm/ttm/ttm_bo.c              | 84 ++++++-------------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c         |  3 +-
 drivers/gpu/drm/ttm/ttm_bo_vm.c           | 81 ++++++++++++-----------------
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  |  4 +-
 include/drm/ttm/ttm_bo_api.h              | 15 ++----
 include/drm/ttm/ttm_bo_driver.h           |  7 +--
 14 files changed, 65 insertions(+), 148 deletions(-)

diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7b..c195dc2 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -487,7 +487,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)

 static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 int
 ast_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae8..3a7a0ef 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -294,7 +294,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)

 static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }

 int
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9905923..1b560a1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -330,7 +330,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)

 static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }

 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index f17dc2a..52498de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -705,7 +705,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
        gem = drm_gem_object_lookup(dev, file_priv, handle);
        if (gem) {
                struct nouveau_bo *bo = gem->driver_private;
-               *poffset = bo->bo.addr_space_offset;
+               *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                drm_gem_object_unreference_unlocked(gem);
                return 0;
        }
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b4b4d0c..357dace 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -192,7 +192,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-       rep->map_handle = nvbo->bo.addr_space_offset;
+       rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index b4fd89f..1fc4e4b 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -64,7 +64,7 @@ static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)

 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
-       return bo->tbo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }

 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d67..1a648e1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -87,7 +87,7 @@ qxl_release_free(struct qxl_device *qdev,

        for (i = 0 ; i < release->bo_count; ++i) {
                QXL_INFO(qdev, "release %llx\n",
-                       release->bos[i]->tbo.addr_space_offset
+                       drm_vma_node_offset_addr(&release->bos[i]->tbo.vma_node)
                                                - DRM_FILE_OFFSET);
                qxl_fence_remove_release(&release->bos[i]->fence, release->id);
                qxl_bo_unref(&release->bos[i]);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e2cb80a..ffa4e82 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -98,13 +98,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
  * @bo:        radeon object for which we query the offset
  *
  * Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-       return bo->tbo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }

 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e97c5a0..2b96a75 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -776,11 +776,7 @@ static void ttm_bo_release(struct kref *kref)
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        write_lock(&bdev->vm_lock);
-       if (likely(bo->vm_node != NULL)) {
-               rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-               drm_mm_put_block(bo->vm_node);
-               bo->vm_node = NULL;
-       }
+       drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
        write_unlock(&bdev->vm_lock);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
@@ -1289,6 +1285,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->acc_size = acc_size;
        bo->sg = sg;
        atomic_inc(&bo->glob->bo_count);
+       drm_vma_node_reset(&bo->vma_node);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
@@ -1588,9 +1585,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&glob->lru_lock);

-       BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
-       drm_mm_takedown(&bdev->addr_space_mm);
+       drm_vma_offset_manager_destroy(&bdev->vma_manager);
        write_unlock(&bdev->vm_lock);

        return ret;
@@ -1618,9 +1614,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        if (unlikely(ret != 0))
                goto out_no_sys;

-       bdev->addr_space_rb = RB_ROOT;
-       drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+       drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+                                   0x10000000);
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
@@ -1662,12 +1657,17 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       loff_t offset = (loff_t) bo->addr_space_offset;
-       loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+       loff_t offset, holelen;

        if (!bdev->dev_mapping)
                return;
-       unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+
+       if (drm_vma_node_has_offset(&bo->vma_node)) {
+               offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node);
+               holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+               unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+       }
        ttm_mem_io_free_vm(bo);
 }

@@ -1684,31 +1684,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)

 EXPORT_SYMBOL(ttm_bo_unmap_virtual);

-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-       struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-       struct rb_node *parent = NULL;
-       struct ttm_buffer_object *cur_bo;
-       unsigned long offset = bo->vm_node->start;
-       unsigned long cur_offset;
-
-       while (*cur) {
-               parent = *cur;
-               cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-               cur_offset = cur_bo->vm_node->start;
-               if (offset < cur_offset)
-                       cur = &parent->rb_left;
-               else if (offset > cur_offset)
-                       cur = &parent->rb_right;
-               else
-                       BUG();
-       }
-
-       rb_link_node(&bo->vm_rb, parent, cur);
-       rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
 /**
  * ttm_bo_setup_vm:
  *
@@ -1723,38 +1698,9 @@ static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       int ret;
-
-retry_pre_get:
-       ret = drm_mm_pre_get(&bdev->addr_space_mm);
-       if (unlikely(ret != 0))
-               return ret;

-       write_lock(&bdev->vm_lock);
-       bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-                                        bo->mem.num_pages, 0, 0);
-
-       if (unlikely(bo->vm_node == NULL)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-                                             bo->mem.num_pages, 0);
-
-       if (unlikely(bo->vm_node == NULL)) {
-               write_unlock(&bdev->vm_lock);
-               goto retry_pre_get;
-       }
-
-       ttm_bo_vm_insert_rb(bo);
-       write_unlock(&bdev->vm_lock);
-       bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-       return 0;
-out_unlock:
-       write_unlock(&bdev->vm_lock);
-       return ret;
+       return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+                                 bo->mem.num_pages);
 }

 int ttm_bo_wait(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index af89458..d8b6e5f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@

 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
-       fbo->vm_node = NULL;
+       drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        spin_lock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16..54a67f1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
@@ -40,37 +41,6 @@

 #define TTM_BO_VM_NUM_PREFAULT 16

-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-                                                    unsigned long page_start,
-                                                    unsigned long num_pages)
-{
-       struct rb_node *cur = bdev->addr_space_rb.rb_node;
-       unsigned long cur_offset;
-       struct ttm_buffer_object *bo;
-       struct ttm_buffer_object *best_bo = NULL;
-
-       while (likely(cur != NULL)) {
-               bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
-               cur_offset = bo->vm_node->start;
-               if (page_start >= cur_offset) {
-                       cur = cur->rb_right;
-                       best_bo = bo;
-                       if (page_start == cur_offset)
-                               break;
-               } else
-                       cur = cur->rb_left;
-       }
-
-       if (unlikely(best_bo == NULL))
-               return NULL;
-
-       if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-                    (page_start + num_pages)))
-               return NULL;
-
-       return best_bo;
-}
-
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-           bo->vm_node->start - vma->vm_pgoff;
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
        page_last = vma_pages(vma) +
-           bo->vm_node->start - vma->vm_pgoff;
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
        .close = ttm_bo_vm_close
 };

+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+                                                 unsigned long offset,
+                                                 unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+       struct ttm_buffer_object *bo = NULL;
+
+       read_lock(&bdev->vm_lock);
+
+       node = drm_vma_offset_lookup(&bdev->vma_manager, offset, pages);
+       if (likely(node)) {
+               bo = container_of(node, struct ttm_buffer_object, vma_node);
+               if (!kref_get_unless_zero(&bo->kref))
+                       bo = NULL;
+       }
+
+       read_unlock(&bdev->vm_lock);
+
+       if (!bo)
+               pr_err("Could not find buffer object to map\n");
+
+       return bo;
+}
+
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
 {
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
        struct ttm_buffer_object *bo;
        int ret;

-       read_lock(&bdev->vm_lock);
-       bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-                                vma_pages(vma));
-       if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
-               bo = NULL;
-       read_unlock(&bdev->vm_lock);
-
-       if (unlikely(bo == NULL)) {
-               pr_err("Could not find buffer object to map\n");
+       bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+       if (unlikely(!bo))
                return -EINVAL;
-       }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
@@ -324,12 +310,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
        bool no_wait = false;
        bool dummy;

-       read_lock(&bdev->vm_lock);
-       bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-       if (likely(bo != NULL))
-               ttm_bo_reference(bo);
-       read_unlock(&bdev->vm_lock);
-
+       bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
        if (unlikely(bo == NULL))
                return -EFAULT;

@@ -343,7 +324,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
        if (unlikely(ret != 0))
                goto out_unref;

-       kmap_offset = dev_offset - bo->vm_node->start;
+       kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index bc78425..b150079 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                goto out_no_dmabuf;

        rep->handle = handle;
-       rep->map_handle = dma_buf->base.addr_space_offset;
+       rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
        if (ret != 0)
                return -EINVAL;

-       *offset = out_buf->base.addr_space_offset;
+       *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
 }
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 3cb5d84..fcbe198 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,12 +32,12 @@
 #define _TTM_BO_API_H_

 #include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
-#include <linux/rbtree.h>
 #include <linux/bitmap.h>

 struct ttm_bo_device;
@@ -144,7 +144,6 @@ struct ttm_tt;
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
- * @addr_space_offset: Address space offset.
  * @acc_size: Accounted size for this object.
  * @kref: Reference count of this buffer object. When this refcount reaches
  * zero, the object is put on the delayed delete list.
@@ -172,8 +171,7 @@ struct ttm_tt;
  * @reserved: Deadlock-free lock used for synchronization state transitions.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
- * @vm_rb: Rb node for the vm rb tree.
- * @vm_node: Address space manager node.
+ * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -200,7 +198,6 @@ struct ttm_buffer_object {
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
        unsigned long num_pages;
-       uint64_t addr_space_offset;
        size_t acc_size;

        /**
@@ -254,13 +251,7 @@ struct ttm_buffer_object {
        void *sync_obj;
        unsigned long priv_flags;

-       /**
-        * Members protected by the bdev::vm_lock
-        */
-
-       struct rb_node vm_rb;
-       struct drm_mm_node *vm_node;
-
+       struct drm_vma_offset_node vma_node;

        /**
         * Special members that are protected by the reserve lock
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 9c8dca7..af63da3 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -35,6 +35,7 @@
 #include <ttm/ttm_module.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_global.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
@@ -517,7 +518,7 @@ struct ttm_bo_global {
  * @man: An array of mem_type_managers.
  * @fence_lock: Protects the synchronizing members on *all* bos belonging
  * to this device.
- * @addr_space_mm: Range manager for the device address space.
+ * @vma_manager: Address space manager
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
@@ -538,11 +539,11 @@ struct ttm_bo_device {
        rwlock_t vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        spinlock_t fence_lock;
+
        /*
         * Protected by the vm lock.
         */
-       struct rb_root addr_space_rb;
-       struct drm_mm addr_space_mm;
+       struct drm_vma_offset_manager vma_manager;

        /*
         * Protected by the global:lru lock.
-- 
1.8.3.2
