On Thu, Oct 22, 2020 at 05:37:43PM -0400, Joe Gidi wrote:
> 
> >Synopsis:    System freezes with radeondrm since latest drm_mm changes
> >Category:    kernel
> >Environment:
>       System      : OpenBSD 6.8
>       Details     : OpenBSD 6.8-current (GENERIC.MP) #131: Thu Oct 22 09:52:11 MDT 2020
>                        dera...@amd64.openbsd.org:/usr/src/sys/arch/amd64/compile/GENERIC.MP
> 
>       Architecture: OpenBSD.amd64
>       Machine     : amd64
> >Description:
>       I'm seeing system freezes while using Firefox since the recent drm_mm commits.
>       The system will freeze up, /var/log/messages will show many instances of the
>       following lines, the screen goes black, and I have to reset the system.
> 
Oct 22 17:18:39 ryzen /bsd: drm:pid23792:radeon_ring_test_lockup *ERROR* ring 0 stalled for more than 10040msec
Oct 22 17:18:39 ryzen /bsd: drm:pid23792:radeon_fence_check_lockup *WARNING* GPU lockup (current fence id 0x0000000000021e1f last fence id 0x0000000000021e28 on ring 0)
Oct 22 17:18:39 ryzen /bsd: drm:pid23792:radeon_ring_test_lockup *ERROR* ring 4 stalled for more than 10000msec
Oct 22 17:18:39 ryzen /bsd: drm:pid23792:radeon_fence_check_lockup *WARNING* GPU lockup (current fence id 0x000000000001e20a last fence id 0x000000000001e210 on ring 4)

Does reverting the drm_mm and drm_vma commits change this?
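If you want to give it a try, something like the following should work (a rough
sketch; it assumes the diff is saved to a file, the filename below is just a
placeholder, and your -current source tree is in /usr/src):

  # apply the revert, then rebuild and install the kernel
  cd /usr/src && patch -p0 < /path/to/revert-drm_mm.diff
  cd /usr/src/sys/arch/amd64/compile/GENERIC.MP
  make obj && make config && make && make install

Then reboot into the new kernel and see whether the radeon lockups still occur
with Firefox.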

diff --git sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
index de0afe174d1..8014d760f69 100644
--- sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
+++ sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
@@ -217,7 +217,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-       struct drm_file *file_priv = (void *)filp;
 
        /*
         * Don't verify access for KFD BOs. They don't have a GEM
@@ -228,7 +227,7 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
        if (amdgpu_ttm_tt_get_usermm(bo->ttm))
                return -EPERM;
-       return drm_vma_node_verify_access(&abo->tbo.base.vma_node, file_priv);
+       return drm_vma_node_verify_access(&abo->tbo.base.vma_node, filp);
 }
 
 /**
diff --git sys/dev/pci/drm/drm_gem.c sys/dev/pci/drm/drm_gem.c
index 729b9e921ed..1a6f898cfda 100644
--- sys/dev/pci/drm/drm_gem.c
+++ sys/dev/pci/drm/drm_gem.c
@@ -198,7 +198,7 @@ udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
        if (!obj)
                return NULL;
 
-       if (!drm_vma_node_is_allowed(node, priv)) {
+       if (!drm_vma_node_is_allowed(node, filp)) {
                drm_gem_object_put_unlocked(obj);
                return NULL;
        }
@@ -439,7 +439,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
                dev->driver->gem_close_object(obj, file_priv);
 
        drm_gem_remove_prime_handles(obj, file_priv);
-       drm_vma_node_revoke(&obj->vma_node, file_priv);
+       drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 
        drm_gem_object_handle_put_unlocked(obj);
 
@@ -583,7 +583,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 
        handle = ret;
 
-       ret = drm_vma_node_allow(&obj->vma_node, file_priv);
+       ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
        if (ret)
                goto err_remove;
 
@@ -601,7 +601,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
        return 0;
 
 err_revoke:
-       drm_vma_node_revoke(&obj->vma_node, file_priv);
+       drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
diff --git sys/dev/pci/drm/drm_mm.c sys/dev/pci/drm/drm_mm.c
index 17c3fffcd0d..58a1afe504b 100644
--- sys/dev/pci/drm/drm_mm.c
+++ sys/dev/pci/drm/drm_mm.c
@@ -93,11 +93,19 @@
  * some basic allocator dumpers for debugging.
  *
  * Note that this range allocator is not thread-safe, drivers need to protect
- * modifications with their own locking. The idea behind this is that for a full
+ * modifications with their on locking. The idea behind this is that for a full
  * memory manager additional data needs to be protected anyway, hence internal
  * locking would be fully redundant.
  */
 
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+                                               u64 size,
+                                               u64 alignment,
+                                               unsigned long color,
+                                               u64 start,
+                                               u64 end,
+                                               enum drm_mm_search_flags flags);
+
 #ifdef CONFIG_DRM_DEBUG_MM
 #include <linux/stackdepot.h>
 
@@ -107,19 +115,25 @@
 static noinline void save_stack(struct drm_mm_node *node)
 {
        unsigned long entries[STACKDEPTH];
-       unsigned int n;
+       struct stack_trace trace = {
+               .entries = entries,
+               .max_entries = STACKDEPTH,
+               .skip = 1
+       };
 
-       n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+       save_stack_trace(&trace);
+       if (trace.nr_entries != 0 &&
+           trace.entries[trace.nr_entries-1] == ULONG_MAX)
+               trace.nr_entries--;
 
        /* May be called under spinlock, so avoid sleeping */
-       node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
+       node->stack = depot_save_stack(&trace, GFP_NOWAIT);
 }
 
 static void show_leaks(struct drm_mm *mm)
 {
        struct drm_mm_node *node;
-       unsigned long *entries;
-       unsigned int nr_entries;
+       unsigned long entries[STACKDEPTH];
        char *buf;
 
        buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -127,14 +141,19 @@ static void show_leaks(struct drm_mm *mm)
                return;
 
        list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
+               struct stack_trace trace = {
+                       .entries = entries,
+                       .max_entries = STACKDEPTH
+               };
+
                if (!node->stack) {
                        DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
                                  node->start, node->size);
                        continue;
                }
 
-               nr_entries = stack_depot_fetch(node->stack, &entries);
-               stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
+               depot_fetch_stack(node->stack, &trace);
+               snprint_stack_trace(buf, BUFSZ, &trace, 0);
                DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
                          node->start, node->size, buf);
        }
@@ -157,48 +176,39 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
                     u64, __subtree_last,
                     START, LAST, static inline, drm_mm_interval_tree)
 #else
-static struct drm_mm_node *
-drm_mm_interval_tree_iter_first(const struct rb_root_cached *root,
-    uint64_t start, uint64_t last)
+struct drm_mm_node *   
+drm_mm_interval_tree_iter_first(struct rb_root *rb, u64 start, u64 last)
 {
+       struct drm_mm *mm = container_of(rb, typeof(*mm), interval_tree);
        struct drm_mm_node *node;
-       struct rb_node *rb;
 
-       for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
-               node = rb_entry(rb, typeof(*node), rb);
+       drm_mm_for_each_node(node, mm) {
                if (LAST(node) >= start && START(node) <= last)
                        return node;
        }
        return NULL;
 }
-
-static void
-drm_mm_interval_tree_remove(struct drm_mm_node *node,
-    struct rb_root_cached *root) 
-{
-       rb_erase_cached(&node->rb, root);
-}
 #endif
 
 struct drm_mm_node *
 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
 {
-       return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
-                                              start, last) ?: (struct drm_mm_node *)&mm->head_node;
+       return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
+                                              start, last);
 }
 EXPORT_SYMBOL(__drm_mm_interval_first);
 
+#ifdef __linux__
 static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                                          struct drm_mm_node *node)
 {
        struct drm_mm *mm = hole_node->mm;
        struct rb_node **link, *rb;
        struct drm_mm_node *parent;
-       bool leftmost;
 
        node->__subtree_last = LAST(node);
 
-       if (drm_mm_node_allocated(hole_node)) {
+       if (hole_node->allocated) {
                rb = &hole_node->rb;
                while (rb) {
                        parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -211,11 +221,9 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 
                rb = &hole_node->rb;
                link = &hole_node->rb.rb_right;
-               leftmost = false;
        } else {
                rb = NULL;
-               link = &mm->interval_tree.rb_root.rb_node;
-               leftmost = true;
+               link = &mm->interval_tree.rb_node;
        }
 
        while (*link) {
@@ -223,192 +231,84 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                parent = rb_entry(rb, struct drm_mm_node, rb);
                if (parent->__subtree_last < node->__subtree_last)
                        parent->__subtree_last = node->__subtree_last;
-               if (node->start < parent->start) {
+               if (node->start < parent->start)
                        link = &parent->rb.rb_left;
-               } else {
+               else
                        link = &parent->rb.rb_right;
-                       leftmost = false;
-               }
        }
 
        rb_link_node(&node->rb, rb, link);
-#ifdef notyet
-       rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
-                                  &drm_mm_interval_tree_augment);
-#else
-       rb_insert_color_cached(&node->rb, &mm->interval_tree, leftmost);
-#endif
-}
-
-#define DRM_RB_INSERT(root, member, expr) do { \
-       struct rb_node **link = &root.rb_node, *rb = NULL; \
-       u64 x = expr(node); \
-       while (*link) { \
-               rb = *link; \
-               if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
-                       link = &rb->rb_left; \
-               else \
-                       link = &rb->rb_right; \
-       } \
-       rb_link_node(&node->member, rb, link); \
-       rb_insert_color(&node->member, &root); \
-} while (0)
-
-#define HOLE_SIZE(NODE) ((NODE)->hole_size)
-#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
-
-static u64 rb_to_hole_size(struct rb_node *rb)
-{
-       return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
-}
-
-static void insert_hole_size(struct rb_root_cached *root,
-                            struct drm_mm_node *node)
-{
-       struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
-       u64 x = node->hole_size;
-       bool first = true;
-
-       while (*link) {
-               rb = *link;
-               if (x > rb_to_hole_size(rb)) {
-                       link = &rb->rb_left;
-               } else {
-                       link = &rb->rb_right;
-                       first = false;
-               }
-       }
-
-       rb_link_node(&node->rb_hole_size, rb, link);
-       rb_insert_color_cached(&node->rb_hole_size, root, first);
-}
-
-static void add_hole(struct drm_mm_node *node)
-{
-       struct drm_mm *mm = node->mm;
-
-       node->hole_size =
-               __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
-       DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
-
-       insert_hole_size(&mm->holes_size, node);
-       DRM_RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
-
-       list_add(&node->hole_stack, &mm->hole_stack);
+       rb_insert_augmented(&node->rb,
+                           &mm->interval_tree,
+                           &drm_mm_interval_tree_augment);
 }
+#endif
 
-static void rm_hole(struct drm_mm_node *node)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+                                struct drm_mm_node *node,
+                                u64 size, u64 alignment,
+                                unsigned long color,
+                                u64 range_start, u64 range_end,
+                                enum drm_mm_allocator_flags flags)
 {
-       DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
+       struct drm_mm *mm = hole_node->mm;
+       u64 hole_start = drm_mm_hole_node_start(hole_node);
+       u64 hole_end = drm_mm_hole_node_end(hole_node);
+       u64 adj_start = hole_start;
+       u64 adj_end = hole_end;
 
-       list_del(&node->hole_stack);
-       rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
-       rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
-       node->hole_size = 0;
+       DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);
 
-       DRM_MM_BUG_ON(drm_mm_hole_follows(node));
-}
+       if (mm->color_adjust)
+               mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
-static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
-{
-       return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
-}
+       adj_start = max(adj_start, range_start);
+       adj_end = min(adj_end, range_end);
 
-static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
-{
-       return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
-}
+       if (flags & DRM_MM_CREATE_TOP)
+               adj_start = adj_end - size;
 
-static inline u64 rb_hole_size(struct rb_node *rb)
-{
-       return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
-}
+       if (alignment) {
+               u64 rem;
 
-static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
-{
-       struct rb_node *rb = mm->holes_size.rb_root.rb_node;
-       struct drm_mm_node *best = NULL;
-
-       do {
-               struct drm_mm_node *node =
-                       rb_entry(rb, struct drm_mm_node, rb_hole_size);
-
-               if (size <= node->hole_size) {
-                       best = node;
-                       rb = rb->rb_right;
-               } else {
-                       rb = rb->rb_left;
+               div64_u64_rem(adj_start, alignment, &rem);
+               if (rem) {
+                       if (flags & DRM_MM_CREATE_TOP)
+                               adj_start -= rem;
+                       else
+                               adj_start += alignment - rem;
                }
-       } while (rb);
-
-       return best;
-}
-
-static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
-{
-       struct rb_node *rb = mm->holes_addr.rb_node;
-       struct drm_mm_node *node = NULL;
-
-       while (rb) {
-               u64 hole_start;
-
-               node = rb_hole_addr_to_node(rb);
-               hole_start = __drm_mm_hole_node_start(node);
+       }
 
-               if (addr < hole_start)
-                       rb = node->rb_hole_addr.rb_left;
-               else if (addr > hole_start + node->hole_size)
-                       rb = node->rb_hole_addr.rb_right;
-               else
-                       break;
+       if (adj_start == hole_start) {
+               hole_node->hole_follows = 0;
+               list_del(&hole_node->hole_stack);
        }
 
-       return node;
-}
+       node->start = adj_start;
+       node->size = size;
+       node->mm = mm;
+       node->color = color;
+       node->allocated = 1;
 
-static struct drm_mm_node *
-first_hole(struct drm_mm *mm,
-          u64 start, u64 end, u64 size,
-          enum drm_mm_insert_mode mode)
-{
-       switch (mode) {
-       default:
-       case DRM_MM_INSERT_BEST:
-               return best_hole(mm, size);
+       list_add(&node->node_list, &hole_node->node_list);
 
-       case DRM_MM_INSERT_LOW:
-               return find_hole(mm, start);
+#ifdef __linux__
+       drm_mm_interval_tree_add_node(hole_node, node);
+#endif
 
-       case DRM_MM_INSERT_HIGH:
-               return find_hole(mm, end);
+       DRM_MM_BUG_ON(node->start < range_start);
+       DRM_MM_BUG_ON(node->start < adj_start);
+       DRM_MM_BUG_ON(node->start + node->size > adj_end);
+       DRM_MM_BUG_ON(node->start + node->size > range_end);
 
-       case DRM_MM_INSERT_EVICT:
-               return list_first_entry_or_null(&mm->hole_stack,
-                                               struct drm_mm_node,
-                                               hole_stack);
+       node->hole_follows = 0;
+       if (__drm_mm_hole_node_start(node) < hole_end) {
+               list_add(&node->hole_stack, &mm->hole_stack);
+               node->hole_follows = 1;
        }
-}
-
-static struct drm_mm_node *
-next_hole(struct drm_mm *mm,
-         struct drm_mm_node *node,
-         enum drm_mm_insert_mode mode)
-{
-       switch (mode) {
-       default:
-       case DRM_MM_INSERT_BEST:
-               return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 
-       case DRM_MM_INSERT_LOW:
-               return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
-
-       case DRM_MM_INSERT_HIGH:
-               return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
-
-       case DRM_MM_INSERT_EVICT:
-               node = list_next_entry(node, hole_stack);
-               return &node->hole_stack == &mm->hole_stack ? NULL : node;
-       }
+       save_stack(node);
 }
 
 /**
@@ -427,22 +327,31 @@ next_hole(struct drm_mm *mm,
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
+       u64 end = node->start + node->size;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;
        u64 adj_start, adj_end;
-       u64 end;
 
        end = node->start + node->size;
        if (unlikely(end <= node->start))
                return -ENOSPC;
 
        /* Find the relevant hole to add our node to */
-       hole = find_hole(mm, node->start);
-       if (!hole)
+       hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+                                              node->start, ~(u64)0);
+       if (hole) {
+               if (hole->start < end)
+                       return -ENOSPC;
+       } else {
+               hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
+       }
+
+       hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+       if (!drm_mm_hole_follows(hole))
                return -ENOSPC;
 
        adj_start = hole_start = __drm_mm_hole_node_start(hole);
-       adj_end = hole_end = hole_start + hole->hole_size;
+       adj_end = hole_end = __drm_mm_hole_node_end(hole);
 
        if (mm->color_adjust)
                mm->color_adjust(hole, node->color, &adj_start, &adj_end);
@@ -451,148 +360,72 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
                return -ENOSPC;
 
        node->mm = mm;
+       node->allocated = 1;
 
-       __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
        list_add(&node->node_list, &hole->node_list);
+
+#ifdef __linux__
        drm_mm_interval_tree_add_node(hole, node);
-       node->hole_size = 0;
+#endif
 
-       rm_hole(hole);
-       if (node->start > hole_start)
-               add_hole(hole);
-       if (end < hole_end)
-               add_hole(node);
+       if (node->start == hole_start) {
+               hole->hole_follows = 0;
+               list_del(&hole->hole_stack);
+       }
+
+       node->hole_follows = 0;
+       if (end != hole_end) {
+               list_add(&node->hole_stack, &mm->hole_stack);
+               node->hole_follows = 1;
+       }
 
        save_stack(node);
+
        return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
-static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
-{
-       return rb ? rb_to_hole_size(rb) : 0;
-}
-
 /**
- * drm_mm_insert_node_in_range - ranged search for space and insert @node
+ * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
  * @node: preallocate node to insert
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for this node
- * @range_start: start of the allowed range for this node
- * @range_end: end of the allowed range for this node
- * @mode: fine-tune the allocation search and placement
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
  *
  * The preallocated @node must be cleared to 0.
  *
  * Returns:
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
-int drm_mm_insert_node_in_range(struct drm_mm * const mm,
-                               struct drm_mm_node * const node,
-                               u64 size, u64 alignment,
-                               unsigned long color,
-                               u64 range_start, u64 range_end,
-                               enum drm_mm_insert_mode mode)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+                                       u64 size, u64 alignment,
+                                       unsigned long color,
+                                       u64 start, u64 end,
+                                       enum drm_mm_search_flags sflags,
+                                       enum drm_mm_allocator_flags aflags)
 {
-       struct drm_mm_node *hole;
-       u64 remainder_mask;
-       bool once;
+       struct drm_mm_node *hole_node;
 
-       DRM_MM_BUG_ON(range_start > range_end);
+       if (WARN_ON(size == 0))
+               return -EINVAL;
 
-       if (unlikely(size == 0 || range_end - range_start < size))
+       hole_node = drm_mm_search_free_in_range_generic(mm,
+                                                       size, alignment, color,
+                                                       start, end, sflags);
+       if (!hole_node)
                return -ENOSPC;
 
-       if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
-               return -ENOSPC;
-
-       if (alignment <= 1)
-               alignment = 0;
-
-       once = mode & DRM_MM_INSERT_ONCE;
-       mode &= ~DRM_MM_INSERT_ONCE;
-
-       remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
-       for (hole = first_hole(mm, range_start, range_end, size, mode);
-            hole;
-            hole = once ? NULL : next_hole(mm, hole, mode)) {
-               u64 hole_start = __drm_mm_hole_node_start(hole);
-               u64 hole_end = hole_start + hole->hole_size;
-               u64 adj_start, adj_end;
-               u64 col_start, col_end;
-
-               if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
-                       break;
-
-               if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
-                       break;
-
-               col_start = hole_start;
-               col_end = hole_end;
-               if (mm->color_adjust)
-                       mm->color_adjust(hole, color, &col_start, &col_end);
-
-               adj_start = max(col_start, range_start);
-               adj_end = min(col_end, range_end);
-
-               if (adj_end <= adj_start || adj_end - adj_start < size)
-                       continue;
-
-               if (mode == DRM_MM_INSERT_HIGH)
-                       adj_start = adj_end - size;
-
-               if (alignment) {
-                       u64 rem;
-
-                       if (likely(remainder_mask))
-                               rem = adj_start & remainder_mask;
-                       else
-                               div64_u64_rem(adj_start, alignment, &rem);
-                       if (rem) {
-                               adj_start -= rem;
-                               if (mode != DRM_MM_INSERT_HIGH)
-                                       adj_start += alignment;
-
-                               if (adj_start < max(col_start, range_start) ||
-                                   min(col_end, range_end) - adj_start < size)
-                                       continue;
-
-                               if (adj_end <= adj_start ||
-                                   adj_end - adj_start < size)
-                                       continue;
-                       }
-               }
-
-               node->mm = mm;
-               node->size = size;
-               node->start = adj_start;
-               node->color = color;
-               node->hole_size = 0;
-
-               __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
-               list_add(&node->node_list, &hole->node_list);
-               drm_mm_interval_tree_add_node(hole, node);
-
-               rm_hole(hole);
-               if (adj_start > hole_start)
-                       add_hole(hole);
-               if (adj_start + size < hole_end)
-                       add_hole(node);
-
-               save_stack(node);
-               return 0;
-       }
-
-       return -ENOSPC;
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
-static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
-{
-       return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+       drm_mm_insert_helper(hole_node, node,
+                            size, alignment, color,
+                            start, end, aflags);
+       return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
 /**
  * drm_mm_remove_node - Remove a memory node from the allocator.
@@ -607,24 +440,97 @@ void drm_mm_remove_node(struct drm_mm_node *node)
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
 
-       DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
-       DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
+       DRM_MM_BUG_ON(!node->allocated);
+       DRM_MM_BUG_ON(node->scanned_block);
 
-       prev_node = list_prev_entry(node, node_list);
+       prev_node =
+           list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+       if (drm_mm_hole_follows(node)) {
+               DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
+                             __drm_mm_hole_node_end(node));
+               list_del(&node->hole_stack);
+       } else {
+               DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
+                             __drm_mm_hole_node_end(node));
+       }
 
-       if (drm_mm_hole_follows(node))
-               rm_hole(node);
+       if (!drm_mm_hole_follows(prev_node)) {
+               prev_node->hole_follows = 1;
+               list_add(&prev_node->hole_stack, &mm->hole_stack);
+       } else
+               list_move(&prev_node->hole_stack, &mm->hole_stack);
 
+#ifdef __linux__
        drm_mm_interval_tree_remove(node, &mm->interval_tree);
+#endif
        list_del(&node->node_list);
+       node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
+{
+       if (end - start < size)
+               return 0;
 
-       if (drm_mm_hole_follows(prev_node))
-               rm_hole(prev_node);
-       add_hole(prev_node);
+       if (alignment) {
+               u64 rem;
+
+               div64_u64_rem(start, alignment, &rem);
+               if (rem)
+                       start += alignment - rem;
+       }
 
-       clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
+       return end >= start + size;
+}
+
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+                                                       u64 size,
+                                                       u64 alignment,
+                                                       unsigned long color,
+                                                       u64 start,
+                                                       u64 end,
+                                                       enum drm_mm_search_flags flags)
+{
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       u64 adj_start;
+       u64 adj_end;
+       u64 best_size;
+
+       DRM_MM_BUG_ON(mm->scan_active);
+
+       best = NULL;
+       best_size = ~0UL;
+
+       __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+                              flags & DRM_MM_SEARCH_BELOW) {
+               u64 hole_size = adj_end - adj_start;
+
+               if (mm->color_adjust) {
+                       mm->color_adjust(entry, color, &adj_start, &adj_end);
+                       if (adj_end <= adj_start)
+                               continue;
+               }
+
+               adj_start = max(adj_start, start);
+               adj_end = min(adj_end, end);
+
+               if (!check_free_hole(adj_start, adj_end, size, alignment))
+                       continue;
+
+               if (!(flags & DRM_MM_SEARCH_BEST))
+                       return entry;
+
+               if (hole_size < best_size) {
+                       best = entry;
+                       best_size = hole_size;
+               }
+       }
+
+       return best;
 }
-EXPORT_SYMBOL(drm_mm_remove_node);
 
 /**
  * drm_mm_replace_node - move an allocation from @old to @new
@@ -637,27 +543,22 @@ EXPORT_SYMBOL(drm_mm_remove_node);
  */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
-       struct drm_mm *mm = old->mm;
-
-       DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
-
-       *new = *old;
+       DRM_MM_BUG_ON(!old->allocated);
 
-       __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
        list_replace(&old->node_list, &new->node_list);
-       rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
-
-       if (drm_mm_hole_follows(old)) {
-               list_replace(&old->hole_stack, &new->hole_stack);
-               rb_replace_node_cached(&old->rb_hole_size,
-                                      &new->rb_hole_size,
-                                      &mm->holes_size);
-               rb_replace_node(&old->rb_hole_addr,
-                               &new->rb_hole_addr,
-                               &mm->holes_addr);
-       }
-
-       clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
+       list_replace(&old->hole_stack, &new->hole_stack);
+#ifdef __linux__
+       rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
+#endif
+       new->hole_follows = old->hole_follows;
+       new->mm = old->mm;
+       new->start = old->start;
+       new->size = old->size;
+       new->color = old->color;
+       new->__subtree_last = old->__subtree_last;
+
+       old->allocated = 0;
+       new->allocated = 1;
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
 
@@ -702,7 +603,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * @color: opaque tag value to use for the allocation
  * @start: start of the allowed range for the allocation
  * @end: end of the allowed range for the allocation
- * @mode: fine-tune the allocation search and placement
+ * @flags: flags to specify how the allocation will be performed afterwards
  *
  * This simply sets up the scanning routines with the parameters for the desired
  * hole.
@@ -718,7 +619,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
                                 unsigned long color,
                                 u64 start,
                                 u64 end,
-                                enum drm_mm_insert_mode mode)
+                                unsigned int flags)
 {
        DRM_MM_BUG_ON(start >= end);
        DRM_MM_BUG_ON(!size || size > end - start);
@@ -733,7 +634,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
        scan->alignment = alignment;
        scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
        scan->size = size;
-       scan->mode = mode;
+       scan->flags = flags;
 
        DRM_MM_BUG_ON(end <= start);
        scan->range_start = start;
@@ -765,9 +666,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
        u64 adj_start, adj_end;
 
        DRM_MM_BUG_ON(node->mm != mm);
-       DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
-       DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
-       __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+       DRM_MM_BUG_ON(!node->allocated);
+       DRM_MM_BUG_ON(node->scanned_block);
+       node->scanned_block = true;
        mm->scan_active++;
 
        /* Remove this block from the node_list so that we enlarge the hole
@@ -792,7 +693,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
        if (adj_end <= adj_start || adj_end - adj_start < scan->size)
                return false;
 
-       if (scan->mode == DRM_MM_INSERT_HIGH)
+       if (scan->flags == DRM_MM_CREATE_TOP)
                adj_start = adj_end - scan->size;
 
        if (scan->alignment) {
@@ -804,7 +705,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
                        div64_u64_rem(adj_start, scan->alignment, &rem);
                if (rem) {
                        adj_start -= rem;
-                       if (scan->mode != DRM_MM_INSERT_HIGH)
+                       if (scan->flags != DRM_MM_CREATE_TOP)
                                adj_start += scan->alignment;
                        if (adj_start < max(col_start, scan->range_start) ||
                            min(col_end, scan->range_end) - adj_start < scan->size)
@@ -840,7 +741,7 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
  * When the scan list is empty, the selected memory nodes can be freed. An
  * immediately following drm_mm_insert_node_in_range_generic() or one of the
  * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
- * the just freed block (because it's at the top of the free_stack list).
+ * the just freed block (because its at the top of the free_stack list).
  *
  * Returns:
  * True if this block should be evicted, false otherwise. Will always
@@ -852,8 +753,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
        struct drm_mm_node *prev_node;
 
        DRM_MM_BUG_ON(node->mm != scan->mm);
-       DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
-       __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+       DRM_MM_BUG_ON(!node->scanned_block);
+       node->scanned_block = false;
 
        DRM_MM_BUG_ON(!node->mm->scan_active);
        node->mm->scan_active--;
@@ -898,24 +799,9 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
        if (!mm->color_adjust)
                return NULL;
 
-       /*
-        * The hole found during scanning should ideally be the first element
-        * in the hole_stack list, but due to side-effects in the driver it
-        * may not be.
-        */
-       list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
-               hole_start = __drm_mm_hole_node_start(hole);
-               hole_end = hole_start + hole->hole_size;
-
-               if (hole_start <= scan->hit_start &&
-                   hole_end >= scan->hit_end)
-                       break;
-       }
-
-       /* We should only be called after we found the hole previously */
-       DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
-       if (unlikely(&hole->hole_stack == &mm->hole_stack))
-               return NULL;
+       hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
+       hole_start = __drm_mm_hole_node_start(hole);
+       hole_end = __drm_mm_hole_node_end(hole);
 
        DRM_MM_BUG_ON(hole_start > scan->hit_start);
        DRM_MM_BUG_ON(hole_end < scan->hit_end);
@@ -942,22 +828,21 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 {
        DRM_MM_BUG_ON(start + size <= start);
 
-       mm->color_adjust = NULL;
-
        INIT_LIST_HEAD(&mm->hole_stack);
-       mm->interval_tree = RB_ROOT_CACHED;
-       mm->holes_size = RB_ROOT_CACHED;
-       mm->holes_addr = RB_ROOT;
+       mm->scan_active = 0;
 
        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
-       mm->head_node.flags = 0;
+       mm->head_node.allocated = 0;
+       mm->head_node.hole_follows = 1;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
-       mm->head_node.size = -size;
-       add_hole(&mm->head_node);
+       mm->head_node.size = start - mm->head_node.start;
+       list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
-       mm->scan_active = 0;
+       mm->interval_tree = RB_ROOT;
+
+       mm->color_adjust = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
@@ -978,17 +863,20 @@ EXPORT_SYMBOL(drm_mm_takedown);
 
 static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
 {
-       u64 start, size;
-
-       size = entry->hole_size;
-       if (size) {
-               start = drm_mm_hole_node_start(entry);
-               drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
-                          start, start + size, size);
+       u64 hole_start, hole_end, hole_size;
+
+       if (entry->hole_follows) {
+               hole_start = drm_mm_hole_node_start(entry);
+               hole_end = drm_mm_hole_node_end(entry);
+               hole_size = hole_end - hole_start;
+               drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
+                          hole_end, hole_size);
+               return hole_size;
        }
 
-       return size;
+       return 0;
 }
+
 /**
  * drm_mm_print - print allocator state
  * @mm: drm_mm allocator to print
diff --git sys/dev/pci/drm/drm_vma_manager.c sys/dev/pci/drm/drm_vma_manager.c
index 7b17da77006..7ef7ce59c94 100644
--- sys/dev/pci/drm/drm_vma_manager.c
+++ sys/dev/pci/drm/drm_vma_manager.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*     $OpenBSD: drm_vma_manager.c,v 1.5 2020/06/08 04:47:58 jsg Exp $ */
 /*
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  * Copyright (c) 2012 David Airlie <airl...@linux.ie>
@@ -86,6 +86,7 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
                                 unsigned long page_offset, unsigned long size)
 {
        rw_init(&mgr->vm_lock, "drmvmo");
+       mgr->vm_addr_space_rb = RB_ROOT;
        drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
 }
 EXPORT_SYMBOL(drm_vma_offset_manager_init);
@@ -103,7 +104,10 @@ EXPORT_SYMBOL(drm_vma_offset_manager_init);
  */
 void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
 {
+       /* take the lock to protect against buggy drivers */
+       write_lock(&mgr->vm_lock);
        drm_mm_takedown(&mgr->vm_addr_space_mm);
+       write_unlock(&mgr->vm_lock);
 }
 EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
 
@@ -113,44 +117,27 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
  * @start: Start address for object (page-based)
  * @pages: Size of object (page-based)
  *
- * Find a node given a start address and object size. This returns the _best_
- * match for the given node. That is, @start may point somewhere into a valid
- * region and the given node will be returned, as long as the node spans the
- * whole requested area (given the size in number of pages as @pages).
- *
- * Note that before lookup the vma offset manager lookup lock must be acquired
- * with drm_vma_offset_lock_lookup(). See there for an example. This can then be
- * used to implement weakly referenced lookups using kref_get_unless_zero().
- *
- * Example:
- *
- * ::
- *
- *     drm_vma_offset_lock_lookup(mgr);
- *     node = drm_vma_offset_lookup_locked(mgr);
- *     if (node)
- *         kref_get_unless_zero(container_of(node, sth, entr));
- *     drm_vma_offset_unlock_lookup(mgr);
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
  *
  * RETURNS:
  * Returns NULL if no suitable node can be found. Otherwise, the best match
- * is returned. It's the caller's responsibility to make sure the node doesn't
- * get destroyed before the caller can access it.
+ * is returned.
  */
 struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
                                                         unsigned long start,
                                                         unsigned long pages)
 {
-       struct drm_mm_node *node, *best;
+       struct drm_vma_offset_node *node, *best;
        struct rb_node *iter;
        unsigned long offset;
 
-       iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
+       iter = mgr->vm_addr_space_rb.rb_node;
        best = NULL;
 
        while (likely(iter)) {
-               node = rb_entry(iter, struct drm_mm_node, rb);
-               offset = node->start;
+               node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+               offset = node->vm_node.start;
                if (start >= offset) {
                        iter = iter->rb_right;
                        best = node;
@@ -163,18 +150,39 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
 
        /* verify that the node spans the requested area */
        if (best) {
-               offset = best->start + best->size;
+               offset = best->vm_node.start + best->vm_node.size;
                if (offset < start + pages)
                        best = NULL;
        }
 
-       if (!best)
-               return NULL;
-
-       return container_of(best, struct drm_vma_offset_node, vm_node);
+       return best;
 }
 EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
 
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+                                  struct drm_vma_offset_node *node)
+{
+       struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+       struct rb_node *parent = NULL;
+       struct drm_vma_offset_node *iter_node;
+
+       while (likely(*iter)) {
+               parent = *iter;
+               iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+               if (node->vm_node.start < iter_node->vm_node.start)
+                       iter = &(*iter)->rb_left;
+               else if (node->vm_node.start > iter_node->vm_node.start)
+                       iter = &(*iter)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&node->vm_rb, parent, iter);
+       rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
 /**
  * drm_vma_offset_add() - Add offset node to manager
  * @mgr: Manager object
@@ -201,16 +209,24 @@ EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
 int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
                       struct drm_vma_offset_node *node, unsigned long pages)
 {
-       int ret = 0;
+       int ret;
 
        write_lock(&mgr->vm_lock);
 
-       if (!drm_mm_node_allocated(&node->vm_node))
-               ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
-                                        &node->vm_node, pages);
+       if (drm_mm_node_allocated(&node->vm_node)) {
+               ret = 0;
+               goto out_unlock;
+       }
+
+       ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+                                pages, 0, DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               goto out_unlock;
 
-       write_unlock(&mgr->vm_lock);
+       _drm_vma_offset_add_rb(mgr, node);
 
+out_unlock:
+       write_unlock(&mgr->vm_lock);
        return ret;
 }
 EXPORT_SYMBOL(drm_vma_offset_add);
@@ -232,6 +248,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
        write_lock(&mgr->vm_lock);
 
        if (drm_mm_node_allocated(&node->vm_node)) {
+               rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
                drm_mm_remove_node(&node->vm_node);
                memset(&node->vm_node, 0, sizeof(node->vm_node));
        }
@@ -243,9 +260,9 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
 /**
  * drm_vma_node_allow - Add open-file to list of allowed users
  * @node: Node to modify
- * @tag: Tag of file to remove
+ * @filp: Open file to add
  *
- * Add @tag to the list of allowed open-files for this node. If @tag is
+ * Add @filp to the list of allowed open-files for this node. If @filp is
  * already on this list, the ref-count is incremented.
  *
  * The list of allowed-users is preserved across drm_vma_offset_add() and
@@ -260,7 +277,7 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
  * RETURNS:
  * 0 on success, negative error code on internal failure (out-of-mem)
  */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
 {
        struct rb_node **iter;
        struct rb_node *parent = NULL;
@@ -281,10 +298,10 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
                parent = *iter;
                entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
 
-               if (tag == entry->vm_tag) {
+               if (filp == entry->vm_filp) {
                        entry->vm_count++;
                        goto unlock;
-               } else if (tag > entry->vm_tag) {
+               } else if (filp > entry->vm_filp) {
                        iter = &(*iter)->rb_right;
                } else {
                        iter = &(*iter)->rb_left;
@@ -296,7 +313,7 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
                goto unlock;
        }
 
-       new->vm_tag = tag;
+       new->vm_filp = filp;
        new->vm_count = 1;
        rb_link_node(&new->vm_rb, parent, iter);
        rb_insert_color(&new->vm_rb, &node->vm_files);
@@ -312,18 +329,17 @@ EXPORT_SYMBOL(drm_vma_node_allow);
 /**
  * drm_vma_node_revoke - Remove open-file from list of allowed users
  * @node: Node to modify
- * @tag: Tag of file to remove
+ * @filp: Open file to remove
  *
- * Decrement the ref-count of @tag in the list of allowed open-files on @node.
- * If the ref-count drops to zero, remove @tag from the list. You must call
- * this once for every drm_vma_node_allow() on @tag.
+ * Decrement the ref-count of @filp in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @filp from the list. You must call
+ * this once for every drm_vma_node_allow() on @filp.
  *
  * This is locked against concurrent access internally.
  *
- * If @tag is not on the list, nothing is done.
+ * If @filp is not on the list, nothing is done.
  */
-void drm_vma_node_revoke(struct drm_vma_offset_node *node,
-                        struct drm_file *tag)
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
 {
        struct drm_vma_offset_file *entry;
        struct rb_node *iter;
@@ -333,13 +349,13 @@ void drm_vma_node_revoke(struct drm_vma_offset_node *node,
        iter = node->vm_files.rb_node;
        while (likely(iter)) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-               if (tag == entry->vm_tag) {
+               if (filp == entry->vm_filp) {
                        if (!--entry->vm_count) {
                                rb_erase(&entry->vm_rb, &node->vm_files);
                                kfree(entry);
                        }
                        break;
-               } else if (tag > entry->vm_tag) {
+               } else if (filp > entry->vm_filp) {
                        iter = iter->rb_right;
                } else {
                        iter = iter->rb_left;
@@ -353,9 +369,9 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
 /**
  * drm_vma_node_is_allowed - Check whether an open-file is granted access
  * @node: Node to check
- * @tag: Tag of file to remove
+ * @filp: Open-file to check for
  *
- * Search the list in @node whether @tag is currently on the list of allowed
+ * Search the list in @node whether @filp is currently on the list of allowed
  * open-files (see drm_vma_node_allow()).
  *
  * This is locked against concurrent access internally.
@@ -364,7 +380,7 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
  * true iff @filp is on the list
  */
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-                            struct drm_file *tag)
+                            struct file *filp)
 {
        struct drm_vma_offset_file *entry;
        struct rb_node *iter;
@@ -374,9 +390,9 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
        iter = node->vm_files.rb_node;
        while (likely(iter)) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-               if (tag == entry->vm_tag)
+               if (filp == entry->vm_filp)
                        break;
-               else if (tag > entry->vm_tag)
+               else if (filp > entry->vm_filp)
                        iter = iter->rb_right;
                else
                        iter = iter->rb_left;
diff --git sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
index 971ed84f371..b6bf81f647b 100644
--- sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
+++ sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
@@ -877,7 +877,13 @@ static void reloc_cache_init(struct reloc_cache *cache,
        cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
        cache->has_fence = cache->gen < 4;
        cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
+#ifdef notyet
        cache->node.flags = 0;
+#else
+       cache->node.hole_follows = 0;
+       cache->node.allocated = 0;
+       cache->node.scanned_block = 0;
+#endif
        cache->rq = NULL;
        cache->rq_size = 0;
 
diff --git sys/dev/pci/drm/i915/gem/i915_gem_mman.c sys/dev/pci/drm/i915/gem/i915_gem_mman.c
index 23423395aa2..3e627edcba6 100644
--- sys/dev/pci/drm/i915/gem/i915_gem_mman.c
+++ sys/dev/pci/drm/i915/gem/i915_gem_mman.c
@@ -911,7 +911,7 @@ insert:
        GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
 out:
        if (file)
-               drm_vma_node_allow(&mmo->vma_node, file);
+               drm_vma_node_allow(&mmo->vma_node, file->filp);
        return mmo;
 
 err:
@@ -1240,7 +1240,7 @@ i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  off >> PAGE_SHIFT,
                                                  atop(round_page(size)));
-       if (node && drm_vma_node_is_allowed(node, priv)) {
+       if (node && drm_vma_node_is_allowed(node, filp)) {
                /*
                 * Skip 0-refcnted objects as it is in the process of being
                 * destroyed and will be invalid when the vma manager lock
diff --git sys/dev/pci/drm/i915/gem/i915_gem_object.c sys/dev/pci/drm/i915/gem/i915_gem_object.c
index 8dd99dc0894..7511a515a4d 100644
--- sys/dev/pci/drm/i915/gem/i915_gem_object.c
+++ sys/dev/pci/drm/i915/gem/i915_gem_object.c
@@ -134,7 +134,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 
        spin_lock(&obj->mmo.lock);
        rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
-               drm_vma_node_revoke(&mmo->vma_node, file);
+               drm_vma_node_revoke(&mmo->vma_node, file->filp);
        spin_unlock(&obj->mmo.lock);
 
        list_for_each_entry_safe(lut, ln, &close, obj_link) {
diff --git sys/dev/pci/drm/i915/i915_gem.c sys/dev/pci/drm/i915/i915_gem.c
index 5857612aa3c..92ed27c93ea 100644
--- sys/dev/pci/drm/i915/i915_gem.c
+++ sys/dev/pci/drm/i915/i915_gem.c
@@ -438,7 +438,13 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
+#ifdef notyet
                node.flags = 0;
+#else
+               node.hole_follows = 0;
+               node.allocated = 0;
+               node.scanned_block = 0;
+#endif
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
@@ -664,7 +670,13 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
+#ifdef notyet
                node.flags = 0;
+#else
+               node.hole_follows = 0;
+               node.allocated = 0;
+               node.scanned_block = 0;
+#endif
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
diff --git sys/dev/pci/drm/include/drm/drm_mm.h sys/dev/pci/drm/include/drm/drm_mm.h
index ee8b0e80ca9..63a94d20f41 100644
--- sys/dev/pci/drm/include/drm/drm_mm.h
+++ sys/dev/pci/drm/include/drm/drm_mm.h
@@ -40,7 +40,6 @@
 #include <linux/bug.h>
 #include <linux/rbtree.h>
 #include <linux/kernel.h>
-#include <linux/mm_types.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #ifdef CONFIG_DRM_DEBUG_MM
@@ -54,6 +53,20 @@
 #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
+enum drm_mm_search_flags {
+       DRM_MM_SEARCH_DEFAULT =         0,
+       DRM_MM_SEARCH_BEST =            1 << 0,
+       DRM_MM_SEARCH_BELOW =           1 << 1,
+};
+
+enum drm_mm_allocator_flags {
+       DRM_MM_CREATE_DEFAULT =         0,
+       DRM_MM_CREATE_TOP =             1 << 0,
+};
+
+#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
+#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+
 /**
  * enum drm_mm_insert_mode - control search and allocation behaviour
  *
@@ -160,17 +173,14 @@ struct drm_mm_node {
        /** @size: Size of the allocated block. */
        u64 size;
        /* private: */
-       struct drm_mm *mm;
        struct list_head node_list;
        struct list_head hole_stack;
        struct rb_node rb;
-       struct rb_node rb_hole_size;
-       struct rb_node rb_hole_addr;
+       unsigned hole_follows : 1;
+       unsigned allocated : 1;
+       bool scanned_block : 1;
        u64 __subtree_last;
-       u64 hole_size;
-       unsigned long flags;
-#define DRM_MM_NODE_ALLOCATED_BIT      0
-#define DRM_MM_NODE_SCANNED_BIT                1
+       struct drm_mm *mm;
 #ifdef CONFIG_DRM_DEBUG_MM
        depot_stack_handle_t stack;
 #endif
@@ -205,9 +215,7 @@ struct drm_mm {
         * according to the (increasing) start address of the memory node. */
        struct drm_mm_node head_node;
        /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
-       struct rb_root_cached interval_tree;
-       struct rb_root_cached holes_size;
-       struct rb_root holes_addr;
+       struct rb_root interval_tree;
 
        unsigned long scan_active;
 };
@@ -236,7 +244,7 @@ struct drm_mm_scan {
        u64 hit_end;
 
        unsigned long color;
-       enum drm_mm_insert_mode mode;
+       unsigned int flags;
 };
 
 /**
@@ -254,7 +262,7 @@ struct drm_mm_scan {
  */
 static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
 {
-       return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
+       return node->allocated;
 }
 
 /**
@@ -272,7 +280,7 @@ static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
  */
 static inline bool drm_mm_initialized(const struct drm_mm *mm)
 {
-       return READ_ONCE(mm->hole_stack.next);
+       return mm->hole_stack.next;
 }
 
 /**
@@ -289,7 +297,7 @@ static inline bool drm_mm_initialized(const struct drm_mm *mm)
  */
 static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
 {
-       return node->hole_size;
+       return node->hole_follows;
 }
 
 static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
@@ -372,9 +380,17 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
 #define drm_mm_for_each_node_safe(entry, next, mm) \
        list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
 
+#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
+       for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+            &entry->hole_stack != &(mm)->hole_stack ? \
+            hole_start = drm_mm_hole_node_start(entry), \
+            hole_end = drm_mm_hole_node_end(entry), \
+            1 : 0; \
+            entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /**
  * drm_mm_for_each_hole - iterator to walk over all holes
- * @pos: &drm_mm_node used internally to track progress
+ * @entry: &drm_mm_node used internally to track progress
  * @mm: &drm_mm allocator to walk
  * @hole_start: ulong variable to assign the hole start to on each iteration
  * @hole_end: ulong variable to assign the hole end to on each iteration
@@ -387,28 +403,79 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
  * Implementation Note:
  * We need to inline list_for_each_entry in order to be able to set hole_start
  * and hole_end on each iteration while keeping the macro sane.
+ *
+ * The __drm_mm_for_each_hole version is similar, but with added support for
+ * going backwards.
  */
-#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
-       for (pos = list_first_entry(&(mm)->hole_stack, \
-                                   typeof(*pos), hole_stack); \
-            &pos->hole_stack != &(mm)->hole_stack ? \
-            hole_start = drm_mm_hole_node_start(pos), \
-            hole_end = hole_start + pos->hole_size, \
-            1 : 0; \
-            pos = list_next_entry(pos, hole_stack))
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+       __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
 
 /*
  * Basic range manager support (drm_mm.c)
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
-int drm_mm_insert_node_in_range(struct drm_mm *mm,
-                               struct drm_mm_node *node,
-                               u64 size,
-                               u64 alignment,
-                               unsigned long color,
-                               u64 start,
-                               u64 end,
-                               enum drm_mm_insert_mode mode);
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+                                       struct drm_mm_node *node,
+                                       u64 size,
+                                       u64 alignment,
+                                       unsigned long color,
+                                       u64 start,
+                                       u64 end,
+                                       enum drm_mm_search_flags sflags,
+                                       enum drm_mm_allocator_flags aflags);
+
+/**
+ * drm_mm_insert_node_in_range - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocate node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @mode: fine-tune the allocation search and placement
+ *
+ * Maps @mode onto the flags taken by drm_mm_insert_node_in_range_generic().
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
+                                             struct drm_mm_node *node,
+                                             u64 size,
+                                             u64 alignment,
+                                             unsigned long color,
+                                             u64 start,
+                                             u64 end,
+                                             enum drm_mm_insert_mode mode)
+{
+       enum drm_mm_search_flags sflags;
+       enum drm_mm_allocator_flags aflags;
+       switch (mode) {
+       case DRM_MM_INSERT_HIGHEST:
+               sflags = DRM_MM_SEARCH_BELOW;
+               aflags = DRM_MM_CREATE_TOP;
+               break;
+       case DRM_MM_INSERT_BEST:
+               sflags = DRM_MM_SEARCH_BEST;
+               aflags = DRM_MM_CREATE_DEFAULT;
+               break;
+       case DRM_MM_INSERT_LOW:
+       case DRM_MM_INSERT_HIGH:
+       case DRM_MM_INSERT_EVICT:
+       case DRM_MM_INSERT_ONCE:
+       case DRM_MM_INSERT_LOWEST:
+       default:
+               sflags = DRM_MM_SEARCH_DEFAULT;
+               aflags = DRM_MM_CREATE_DEFAULT;
+               break;
+       }
+       return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+                                                  color, start, end,
+                                                  sflags, aflags);
+}
 
 /**
  * drm_mm_insert_node_generic - search for space and insert @node
@@ -417,9 +484,10 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm,
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for this node
- * @mode: fine-tune the allocation search and placement
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
  *
- * This is a simplified version of drm_mm_insert_node_in_range() with no
+ * This is a simplified version of drm_mm_insert_node_in_range_generic() with no
  * range restrictions applied.
  *
  * The preallocated node must be cleared to 0.
@@ -431,11 +499,13 @@ static inline int
 drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                           u64 size, u64 alignment,
                           unsigned long color,
-                          enum drm_mm_insert_mode mode)
+                          enum drm_mm_search_flags sflags,
+                          enum drm_mm_allocator_flags aflags)
 {
-       return drm_mm_insert_node_in_range(mm, node,
-                                          size, alignment, color,
-                                          0, U64_MAX, mode);
+       return drm_mm_insert_node_in_range_generic(mm, node,
+                                                  size, alignment, 0,
+                                                  0, U64_MAX,
+                                                  sflags, aflags);
 }
 
 /**
@@ -443,6 +513,8 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
  * @mm: drm_mm to allocate from
  * @node: preallocate node to insert
  * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @flags: flags to fine-tune the allocation
  *
  * This is a simplified version of drm_mm_insert_node_generic() with @color set
  * to 0.
@@ -454,9 +526,13 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
  */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
                                     struct drm_mm_node *node,
-                                    u64 size)
+                                    u64 size,
+                                    u64 alignment,
+                                    enum drm_mm_search_flags flags)
 {
-       return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
+       return drm_mm_insert_node_generic(mm, node,
+                                         size, alignment, 0,
+                                         flags, DRM_MM_CREATE_DEFAULT);
 }
 
 void drm_mm_remove_node(struct drm_mm_node *node);
@@ -493,20 +569,17 @@ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
  * but using the internal interval tree to accelerate the search for the
  * starting node, and so not safe against removal of elements. It assumes
  * that @end is within (or is the upper limit of) the drm_mm allocator.
- * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
- * over the special _unallocated_ &drm_mm.head_node, and may even continue
- * indefinitely.
  */
 #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)    \
        for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
-            node__->start < (end__);                                   \
+            node__ && node__->start < (end__);                         \
             node__ = list_next_entry(node__, node_list))
 
 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
                                 struct drm_mm *mm,
                                 u64 size, u64 alignment, unsigned long color,
                                 u64 start, u64 end,
-                                enum drm_mm_insert_mode mode);
+                                unsigned int flags);
 
 /**
  * drm_mm_scan_init - initialize lru scanning
@@ -515,7 +588,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for the allocation
- * @mode: fine-tune the allocation search and placement
+ * @flags: flags to specify how the allocation will be performed afterwards
  *
  * This is a simplified version of drm_mm_scan_init_with_range() with no range
  * restrictions applied.
@@ -532,11 +605,12 @@ static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
                                    u64 size,
                                    u64 alignment,
                                    unsigned long color,
-                                   enum drm_mm_insert_mode mode)
+                                   unsigned int flags)
 {
        drm_mm_scan_init_with_range(scan, mm,
                                    size, alignment, color,
-                                   0, U64_MAX, mode);
+                                   0, U64_MAX,
+                                   flags);
 }
 
 bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
diff --git sys/dev/pci/drm/include/drm/drm_vma_manager.h sys/dev/pci/drm/include/drm/drm_vma_manager.h
index 42a848898f2..0a1812412b4 100644
--- sys/dev/pci/drm/include/drm/drm_vma_manager.h
+++ sys/dev/pci/drm/include/drm/drm_vma_manager.h
@@ -1,3 +1,4 @@
+/*     $OpenBSD: drm_vma_manager.h,v 1.3 2020/06/08 04:48:14 jsg Exp $ */
 #ifndef __DRM_VMA_MANAGER_H__
 #define __DRM_VMA_MANAGER_H__
 
@@ -45,19 +46,21 @@ struct drm_file;
 
 struct drm_vma_offset_file {
        struct rb_node vm_rb;
-       struct drm_file *vm_tag;
+       struct file *vm_filp;
        unsigned long vm_count;
 };
 
 struct drm_vma_offset_node {
        rwlock_t vm_lock;
        struct drm_mm_node vm_node;
+       struct rb_node vm_rb;
        struct rb_root vm_files;
        bool readonly:1;
 };
 
 struct drm_vma_offset_manager {
        rwlock_t vm_lock;
+       struct rb_root vm_addr_space_rb;
        struct drm_mm vm_addr_space_mm;
 };
 
@@ -73,11 +76,10 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                           struct drm_vma_offset_node *node);
 
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
-void drm_vma_node_revoke(struct drm_vma_offset_node *node,
-                        struct drm_file *tag);
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-                            struct drm_file *tag);
+                            struct file *filp);
 
 /**
  * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
@@ -204,6 +206,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
        return ((__u64)node->vm_node.start) << PAGE_SHIFT;
 }
 
+#ifdef __linux__
 /**
  * drm_vma_node_unmap() - Unmap offset node
  * @node: Offset node
@@ -216,7 +219,6 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
  * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
  * is not called on this node concurrently.
  */
-#ifdef __linux__
 static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
                                      struct address_space *file_mapping)
 {
@@ -230,9 +232,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
 /**
  * drm_vma_node_verify_access() - Access verification helper for TTM
  * @node: Offset node
- * @tag: Tag of file to check
+ * @filp: Open-file
  *
- * This checks whether @tag is granted access to @node. It is the same as
+ * This checks whether @filp is granted access to @node. It is the same as
  * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
  * verify_access() callbacks.
  *
@@ -240,9 +242,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
  * 0 if access is granted, -EACCES otherwise.
  */
 static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
-                                            struct drm_file *tag)
+                                            struct file *filp)
 {
-       return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
+       return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
 }
 
 #endif /* __DRM_VMA_MANAGER_H__ */
diff --git sys/dev/pci/drm/include/linux/rbtree.h sys/dev/pci/drm/include/linux/rbtree.h
index 39d013afd50..875ce42f2cd 100644
--- sys/dev/pci/drm/include/linux/rbtree.h
+++ sys/dev/pci/drm/include/linux/rbtree.h
@@ -85,8 +85,6 @@ RB_PROTOTYPE(linux_root, rb_node, __entry, panic_cmp);
 #define        rb_erase_cached(node, root)                                             \
        linux_root_RB_REMOVE((struct linux_root *)(&(root)->rb_root), (node))
 #define        rb_first_cached(root)   RB_MIN(linux_root, (struct linux_root *)(&(root)->rb_root))
-#define        rb_replace_node_cached(old, new, root)                          \
-       rb_replace_node(old, new, &(root)->rb_root)
 
 static inline struct rb_node *
 __rb_deepest_left(struct rb_node *node)
diff --git sys/dev/pci/drm/radeon/radeon_ttm.c sys/dev/pci/drm/radeon/radeon_ttm.c
index 7c116a385d5..e20c42a715c 100644
--- sys/dev/pci/drm/radeon/radeon_ttm.c
+++ sys/dev/pci/drm/radeon/radeon_ttm.c
@@ -186,11 +186,10 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
-       struct drm_file *file_priv = (void *)filp;
 
        if (radeon_ttm_tt_has_userptr(bo->ttm))
                return -EPERM;
-       return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, file_priv);
+       return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, filp);
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,
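
For reference, here is a rough caller-side sketch of the reverted drm_mm
interface. It is not part of the patch; the function name, size, alignment
and range values are made up for illustration. With the revert, placement is
picked through the DRM_MM_SEARCH_*/DRM_MM_CREATE_* flag pairs instead of
enum drm_mm_insert_mode:

  /* hypothetical example, assuming the reverted drm_mm.h above */
  #include <drm/drm_mm.h>

  static int example_place_buffer(struct drm_mm *mm, struct drm_mm_node *node,
      u64 size)
  {
          /* the preallocated node must be cleared to 0 */
          memset(node, 0, sizeof(*node));

          /* old-style equivalent of a top-down (DRM_MM_INSERT_HIGHEST) placement */
          return drm_mm_insert_node_in_range_generic(mm, node, size,
              PAGE_SIZE /* alignment */, 0 /* color */,
              0, U64_MAX,
              DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP);
  }

On -ENOSPC a driver would normally fall back to eviction via the drm_mm_scan_*
helpers, which with the revert also take plain unsigned flags again.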
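Similarly, the drm_vma_manager side goes back to tagging access by the open
struct file rather than struct drm_file. A hypothetical buffer-object flow
(the example_* names are invented; only the drm_vma_node_* calls come from
the header above):

  #include <drm/drm_vma_manager.h>

  struct example_bo {
          struct drm_vma_offset_node vma_node;
  };

  /* when a GEM handle is created, record the owning open file */
  static int example_open_handle(struct example_bo *bo, struct file *filp)
  {
          return drm_vma_node_allow(&bo->vma_node, filp);
  }

  /* TTM-style verify_access: -EACCES unless filp was allowed above */
  static int example_verify_access(struct example_bo *bo, struct file *filp)
  {
          return drm_vma_node_verify_access(&bo->vma_node, filp);
  }

  /* when the handle is closed, drop the grant again */
  static void example_close_handle(struct example_bo *bo, struct file *filp)
  {
          drm_vma_node_revoke(&bo->vma_node, filp);
  }

This is the flow the radeon_ttm.c hunk above restores: verify_access passes
the struct file * straight through instead of casting it to a drm_file.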
