On Tue, Sep 02, 2025 at 12:26:04AM +0530, Arunpravin Paneer Selvam wrote:
> Replace the freelist (O(n)) used for free block management with a
> red-black tree, providing more efficient O(log n) search, insert,
> and delete operations. This improves scalability and performance
> when managing large numbers of free blocks per order (e.g., hundreds
> or thousands).

Did you consider the interval tree?
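For reference, instantiating the kernel's generic interval tree
(include/linux/interval_tree_generic.h) for the free ranges would look roughly
like the sketch below; the free_range structure, its field names and the
helper prefix are made up for illustration and are not part of drm_buddy:

/*
 * Rough sketch only: free_range and its fields are hypothetical, not
 * existing drm_buddy structures.
 */
#include <linux/types.h>
#include <linux/interval_tree_generic.h>

struct free_range {
        struct rb_node rb;
        u64 start;              /* block offset */
        u64 last;               /* inclusive end offset of the block */
        u64 __subtree_last;     /* maintained by INTERVAL_TREE_DEFINE() */
};

#define FR_START(n) ((n)->start)
#define FR_LAST(n)  ((n)->last)

/*
 * Generates free_range_insert(), free_range_remove(),
 * free_range_iter_first() and free_range_iter_next(), all operating on a
 * struct rb_root_cached, which allows range queries such as "first free
 * range overlapping [start, last]".
 */
INTERVAL_TREE_DEFINE(struct free_range, rb, u64, __subtree_last,
                     FR_START, FR_LAST, static, free_range)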


> @@ -41,23 +43,53 @@ static void drm_block_free(struct drm_buddy *mm,
>       kmem_cache_free(slab_blocks, block);
>  }
>  
> -static void list_insert_sorted(struct drm_buddy *mm,
> -                            struct drm_buddy_block *block)
> +static void rbtree_insert(struct drm_buddy *mm,
> +                       struct drm_buddy_block *block)
>  {
> +     struct rb_root *root = &mm->free_tree[drm_buddy_block_order(block)];
> +     struct rb_node **link = &root->rb_node;
> +     struct rb_node *parent = NULL;
>       struct drm_buddy_block *node;
> -     struct list_head *head;
> +     u64 offset;
> +
> +     offset = drm_buddy_block_offset(block);
>  
> -     head = &mm->free_list[drm_buddy_block_order(block)];
> -     if (list_empty(head)) {
> -             list_add(&block->link, head);
> -             return;
> +     while (*link) {
> +             parent = *link;
> +             node = rb_entry(parent, struct drm_buddy_block, rb);
> +
> +             if (offset < drm_buddy_block_offset(node))
> +                     link = &parent->rb_left;
> +             else
> +                     link = &parent->rb_right;
>       }
>  
> -     list_for_each_entry(node, head, link)
> -             if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
> -                     break;
> +     rb_link_node(&block->rb, parent, link);
> +     rb_insert_color(&block->rb, root);
> +}

static inline bool __drm_bb_less(const struct drm_buddy_block *a,
                                 const struct drm_buddy_block *b)
{
        return drm_buddy_block_offset(a) < drm_buddy_block_offset(b);
}

#define __node_2_drm_bb(node) rb_entry((node), struct drm_buddy_block, rb)

static inline bool rb_drm_bb_less(struct rb_node *a, const struct rb_node *b)
{
        return __drm_bb_less(__node_2_drm_bb(a), __node_2_drm_bb(b));
}

static void rbtree_insert(struct drm_buddy *mm, struct drm_buddy_block *block)
{
        rb_add(&block->rb, &mm->free_tree[drm_buddy_block_order(block)],
               rb_drm_bb_less);
}

> +
> +static void rbtree_remove(struct drm_buddy *mm,
> +                       struct drm_buddy_block *block)
> +{
> +     struct rb_root *root;
> +
> +     root = &mm->free_tree[drm_buddy_block_order(block)];
> +     rb_erase(&block->rb, root);
>  
> -     __list_add(&block->link, node->link.prev, &node->link);
> +     RB_CLEAR_NODE(&block->rb);
> +}
> +
> +static inline struct drm_buddy_block *
> +rbtree_last_entry(struct drm_buddy *mm, unsigned int order)
> +{
> +     struct rb_node *node = rb_last(&mm->free_tree[order]);
> +
> +     return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
> +}

rb_add_cached() caches the leftmost entry; if you invert the key, the last is
first, and rbtree_last_entry() becomes an O(1) read of the cached node.
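Rough sketch of that, assuming mm->free_tree[] is switched to
struct rb_root_cached (and rbtree_remove() to rb_erase_cached()); not meant
as the final patch:

static inline bool rb_drm_bb_greater(struct rb_node *a, const struct rb_node *b)
{
        /* Inverted key: the largest offset ends up leftmost (and cached). */
        return drm_buddy_block_offset(__node_2_drm_bb(a)) >
               drm_buddy_block_offset(__node_2_drm_bb(b));
}

static void rbtree_insert(struct drm_buddy *mm, struct drm_buddy_block *block)
{
        rb_add_cached(&block->rb,
                      &mm->free_tree[drm_buddy_block_order(block)],
                      rb_drm_bb_greater);
}

static inline struct drm_buddy_block *
rbtree_last_entry(struct drm_buddy *mm, unsigned int order)
{
        /* Cached leftmost == largest offset, so this is O(1). */
        struct rb_node *node = rb_first_cached(&mm->free_tree[order]);

        return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
}

rbtree_remove() would then call rb_erase_cached(&block->rb, root) on the same
cached root.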

> diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
> index 8d2ba3749866..17190bb4837c 100644
> --- a/include/linux/rbtree.h
> +++ b/include/linux/rbtree.h
> @@ -79,6 +79,62 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
>          ____ptr ? rb_entry(____ptr, type, member) : NULL; \
>       })
>  
> +/**
> + * rbtree_for_each_entry - iterate in-order over rb_root of given type
> + *
> + * @pos:     the 'type *' to use as a loop cursor.
> + * @root:    'rb_root *' of the rbtree.
> + * @member:  the name of the rb_node field within 'type'.
> + */
> +#define rbtree_for_each_entry(pos, root, member) \
> +     for ((pos) = rb_entry_safe(rb_first(root), typeof(*(pos)), member); \
> +          (pos); \
> +          (pos) = rb_entry_safe(rb_next(&(pos)->member), typeof(*(pos)), member))
> +
> +/**
> + * rbtree_reverse_for_each_entry - iterate in reverse in-order over rb_root
> + * of given type
> + *
> + * @pos:     the 'type *' to use as a loop cursor.
> + * @root:    'rb_root *' of the rbtree.
> + * @member:  the name of the rb_node field within 'type'.
> + */
> +#define rbtree_reverse_for_each_entry(pos, root, member) \
> +     for ((pos) = rb_entry_safe(rb_last(root), typeof(*(pos)), member); \
> +          (pos); \
> +          (pos) = rb_entry_safe(rb_prev(&(pos)->member), typeof(*(pos)), member))
> +
> +/**
> + * rbtree_for_each_entry_safe - iterate in-order over rb_root safe against removal
> + *
> + * @pos:     the 'type *' to use as a loop cursor
> + * @n:               another 'type *' to use as temporary storage
> + * @root:    'rb_root *' of the rbtree
> + * @member:  the name of the rb_node field within 'type'
> + */
> +#define rbtree_for_each_entry_safe(pos, n, root, member) \
> +     for ((pos) = rb_entry_safe(rb_first(root), typeof(*(pos)), member), \
> +          (n) = (pos) ? rb_entry_safe(rb_next(&(pos)->member), typeof(*(pos)), member) : NULL; \
> +          (pos); \
> +          (pos) = (n), \
> +          (n) = (pos) ? rb_entry_safe(rb_next(&(pos)->member), typeof(*(pos)), member) : NULL)
> +
> +/**
> + * rbtree_reverse_for_each_entry_safe - iterate in reverse in-order over rb_root
> + * safe against removal
> + *
> + * @pos:     the struct type * to use as a loop cursor.
> + * @n:               another struct type * to use as temporary storage.
> + * @root:    pointer to struct rb_root to iterate.
> + * @member:  name of the rb_node field within the struct.
> + */
> +#define rbtree_reverse_for_each_entry_safe(pos, n, root, member) \
> +     for ((pos) = rb_entry_safe(rb_last(root), typeof(*(pos)), member), \
> +          (n) = (pos) ? rb_entry_safe(rb_prev(&(pos)->member), typeof(*(pos)), member) : NULL; \
> +          (pos); \
> +          (pos) = (n), \
> +          (n) = (pos) ? rb_entry_safe(rb_prev(&(pos)->member), typeof(*(pos)), member) : NULL)
> +

Not really a fan of these. Needing full tree iteration is typically a sign
you're doing it wrong; walking the whole tree is actually slower than walking
a linked list.
