Add KUnit tests to validate offset-aligned allocations and
subtree_max_alignment propagation in the DRM buddy allocator.

Validate offset-aligned allocation:
The test covers allocations with sizes smaller than the alignment constraint
and verifies correct size preservation, offset alignment, and behavior across
multiple allocation sizes. It also exercises fragmentation by freeing
alternating blocks and confirms that allocation fails once all aligned offsets
are consumed.

Stress subtree_max_alignment propagation:
Exercise subtree_max_alignment tracking by allocating blocks with descending
alignment constraints and freeing them in reverse order. This verifies that
free-tree augmentation correctly propagates the maximum offset alignment
present in each subtree at every stage.

Signed-off-by: Arunpravin Paneer Selvam <[email protected]>
---
 drivers/gpu/drm/tests/drm_buddy_test.c | 167 +++++++++++++++++++++++++
 1 file changed, 167 insertions(+)

diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c 
b/drivers/gpu/drm/tests/drm_buddy_test.c
index 5f40b5343bd8..b24302ef4188 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -21,6 +21,171 @@ static inline u64 get_size(int order, u64 chunk_size)
        return (1 << order) * chunk_size;
 }
 
+/*
+ * Stress subtree_max_alignment propagation.
+ *
+ * Allocate blocks with descending alignment constraints and free them in
+ * reverse order, checking at every stage that the free-tree augmentation
+ * propagates the maximum offset alignment present in each subtree.
+ */
+static void drm_test_buddy_subtree_offset_alignment_stress(struct kunit *test)
+{
+       struct drm_buddy_block *block;
+       struct rb_node *node = NULL;
+       const u64 mm_size = SZ_2M;
+       const u64 alignments[] = {
+               SZ_1M,
+               SZ_512K,
+               SZ_256K,
+               SZ_128K,
+               SZ_64K,
+               SZ_32K,
+               SZ_16K,
+               SZ_8K,
+       };
+
+       struct list_head allocated[ARRAY_SIZE(alignments)];
+       unsigned int i, order, max_subtree_align;
+       struct drm_buddy mm;
+       int ret, tree;
+
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
+                              "buddy_init failed\n");
+
+       for (i = 0; i < ARRAY_SIZE(allocated); i++)
+               INIT_LIST_HEAD(&allocated[i]);
+
+       for (i = 0; i < ARRAY_SIZE(alignments); i++) {
+               struct drm_buddy_block *root = NULL;
+               unsigned int expected;
+               u64 align;
+
+               align = alignments[i];
+               /*
+                * subtree_max_alignment appears to be tracked as a log2
+                * order; for an alignment of 2^k bytes with a 4K chunk
+                * size the expected value is k - 1.
+                * NOTE(review): confirm against the augmentation code in
+                * drm_buddy.c.
+                */
+               expected = ilog2(align) - 1;
+
+               /* Consume every offset that satisfies this alignment. */
+               for (;;) {
+                       ret = drm_buddy_alloc_blocks(&mm,
+                                                    0, mm_size,
+                                                    SZ_4K, align,
+                                                    &allocated[i],
+                                                    0);
+                       if (ret)
+                               break;
+
+                       block = list_last_entry(&allocated[i],
+                                               struct drm_buddy_block,
+                                               link);
+                       KUNIT_EXPECT_EQ(test, drm_buddy_block_offset(block) & (align - 1), 0ULL);
+               }
+
+               /* Locate the root of the highest-order non-empty free tree. */
+               for (order = mm.max_order + 1; order-- > 0 && !root; ) {
+                       for (tree = 0; tree < 2; tree++) {
+                               node = mm.free_trees[tree][order].rb_node;
+                               if (node) {
+                                       root = container_of(node,
+                                                           struct drm_buddy_block,
+                                                           rb);
+                                       break;
+                               }
+                       }
+               }
+
+               KUNIT_ASSERT_NOT_NULL(test, root);
+               KUNIT_EXPECT_EQ(test, root->subtree_max_alignment, expected);
+       }
+
+       for (i = ARRAY_SIZE(alignments); i-- > 0; ) {
+               drm_buddy_free_list(&mm, &allocated[i], 0);
+
+               /*
+                * Recompute the maximum from scratch at every stage;
+                * accumulating across iterations would let a stale value
+                * from an earlier stage satisfy the check below without
+                * testing the current state of the trees.
+                */
+               max_subtree_align = 0;
+               for (order = 0; order <= mm.max_order; order++) {
+                       for (tree = 0; tree < 2; tree++) {
+                               node = mm.free_trees[tree][order].rb_node;
+                               if (!node)
+                                       continue;
+
+                               block = container_of(node, struct drm_buddy_block, rb);
+                               max_subtree_align = max(max_subtree_align, block->subtree_max_alignment);
+                       }
+               }
+
+               /*
+                * NOTE(review): the per-root check above expects
+                * ilog2(align) - 1, while this bound omits the -1 --
+                * confirm which encoding is intended.
+                */
+               KUNIT_EXPECT_GE(test, max_subtree_align, ilog2(alignments[i]));
+       }
+
+       drm_buddy_fini(&mm);
+}
+
+/*
+ * Validate offset-aligned allocation.
+ *
+ * Cover allocations with sizes smaller than the alignment constraint and
+ * verify size preservation, offset alignment and behaviour across multiple
+ * allocation sizes.  Exercise fragmentation by freeing alternating blocks
+ * and confirm that allocation fails once all aligned offsets are consumed.
+ */
+static void drm_test_buddy_offset_aligned_allocation(struct kunit *test)
+{
+       struct drm_buddy_block *block, *tmp;
+       int num_blocks, i, count = 0;
+       LIST_HEAD(allocated);
+       struct drm_buddy mm;
+       u64 mm_size = SZ_4M;
+       LIST_HEAD(freed);
+
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
+                              "buddy_init failed\n");
+
+       /* One 256K-aligned slot per 256K of address space. */
+       num_blocks = mm_size / SZ_256K;
+
+       /*
+        * Allocate multiple sizes under a fixed offset alignment.
+        * Ensures alignment handling is independent of allocation size and
+        * exercises subtree max-alignment pruning for small requests.
+        */
+       for (i = 0; i < num_blocks; i++)
+               KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_256K,
+                                                                   &allocated, 0),
+                                      "buddy_alloc hit an error size=%u\n", SZ_8K);
+
+       list_for_each_entry(block, &allocated, link) {
+               /* Ensure the allocated block uses the expected 8 KB size */
+               KUNIT_EXPECT_EQ(test, drm_buddy_block_size(&mm, block), SZ_8K);
+               /* Ensure the block starts at a 256 KB-aligned offset */
+               KUNIT_EXPECT_EQ(test, drm_buddy_block_offset(block) & (SZ_256K - 1), 0ULL);
+       }
+       drm_buddy_free_list(&mm, &allocated, 0);
+
+       /* Repeat with a larger size under the same alignment constraint. */
+       for (i = 0; i < num_blocks; i++)
+               KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
+                                                                   &allocated, 0),
+                                      "buddy_alloc hit an error size=%u\n", SZ_16K);
+
+       list_for_each_entry(block, &allocated, link) {
+               /* Ensure the allocated block uses the expected 16 KB size */
+               KUNIT_EXPECT_EQ(test, drm_buddy_block_size(&mm, block), SZ_16K);
+               /* Ensure the block starts at a 256 KB-aligned offset */
+               KUNIT_EXPECT_EQ(test, drm_buddy_block_offset(block) & (SZ_256K - 1), 0ULL);
+       }
+
+       /*
+        * Free alternating aligned blocks to introduce fragmentation.
+        * Ensures offset-aligned allocations remain valid after frees and
+        * verifies subtree max-alignment metadata is correctly maintained.
+        */
+       list_for_each_entry_safe(block, tmp, &allocated, link) {
+               if (count % 2 == 0)
+                       list_move_tail(&block->link, &freed);
+               count++;
+       }
+       drm_buddy_free_list(&mm, &freed, 0);
+
+       /* Refill the slots that were just freed. */
+       for (i = 0; i < num_blocks / 2; i++)
+               KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
+                                                                   &allocated, 0),
+                                      "buddy_alloc hit an error size=%u\n", SZ_16K);
+
+       /*
+        * Allocate with offset alignment after all slots are used; must fail.
+        * Confirms that no aligned offsets remain.  The message describes
+        * the actual failure condition: this assertion trips when the
+        * allocation unexpectedly succeeds, not when it errors.
+        */
+       KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
+                                                          &allocated, 0),
+                             "buddy_alloc unexpectedly succeeded, size=%u\n", SZ_16K);
+       drm_buddy_free_list(&mm, &allocated, 0);
+       drm_buddy_fini(&mm);
+}
+
 static void drm_test_buddy_fragmentation_performance(struct kunit *test)
 {
        struct drm_buddy_block *block, *tmp;
@@ -877,6 +1042,8 @@ static struct kunit_case drm_buddy_tests[] = {
        KUNIT_CASE(drm_test_buddy_alloc_clear),
        KUNIT_CASE(drm_test_buddy_alloc_range_bias),
        KUNIT_CASE(drm_test_buddy_fragmentation_performance),
+       KUNIT_CASE(drm_test_buddy_offset_aligned_allocation),
+       KUNIT_CASE(drm_test_buddy_subtree_offset_alignment_stress),
        {}
 };
 
-- 
2.34.1

Reply via email to