Add KUnit tests to validate offset-aligned allocations in the DRM buddy allocator.
Validate offset-aligned allocation:
The test covers allocations with sizes smaller than the alignment
constraint and verifies correct size preservation, offset alignment,
and behavior across multiple allocation sizes. It also exercises
fragmentation by freeing alternating blocks and confirms that
allocation fails once all aligned offsets are consumed.

Stress subtree_max_alignment propagation:
Exercise subtree_max_alignment tracking by allocating blocks with
descending alignment constraints and freeing them in reverse order.
This verifies that free-tree augmentation correctly propagates the
maximum offset alignment present in each subtree at every stage.

v2:
  - Move the patch to the gpu/tests/gpu_buddy_test.c file.

Signed-off-by: Arunpravin Paneer Selvam <[email protected]>
---
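
Both tests reduce to the same power-of-two check that the
KUNIT_EXPECT_EQ assertions in the diff apply via
gpu_buddy_block_offset(block) & (align - 1): an offset is aligned
exactly when that mask leaves zero. A minimal standalone sketch of the
invariant (the offset_is_aligned helper is illustrative, not part of
this patch):

#include <linux/types.h>

/*
 * Illustrative only: mirrors the "offset & (align - 1) == 0"
 * expectation used by the tests. Valid only when align is a
 * nonzero power of two.
 */
static inline bool offset_is_aligned(u64 offset, u64 align)
{
        return (offset & (align - 1)) == 0;
}

For example, with align = SZ_256K, offset 0x40000 passes while
0x42000 (256K + 8K) does not.
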
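The expectations on root->subtree_max_alignment follow the usual
augmented-tree rule: each node caches the maximum alignment present
anywhere in its subtree, recomputed from its children along the
insert/erase path. A hypothetical sketch of that propagation step,
using simplified node and field names rather than the real
struct gpu_buddy_block layout:

#include <linux/minmax.h>
#include <stddef.h>

/* Simplified stand-in for struct gpu_buddy_block; names are illustrative. */
struct demo_node {
        struct demo_node *left, *right;
        unsigned int alignment;   /* log2 offset alignment of this block */
        unsigned int subtree_max; /* cached maximum over the subtree */
};

static unsigned int demo_subtree_max(const struct demo_node *n)
{
        return n ? n->subtree_max : 0;
}

/*
 * Recompute one node's cached maximum bottom-up so the root always
 * reports the largest alignment available in the free tree.
 */
static void demo_update_subtree_max(struct demo_node *n)
{
        n->subtree_max = max3(n->alignment,
                              demo_subtree_max(n->left),
                              demo_subtree_max(n->right));
}

A search can then skip any subtree whose cached maximum is smaller
than the requested alignment, which is the pruning the offset-aligned
test exercises with requests smaller than the alignment constraint.
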
 drivers/gpu/tests/gpu_buddy_test.c | 166 +++++++++++++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/drivers/gpu/tests/gpu_buddy_test.c b/drivers/gpu/tests/gpu_buddy_test.c
index 450e71deed90..37f22655b5fb 100644
--- a/drivers/gpu/tests/gpu_buddy_test.c
+++ b/drivers/gpu/tests/gpu_buddy_test.c
@@ -21,6 +21,170 @@ static inline u64 get_size(int order, u64 chunk_size)
         return (1 << order) * chunk_size;
 }
 
+static void gpu_test_buddy_subtree_offset_alignment_stress(struct kunit *test)
+{
+        struct gpu_buddy_block *block;
+        struct rb_node *node = NULL;
+        const u64 mm_size = SZ_2M;
+        const u64 alignments[] = {
+                SZ_1M,
+                SZ_512K,
+                SZ_256K,
+                SZ_128K,
+                SZ_64K,
+                SZ_32K,
+                SZ_16K,
+                SZ_8K,
+        };
+
+        struct list_head allocated[ARRAY_SIZE(alignments)];
+        unsigned int i, order, max_subtree_align = 0;
+        struct gpu_buddy mm;
+        int ret, tree;
+
+        KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K),
+                               "buddy_init failed\n");
+
+        for (i = 0; i < ARRAY_SIZE(allocated); i++)
+                INIT_LIST_HEAD(&allocated[i]);
+
+        /*
+         * Exercise subtree_max_alignment tracking by allocating blocks with
+         * descending alignment constraints and freeing them in reverse order.
+         * This verifies that free-tree augmentation correctly propagates the
+         * maximum offset alignment present in each subtree at every stage.
+         */
+
+        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
+                struct gpu_buddy_block *root = NULL;
+                unsigned int expected;
+                u64 align;
+
+                align = alignments[i];
+                expected = ilog2(align) - 1;
+
+                for (;;) {
+                        ret = gpu_buddy_alloc_blocks(&mm,
+                                                     0, mm_size,
+                                                     SZ_4K, align,
+                                                     &allocated[i],
+                                                     0);
+                        if (ret)
+                                break;
+
+                        block = list_last_entry(&allocated[i],
+                                                struct gpu_buddy_block,
+                                                link);
+                        KUNIT_EXPECT_EQ(test, gpu_buddy_block_offset(block) & (align - 1), 0ULL);
+                }
+
+                for (order = mm.max_order + 1; order-- > 0 && !root; ) {
+                        for (tree = 0; tree < 2; tree++) {
+                                node = mm.free_trees[tree][order].rb_node;
+                                if (node) {
+                                        root = container_of(node,
+                                                            struct gpu_buddy_block,
+                                                            rb);
+                                        break;
+                                }
+                        }
+                }
+
+                KUNIT_ASSERT_NOT_NULL(test, root);
+                KUNIT_EXPECT_EQ(test, root->subtree_max_alignment, expected);
+        }
+
+        for (i = ARRAY_SIZE(alignments); i-- > 0; ) {
+                gpu_buddy_free_list(&mm, &allocated[i], 0);
+
+                for (order = 0; order <= mm.max_order; order++) {
+                        for (tree = 0; tree < 2; tree++) {
+                                node = mm.free_trees[tree][order].rb_node;
+                                if (!node)
+                                        continue;
+
+                                block = container_of(node, struct gpu_buddy_block, rb);
+                                max_subtree_align = max(max_subtree_align, block->subtree_max_alignment);
+                        }
+                }
+
+                KUNIT_EXPECT_GE(test, max_subtree_align, ilog2(alignments[i]));
+        }
+
+        gpu_buddy_fini(&mm);
+}
+
+static void gpu_test_buddy_offset_aligned_allocation(struct kunit *test)
+{
+        struct gpu_buddy_block *block, *tmp;
+        int num_blocks, i, count = 0;
+        LIST_HEAD(allocated);
+        struct gpu_buddy mm;
+        u64 mm_size = SZ_4M;
+        LIST_HEAD(freed);
+
+        KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K),
+                               "buddy_init failed\n");
+
+        num_blocks = mm_size / SZ_256K;
+        /*
+         * Allocate multiple sizes under a fixed offset alignment.
+         * Ensures alignment handling is independent of allocation size and
+         * exercises subtree max-alignment pruning for small requests.
+         */
+        for (i = 0; i < num_blocks; i++)
+                KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_256K,
+                                                                    &allocated, 0),
+                                       "buddy_alloc hit an error size=%u\n", SZ_8K);
+
+        list_for_each_entry(block, &allocated, link) {
+                /* Ensure the allocated block uses the expected 8 KB size */
+                KUNIT_EXPECT_EQ(test, gpu_buddy_block_size(&mm, block), SZ_8K);
+                /* Ensure the block starts at a 256 KB-aligned offset for proper alignment */
+                KUNIT_EXPECT_EQ(test, gpu_buddy_block_offset(block) & (SZ_256K - 1), 0ULL);
+        }
+        gpu_buddy_free_list(&mm, &allocated, 0);
+
+        for (i = 0; i < num_blocks; i++)
+                KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
+                                                                    &allocated, 0),
+                                       "buddy_alloc hit an error size=%u\n", SZ_16K);
+
+        list_for_each_entry(block, &allocated, link) {
+                /* Ensure the allocated block uses the expected 16 KB size */
+                KUNIT_EXPECT_EQ(test, gpu_buddy_block_size(&mm, block), SZ_16K);
+                /* Ensure the block starts at a 256 KB-aligned offset for proper alignment */
+                KUNIT_EXPECT_EQ(test, gpu_buddy_block_offset(block) & (SZ_256K - 1), 0ULL);
+        }
+
+        /*
+         * Free alternating aligned blocks to introduce fragmentation.
+         * Ensures offset-aligned allocations remain valid after frees and
+         * verifies subtree max-alignment metadata is correctly maintained.
+         */
+        list_for_each_entry_safe(block, tmp, &allocated, link) {
+                if (count % 2 == 0)
+                        list_move_tail(&block->link, &freed);
+                count++;
+        }
+        gpu_buddy_free_list(&mm, &freed, 0);
+
+        for (i = 0; i < num_blocks / 2; i++)
+                KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
+                                                                    &allocated, 0),
+                                       "buddy_alloc hit an error size=%u\n", SZ_16K);
+
+        /*
+         * Allocate with offset alignment after all slots are used; must fail.
+         * Confirms that no aligned offsets remain.
+         */
+        KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
+                                                           &allocated, 0),
+                              "buddy_alloc hit an error size=%u\n", SZ_16K);
+        gpu_buddy_free_list(&mm, &allocated, 0);
+        gpu_buddy_fini(&mm);
+}
+
 static void gpu_test_buddy_fragmentation_performance(struct kunit *test)
 {
         struct gpu_buddy_block *block, *tmp;
@@ -912,6 +1076,8 @@ static struct kunit_case gpu_buddy_tests[] = {
         KUNIT_CASE(gpu_test_buddy_alloc_range_bias),
         KUNIT_CASE(gpu_test_buddy_fragmentation_performance),
         KUNIT_CASE(gpu_test_buddy_alloc_exceeds_max_order),
+        KUNIT_CASE(gpu_test_buddy_offset_aligned_allocation),
+        KUNIT_CASE(gpu_test_buddy_subtree_offset_alignment_stress),
         {}
 };
 
-- 
2.34.1
