Re: [PATCH v9 3/3] drm/tests: Add a test case for drm buddy clear allocation
On 18/03/2024 21:40, Arunpravin Paneer Selvam wrote:
> Add a new test case for the drm buddy clear and dirty allocation.
>
> Signed-off-by: Arunpravin Paneer Selvam
> Suggested-by: Matthew Auld
> ---
>  drivers/gpu/drm/tests/drm_buddy_test.c | 127 +
>  1 file changed, 127 insertions(+)
>
> diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
> index 454ad9952f56..d355a6e61893 100644
> --- a/drivers/gpu/drm/tests/drm_buddy_test.c
> +++ b/drivers/gpu/drm/tests/drm_buddy_test.c
> @@ -19,6 +19,132 @@ static inline u64 get_size(int order, u64 chunk_size)
>  	return (1 << order) * chunk_size;
>  }
>
> +static void drm_test_buddy_alloc_clear(struct kunit *test)
> +{
> +	unsigned long n_pages, total, i = 0;
> +	const unsigned long ps = SZ_4K;
> +	struct drm_buddy_block *block;
> +	const int max_order = 12;
> +	LIST_HEAD(allocated);
> +	struct drm_buddy mm;
> +	unsigned int order;
> +	u64 mm_size, size;

Maybe just make these two u32 or unsigned long. That should be big enough, plus avoids any kind of 32b compilation bugs below.

> +	LIST_HEAD(dirty);
> +	LIST_HEAD(clean);
> +
> +	mm_size = PAGE_SIZE << max_order;

s/PAGE_SIZE/SZ_4K/ below also.

> +	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
> +
> +	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
> +
> +	/**

Drop the extra *, since this is not actual kernel-doc. Below also.

> +	 * Idea is to allocate and free some random portion of the address space,
> +	 * returning those pages as non-dirty and randomly alternate between
> +	 * requesting dirty and non-dirty pages (not going over the limit
> +	 * we freed as non-dirty), putting that into two separate lists.
> +	 * Loop over both lists at the end checking that the dirty list
> +	 * is indeed all dirty pages and vice versa. Free it all again,
> +	 * keeping the dirty/clear status.
> +	 */
> +	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +							    5 * ps, ps, &allocated,
> +							    DRM_BUDDY_TOPDOWN_ALLOCATION),
> +			       "buddy_alloc hit an error size=%u\n", 5 * ps);
> +	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
> +
> +	n_pages = 10;
> +	do {
> +		unsigned long flags;
> +		struct list_head *list;
> +		int slot = i % 2;
> +
> +		if (slot == 0) {
> +			list = &dirty;
> +			flags = 0;
> +		} else if (slot == 1) {

Could just be else {

> +			list = &clean;
> +			flags = DRM_BUDDY_CLEAR_ALLOCATION;
> +		}
> +
> +		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +								    ps, ps, list,
> +								    flags),
> +				       "buddy_alloc hit an error size=%u\n", ps);
> +	} while (++i < n_pages);
> +
> +	list_for_each_entry(block, &clean, link)
> +		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);
> +
> +	list_for_each_entry(block, &dirty, link)
> +		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
> +
> +	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
> +
> +	/**
> +	 * Trying to go over the clear limit for some allocation.
> +	 * The allocation should never fail with reasonable page-size.
> +	 */
> +	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +							    10 * ps, ps, &clean,
> +							    DRM_BUDDY_CLEAR_ALLOCATION),
> +			       "buddy_alloc hit an error size=%u\n", 10 * ps);
> +
> +	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
> +	drm_buddy_free_list(&mm, &dirty, 0);
> +	drm_buddy_fini(&mm);
> +
> +	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
> +
> +	/**
> +	 * Create a new mm. Intentionally fragment the address space by creating
> +	 * two alternating lists. Free both lists, one as dirty the other as clean.
> +	 * Try to allocate double the previous size with matching min_page_size. The
> +	 * allocation should never fail as it calls the force_merge. Also check that
> +	 * the page is always dirty after force_merge. Free the page as dirty, then
> +	 * repeat the whole thing, increment the order until we hit the max_order.
> +	 */
> +
> +	order = 1;
> +	do {
> +		size = PAGE_SIZE << order;
> +		i = 0;
> +		n_pages = mm_size / ps;
> +		do {
> +			struct list_head *list;
> +			int slot = i % 2;
> +
> +			if (slot == 0)
> +				list = &dirty;
> +			else if (slot == 1)
[PATCH v9 3/3] drm/tests: Add a test case for drm buddy clear allocation
Add a new test case for the drm buddy clear and dirty allocation.

Signed-off-by: Arunpravin Paneer Selvam
Suggested-by: Matthew Auld
---
 drivers/gpu/drm/tests/drm_buddy_test.c | 127 +
 1 file changed, 127 insertions(+)

diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 454ad9952f56..d355a6e61893 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -19,6 +19,132 @@ static inline u64 get_size(int order, u64 chunk_size)
 	return (1 << order) * chunk_size;
 }
 
+static void drm_test_buddy_alloc_clear(struct kunit *test)
+{
+	unsigned long n_pages, total, i = 0;
+	const unsigned long ps = SZ_4K;
+	struct drm_buddy_block *block;
+	const int max_order = 12;
+	LIST_HEAD(allocated);
+	struct drm_buddy mm;
+	unsigned int order;
+	u64 mm_size, size;
+	LIST_HEAD(dirty);
+	LIST_HEAD(clean);
+
+	mm_size = PAGE_SIZE << max_order;
+	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+	/**
+	 * Idea is to allocate and free some random portion of the address space,
+	 * returning those pages as non-dirty and randomly alternate between
+	 * requesting dirty and non-dirty pages (not going over the limit
+	 * we freed as non-dirty), putting that into two separate lists.
+	 * Loop over both lists at the end checking that the dirty list
+	 * is indeed all dirty pages and vice versa. Free it all again,
+	 * keeping the dirty/clear status.
+	 */
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+							    5 * ps, ps, &allocated,
+							    DRM_BUDDY_TOPDOWN_ALLOCATION),
+			       "buddy_alloc hit an error size=%u\n", 5 * ps);
+	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+
+	n_pages = 10;
+	do {
+		unsigned long flags;
+		struct list_head *list;
+		int slot = i % 2;
+
+		if (slot == 0) {
+			list = &dirty;
+			flags = 0;
+		} else if (slot == 1) {
+			list = &clean;
+			flags = DRM_BUDDY_CLEAR_ALLOCATION;
+		}
+
+		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+								    ps, ps, list,
+								    flags),
+				       "buddy_alloc hit an error size=%u\n", ps);
+	} while (++i < n_pages);
+
+	list_for_each_entry(block, &clean, link)
+		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);
+
+	list_for_each_entry(block, &dirty, link)
+		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+
+	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+
+	/**
+	 * Trying to go over the clear limit for some allocation.
+	 * The allocation should never fail with reasonable page-size.
+	 */
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+							    10 * ps, ps, &clean,
+							    DRM_BUDDY_CLEAR_ALLOCATION),
+			       "buddy_alloc hit an error size=%u\n", 10 * ps);
+
+	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+	drm_buddy_free_list(&mm, &dirty, 0);
+	drm_buddy_fini(&mm);
+
+	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+	/**
+	 * Create a new mm. Intentionally fragment the address space by creating
+	 * two alternating lists. Free both lists, one as dirty the other as clean.
+	 * Try to allocate double the previous size with matching min_page_size. The
+	 * allocation should never fail as it calls the force_merge. Also check that
+	 * the page is always dirty after force_merge. Free the page as dirty, then
+	 * repeat the whole thing, increment the order until we hit the max_order.
+	 */
+
+	order = 1;
+	do {
+		size = PAGE_SIZE << order;
+		i = 0;
+		n_pages = mm_size / ps;
+		do {
+			struct list_head *list;
+			int slot = i % 2;
+
+			if (slot == 0)
+				list = &dirty;
+			else if (slot == 1)
+				list = &clean;
+
+			KUNIT_ASSERT_FALSE_MSG(test,
+					       drm_buddy_alloc_blocks(&mm, 0, mm_size,
+								      ps, ps, list, 0),
+