On 08/04/2024 16:16, Arunpravin Paneer Selvam wrote:
Add a new test case for the drm buddy clear and dirty
allocation.

v2: (Matthew)
   - make size a u32
   - rename PAGE_SIZE to SZ_4K
   - don't fragment the address space for every order allocation
     iteration; we can fragment it once and just increment the order
     and allocate the size.
   - create a new mm with a non-power-of-two size to ensure the
     multi-root force_merge during fini (see the sketch below).
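
For context, a non-power-of-two size makes drm_buddy_init() carve the
address space into several power-of-two roots, which drm_buddy_fini()
must later force-merge across. A minimal sketch of that shape, using the
12 * SZ_4K case from the test below (the n_roots check is purely
illustrative, assuming the n_roots field of struct drm_buddy):

	struct drm_buddy mm;

	/* 48K with 4K chunks is carved greedily into two roots: a 32K
	 * (order 3) root plus a 16K (order 2) root, so fini() has to
	 * merge across roots rather than within a single tree.
	 */
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, 12 * SZ_4K, SZ_4K));
	KUNIT_EXPECT_EQ(test, mm.n_roots, 2);
	drm_buddy_fini(&mm);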

Signed-off-by: Arunpravin Paneer Selvam <arunpravin.paneersel...@amd.com>
Suggested-by: Matthew Auld <matthew.a...@intel.com>
---
  drivers/gpu/drm/tests/drm_buddy_test.c | 141 +++++++++++++++++++++++++
  1 file changed, 141 insertions(+)

diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 4621a860cb05..b07f132f2835 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -224,6 +224,146 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
        drm_buddy_fini(&mm);
  }
+static void drm_test_buddy_alloc_clear(struct kunit *test)
+{
+       unsigned long n_pages, total, i = 0;
+       const unsigned long ps = SZ_4K;
+       struct drm_buddy_block *block;
+       const int max_order = 12;
+       LIST_HEAD(allocated);
+       struct drm_buddy mm;
+       unsigned int order;
+       u32 mm_size, size;
+       LIST_HEAD(dirty);
+       LIST_HEAD(clean);
+
+       mm_size = SZ_4K << max_order;
+       KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+       KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+       /*
+        * Idea is to allocate and free some random portion of the address space,
+        * returning those pages as non-dirty and randomly alternate between
+        * requesting dirty and non-dirty pages (not going over the limit
+        * we freed as non-dirty), putting that into two separate lists.
+        * Loop over both lists at the end checking that the dirty list
+        * is indeed all dirty pages and vice versa. Free it all again,
+        * keeping the dirty/clear status.
+        */
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+                                                           5 * ps, ps, &allocated,
+                                                           DRM_BUDDY_TOPDOWN_ALLOCATION),
+                               "buddy_alloc hit an error size=%lu\n", 5 * ps);
+       drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+
+       n_pages = 10;
+       do {
+               unsigned long flags;
+               struct list_head *list;
+               int slot = i % 2;
+
+               if (slot == 0) {
+                       list = &dirty;
+                       flags = 0;
+               } else {
+                       list = &clean;
+                       flags = DRM_BUDDY_CLEAR_ALLOCATION;
+               }
+
+               KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+                                                                   ps, ps, list,
+                                                                   flags),
+                                       "buddy_alloc hit an error size=%lu\n", ps);
+       } while (++i < n_pages);
+
+       list_for_each_entry(block, &clean, link)
+               KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);
+
+       list_for_each_entry(block, &dirty, link)
+               KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+
+       drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+
+       /*
+        * Try to go over the clear limit for some allocation.
+        * The allocation should never fail with a reasonable page-size.
+        */
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+                                                           10 * ps, ps, &clean,
+                                                           DRM_BUDDY_CLEAR_ALLOCATION),
+                               "buddy_alloc hit an error size=%lu\n", 10 * ps);
+
+       drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+       drm_buddy_free_list(&mm, &dirty, 0);
+       drm_buddy_fini(&mm);
+
+       KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+       /*
+        * Create a new mm. Intentionally fragment the address space by creating
+        * two alternating lists. Free both lists, one as dirty the other as clean.
+        * Try to allocate double the previous size with matching min_page_size. The
+        * allocation should never fail as it calls the force_merge. Also check that
+        * the page is always dirty after force_merge. Free the page as dirty, then
+        * repeat the whole thing, incrementing the order until we hit the max_order.
+        */
+
+       i = 0;
+       n_pages = mm_size / ps;
+       do {
+               struct list_head *list;
+               int slot = i % 2;
+
+               if (slot == 0)
+                       list = &dirty;
+               else
+                       list = &clean;
+
+               KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+                                                                   ps, ps, list, 0),
+                                       "buddy_alloc hit an error size=%lu\n", ps);
+       } while (++i < n_pages);
+
+       drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+       drm_buddy_free_list(&mm, &dirty, 0);
+
+       order = 1;
+       do {
+               size = SZ_4K << order;
+
+               KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+                                                                   size, size, &allocated,
+                                                                   DRM_BUDDY_CLEAR_ALLOCATION),
+                                       "buddy_alloc hit an error size=%u\n", size);
+               total = 0;
+               list_for_each_entry(block, &allocated, link) {
+                       if (size != mm_size)
+                               KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+                       total += drm_buddy_block_size(&mm, block);
+               }
+               KUNIT_EXPECT_EQ(test, total, size);
+
+               drm_buddy_free_list(&mm, &allocated, 0);
+       } while (++order <= max_order);
+
+       drm_buddy_fini(&mm);
+
+       /*
+        * Create a new mm with a non-power-of-two size. Allocate a random size, free as
+        * cleared and then call fini. This will ensure the multi-root force_merge during
+        * fini.
+        */
+       mm_size = 12 * SZ_4K;

I don't see any randomness? Maybe something like:

size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);

Otherwise,
Reviewed-by: Matthew Auld <matthew.a...@intel.com>
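
For illustration, a minimal sketch of how that suggestion could slot into
the non-power-of-two section (assuming the DRM_RND_STATE()/random_seed
helpers the other tests in this file already use; mm, ps, mm_size, size
and allocated come from the surrounding test, with size being the u32
declared at the top):

	DRM_RND_STATE(prng, random_seed);

	/* page-aligned random size in [ps, mm_size], never zero */
	size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    size, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%u\n", size);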

+       KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+                                                           4 * ps, ps, &allocated,
+                                                           DRM_BUDDY_TOPDOWN_ALLOCATION),
+                               "buddy_alloc hit an error size=%lu\n", 4 * ps);
+       drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+       drm_buddy_fini(&mm);
+}
+
  static void drm_test_buddy_alloc_contiguous(struct kunit *test)
  {
        const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
@@ -584,6 +724,7 @@ static struct kunit_case drm_buddy_tests[] = {
        KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
        KUNIT_CASE(drm_test_buddy_alloc_pathological),
        KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+       KUNIT_CASE(drm_test_buddy_alloc_clear),
        KUNIT_CASE(drm_test_buddy_alloc_range_bias),
        {}
  };
