A puzzle for you...

---
 drivers/gpu/drm/i915/i915_buddy.c           |  17 +-
 drivers/gpu/drm/i915/selftests/i915_buddy.c | 230 +++++++++++++++++++-
 2 files changed, 234 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index c0ac68d71d94..e5667540282a 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -16,7 +16,7 @@ static void mark_allocated(struct i915_buddy_block *block)
        block->header &= ~I915_BUDDY_HEADER_STATE;
        block->header |= I915_BUDDY_ALLOCATED;
 
-       list_del_init(&block->link);
+       list_del(&block->link);
 }
 
 static void mark_free(struct i915_buddy_mm *mm,
@@ -34,7 +34,7 @@ static void mark_split(struct i915_buddy_block *block)
        block->header &= ~I915_BUDDY_HEADER_STATE;
        block->header |= I915_BUDDY_SPLIT;
 
-       list_del_init(&block->link);
+       list_del(&block->link);
 }
 
 int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 min_size)
@@ -170,9 +170,6 @@ static int split_block(struct i915_buddy_mm *mm,
 
        block->left->parent = block;
 
-       INIT_LIST_HEAD(&block->left->link);
-       INIT_LIST_HEAD(&block->left->tmp_link);
-
        block->right = kmem_cache_zalloc(mm->blocks, GFP_KERNEL);
        if (!block->right) {
                kmem_cache_free(mm->blocks, block->left);
@@ -184,9 +181,6 @@ static int split_block(struct i915_buddy_mm *mm,
 
        block->right->parent = block;
 
-       INIT_LIST_HEAD(&block->right->link);
-       INIT_LIST_HEAD(&block->right->tmp_link);
-
        mark_free(mm, block->left);
        mark_free(mm, block->right);
 
@@ -213,9 +207,9 @@ get_buddy(struct i915_buddy_block *block)
 static void __i915_buddy_free(struct i915_buddy_mm *mm,
                              struct i915_buddy_block *block)
 {
-       list_del_init(&block->link); /* We have ownership now */
+       struct i915_buddy_block *parent;
 
-       while (block->parent) {
+       while ((parent = block->parent)) {
                struct i915_buddy_block *buddy;
 
                buddy = get_buddy(block);
@@ -228,7 +222,7 @@ static void __i915_buddy_free(struct i915_buddy_mm *mm,
                kmem_cache_free(mm->blocks, block);
                kmem_cache_free(mm->blocks, buddy);
 
-               block = block->parent;
+               block = parent;
        }
 
        mark_free(mm, block);
@@ -248,6 +242,7 @@ void i915_buddy_free_list(struct i915_buddy_mm *mm,
 
        list_for_each_entry_safe(block, on, objects, link)
                i915_buddy_free(mm, block);
+       INIT_LIST_HEAD(objects);
 }
 
 /*
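
Spoiler, in case it helps review: if I'm reading the old __i915_buddy_free() walk
right, it loaded block->parent after kmem_cache_free(mm->blocks, block) had
already returned the block to the slab, so caching the parent pointer in a local
before the frees is the point of the hunk above. The list_del_init() -> list_del()
switch and the dropped INIT_LIST_HEAD() calls look like the same ownership idea
taken further: the link only has to be valid while the block actually sits on a
list, and i915_buddy_free_list() now leaves the caller's list head reinitialised
instead. A minimal standalone sketch of the walk-up-and-coalesce pattern, in
plain userspace C with made-up names (node, coalesce_up, is_free are
illustrative, not the i915 API):

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative binary-buddy node; not the i915_buddy_block layout. */
struct node {
        struct node *parent;
        struct node *left;
        struct node *right;
        bool is_free;
};

/* Merge a freed node with its buddy all the way up the tree. */
static struct node *coalesce_up(struct node *node)
{
        struct node *parent;

        /* Read the parent before the node can be freed below. */
        while ((parent = node->parent)) {
                struct node *buddy;

                buddy = parent->left == node ? parent->right : parent->left;
                if (!buddy || !buddy->is_free)
                        break;

                free(node);
                free(buddy);

                /* Use the cached pointer; node->parent is gone by now. */
                node = parent;
        }

        return node;
}
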
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 2159aa9f4867..943a9d08d2f5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -295,7 +295,7 @@ static void igt_mm_config(u64 *size, u64 *min_size)
        *size = s;
 }
 
-static int igt_buddy_alloc(void *arg)
+static int igt_buddy_alloc_smoke(void *arg)
 {
        struct i915_buddy_mm mm;
        int max_order;
@@ -385,6 +385,229 @@ static int igt_buddy_alloc(void *arg)
        return err;
 }
 
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+       const unsigned int max_order = 16;
+       struct i915_buddy_block *block, *bn;
+       struct i915_buddy_mm mm;
+       unsigned int order;
+       LIST_HEAD(blocks);
+       int err;
+
+       /*
+        * Create a pot-sized mm, then allocate one of each possible
+        * order within. This should leave the mm with exactly one
+        * page left.
+        */
+
+       err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+       GEM_BUG_ON(mm.max_order != max_order);
+
+       for (order = 0; order < max_order; order++) {
+               block = i915_buddy_alloc(&mm, order);
+               if (IS_ERR(block)) {
+                       pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+                               order);
+                       err = PTR_ERR(block);
+                       goto err;
+               }
+
+               list_add_tail(&block->link, &blocks);
+       }
+
+       /* And now the last remaining block available */
+       block = i915_buddy_alloc(&mm, 0);
+       if (IS_ERR(block)) {
+               pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+               err = PTR_ERR(block);
+               goto err;
+       }
+       list_add_tail(&block->link, &blocks);
+
+       /* Should be completely full! */
+       for (order = max_order; order--; ) {
+               block = i915_buddy_alloc(&mm, order);
+               if (!IS_ERR(block)) {
+                       pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+                               order);
+                       list_add_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       block = list_last_entry(&blocks, typeof(*block), link);
+       list_del(&block->link);
+       i915_buddy_free(&mm, block);
+
+       /* As we free in increasing size, we make available larger blocks */
+       order = 1;
+       list_for_each_entry_safe(block, bn, &blocks, link) {
+               list_del(&block->link);
+               i915_buddy_free(&mm, block);
+
+               block = i915_buddy_alloc(&mm, order);
+               if (IS_ERR(block)) {
+                       pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+                               order);
+                       err = PTR_ERR(block);
+                       goto err;
+               }
+               i915_buddy_free(&mm, block);
+               order++;
+       }
+
+       /* To confirm, now the whole mm should be available */
+       block = i915_buddy_alloc(&mm, max_order);
+       if (IS_ERR(block)) {
+               pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+                       max_order);
+               err = PTR_ERR(block);
+               goto err;
+       }
+       i915_buddy_free(&mm, block);
+
+err:
+       i915_buddy_free_list(&mm, &blocks);
+       i915_buddy_fini(&mm);
+       return err;
+}
+
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+       const int max_order = 16;
+       struct i915_buddy_block *block;
+       struct i915_buddy_mm mm;
+       LIST_HEAD(blocks);
+       int order;
+       int err;
+
+       /*
+        * Create a mm with one block of each order available, and
+        * try to allocate them all.
+        */
+
+       err = i915_buddy_init(&mm,
+                             PAGE_SIZE * ((1 << (max_order + 1)) - 1),
+                             PAGE_SIZE);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+       GEM_BUG_ON(mm.max_order != max_order);
+
+       for (order = 0; order <= max_order; order++) {
+               block = i915_buddy_alloc(&mm, order);
+               if (IS_ERR(block)) {
+                       pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+                               order);
+                       err = PTR_ERR(block);
+                       goto err;
+               }
+
+               list_add_tail(&block->link, &blocks);
+       }
+
+       /* Should be completely full! */
+       block = i915_buddy_alloc(&mm, 0);
+       if (!IS_ERR(block)) {
+               pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
+               list_add_tail(&block->link, &blocks);
+               err = -EINVAL;
+               goto err;
+       }
+
+err:
+       i915_buddy_free_list(&mm, &blocks);
+       i915_buddy_fini(&mm);
+       return err;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+       const int max_order = 16;
+       struct i915_buddy_block *block;
+       struct i915_buddy_mm mm;
+       LIST_HEAD(blocks);
+       LIST_HEAD(buddies);
+       int order, top;
+       int err;
+
+       /*
+        * Create a pot-sized mm, then allocate one of each possible
+        * order within. This should leave the mm with exactly one
+        * page left. Free the largest block, then whittle down again.
+        * Eventually we will have a fully 50% fragmented mm.
+        */
+
+       err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+       GEM_BUG_ON(mm.max_order != max_order);
+
+       for (top = max_order; top; top--) {
+               /* Make room by freeing the largest allocated block */
+               block = list_first_entry_or_null(&blocks, typeof(*block), link);
+               if (block) {
+                       list_del(&block->link);
+                       i915_buddy_free(&mm, block);
+               }
+
+               for (order = top; order--; ) {
+                       block = i915_buddy_alloc(&mm, order);
+                       if (IS_ERR(block)) {
+                               pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+                                       order, top);
+                               err = PTR_ERR(block);
+                               goto err;
+                       }
+                       list_add_tail(&block->link, &blocks);
+               }
+
+               block = i915_buddy_alloc(&mm, top);
+               if (!IS_ERR(block)) {
+                       pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+                               top, max_order);
+                       list_add_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       /* Split the final block */
+       block = i915_buddy_alloc(&mm, 0);
+       if (IS_ERR(block)) {
+               pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+                       0);
+               err = PTR_ERR(block);
+               goto err;
+       }
+       list_add_tail(&block->link, &blocks);
+
+       /* Nothing larger than blocks of min_size now available */
+       for (order = 1; order <= max_order; order++) {
+               block = i915_buddy_alloc(&mm, order);
+               if (!IS_ERR(block)) {
+                       pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+                               order);
+                       list_add_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       i915_buddy_free_list(&mm, &blocks);
+       i915_buddy_fini(&mm);
+       return err;
+}
+
 static int igt_buddy_alloc_range(void *arg)
 {
        struct i915_buddy_mm mm;
@@ -483,7 +706,10 @@ static int igt_buddy_alloc_range(void *arg)
 int i915_buddy_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
-               SUBTEST(igt_buddy_alloc),
+               SUBTEST(igt_buddy_alloc_pessimistic),
+               SUBTEST(igt_buddy_alloc_optimistic),
+               SUBTEST(igt_buddy_alloc_pathological),
+               SUBTEST(igt_buddy_alloc_smoke),
                SUBTEST(igt_buddy_alloc_range),
        };
 
-- 
2.20.1
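
One more breadcrumb on the sizing in the new selftests: the pessimistic and
pathological tests use a pot-sized mm of PAGE_SIZE << max_order, so taking one
block of every order 0..max_order-1 consumes 2^max_order - 1 pages and should
indeed leave exactly one page, while the optimistic test wants one block of
every order 0..max_order and therefore sizes the mm as
PAGE_SIZE * ((1 << (max_order + 1)) - 1), since the sum of 2^i for i = 0..n is
2^(n+1) - 1. A throwaway userspace check of the two identities (not part of the
patch):

#include <assert.h>

int main(void)
{
        const unsigned int max_order = 16;
        unsigned long long pages;
        unsigned int i;

        /* pot-sized mm: orders 0..max_order-1 leave exactly one page free */
        pages = 0;
        for (i = 0; i < max_order; i++)
                pages += 1ull << i;
        assert((1ull << max_order) - pages == 1);

        /* one block of each order 0..max_order fills 2^(max_order+1) - 1 pages */
        pages = 0;
        for (i = 0; i <= max_order; i++)
                pages += 1ull << i;
        assert(pages == (1ull << (max_order + 1)) - 1);

        return 0;
}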
