Allocate objects with varying numbers of pages (which should hopefully
consist of a mixture of contiguous page chunks and hence coalesced sg
lists) and check that the sg walkers in insert_pages cope.

Signed-off-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/i915_drv.h               |   2 +-
 drivers/gpu/drm/i915/i915_gem_internal.c      |   4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 159 ++++++++++++++++++++++++++
 3 files changed, 163 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b84c1d1fa12c..65298edc0e9c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3506,7 +3506,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
-                               unsigned int size);
+                               unsigned long size);
 
 /* i915_gem_shrinker.c */
 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 2222863e505f..92f66e959a9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -151,10 +151,12 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
  */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *i915,
-                               unsigned int size)
+                               unsigned long size)
 {
        struct drm_i915_gem_object *obj;
 
+       GEM_BUG_ON(!size);
+
        obj = i915_gem_object_alloc(i915);
        if (!obj)
                return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 97b55e9726d8..5d0e6f60bea7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -22,7 +22,10 @@
  *
  */
 
+#include <linux/prime_numbers.h>
+
 #include "i915_selftest.h"
+#include "huge_gem_object.h"
 
 static int igt_ppgtt_alloc(void *arg)
 {
@@ -85,10 +88,166 @@ static int igt_ppgtt_alloc(void *arg)
        return err;
 }
 
+static struct i915_vma *vma_lookup(struct drm_i915_gem_object *obj,
+                                  struct i915_address_space *vm)
+{
+       return i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+}
+
+static int igt_ppgtt_fill(void *arg)
+{
+       struct drm_i915_private *dev_priv = arg;
+       unsigned long npages, max_pages = 1 << 20, prime;
+       struct drm_i915_gem_object *obj, *on;
+       struct i915_hw_ppgtt *ppgtt;
+       struct i915_vma *vma;
+       LIST_HEAD(objects);
+       int err = 0;
+
+       if (!USES_FULL_PPGTT(dev_priv))
+               return 0;
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       ppgtt = i915_ppgtt_create(dev_priv, NULL, "mock");
+       if (IS_ERR(ppgtt)) {
+               err = PTR_ERR(ppgtt);
+               goto err_unlock;
+       }
+       GEM_BUG_ON(ppgtt->base.total & ~PAGE_MASK);
+
+       max_pages = min_t(u64, max_pages, ppgtt->base.total/2 >> PAGE_SHIFT);
+
+       for_each_prime_number_from(prime, 2, 13) {
+               for (npages = 1; npages <= max_pages; npages *= prime) {
+                       u64 flags;
+
+                       GEM_BUG_ON(!npages);
+                       obj = huge_gem_object(dev_priv,
+                                             PAGE_SIZE,
+                                             npages << PAGE_SHIFT);
+                       if (IS_ERR(obj))
+                               break;
+
+                       list_add(&obj->batch_pool_link, &objects);
+
+                       /* Fill the GTT top down - hope we don't overstep the end */
+                       flags = ppgtt->base.total | PIN_OFFSET_FIXED | PIN_USER;
+                       list_for_each_entry(obj, &objects, batch_pool_link) {
+                               vma = vma_lookup(obj, &ppgtt->base);
+                               if (IS_ERR(vma))
+                                       continue;
+
+                               flags -= obj->base.size;
+                               err = i915_vma_pin(vma, 0, 0, flags);
+                               if (err) {
+                                       pr_err("Fill top-down failed with err=%d on size=%lu pages (prime=%lu)\n", err, npages, prime);
+                                       goto err;
+                               }
+
+                               i915_vma_unpin(vma);
+                       }
+
+                       flags = ppgtt->base.total | PIN_OFFSET_FIXED | PIN_USER;
+                       list_for_each_entry(obj, &objects, batch_pool_link) {
+                               vma = vma_lookup(obj, &ppgtt->base);
+                               if (IS_ERR(vma))
+                                       continue;
+
+                               flags -= obj->base.size;
+                               if (!drm_mm_node_allocated(&vma->node) ||
+                                   i915_vma_misplaced(vma, 0, 0, flags)) {
+                                       pr_err("Fill top-down moved vma.node=%llx + %llx, expected offset %llx\n",
+                                              vma->node.start, vma->node.size,
+                                              flags & PAGE_MASK);
+                                       err = -EINVAL;
+                                       goto err;
+                               }
+
+                               err = i915_vma_unbind(vma);
+                               if (err) {
+                                       pr_err("Fill top-down unbind of vma.node=%llx + %llx failed with err=%d\n",
+                                              vma->node.start, vma->node.size,
+                                              err);
+                                       goto err;
+                               }
+                       }
+
+                       /* And again from the bottom */
+                       flags = PIN_OFFSET_FIXED | PIN_USER;
+                       list_for_each_entry(obj, &objects, batch_pool_link) {
+                               vma = vma_lookup(obj, &ppgtt->base);
+                               if (IS_ERR(vma))
+                                       continue;
+
+                               err = i915_vma_pin(vma, 0, 0, flags);
+                               if (err) {
+                                       pr_err("Fill bottom-up failed with err=%d on size=%lu pages (prime=%lu)\n", err, npages, prime);
+                                       goto err;
+                               }
+
+                               i915_vma_unpin(vma);
+                               flags += obj->base.size;
+                       }
+
+                       flags = PIN_OFFSET_FIXED | PIN_USER;
+                       list_for_each_entry(obj, &objects, batch_pool_link) {
+                               vma = vma_lookup(obj, &ppgtt->base);
+                               if (IS_ERR(vma))
+                                       continue;
+
+                               if (!drm_mm_node_allocated(&vma->node) ||
+                                   i915_vma_misplaced(vma, 0, 0, flags)) {
+                                       pr_err("Fill bottom-up moved vma.node=%llx + %llx, expected offset %llx\n",
+                                              vma->node.start, vma->node.size,
+                                              flags & PAGE_MASK);
+                                       err = -EINVAL;
+                                       goto err;
+                               }
+
+                               err = i915_vma_unbind(vma);
+                               if (err) {
+                                       pr_err("Fill bottom-up unbind of vma.node=%llx + %llx failed with err=%d\n",
+                                              vma->node.start, vma->node.size,
+                                              err);
+                                       goto err;
+                               }
+
+                               flags += obj->base.size;
+                       }
+               }
+
+               list_for_each_entry_safe(obj, on, &objects, batch_pool_link) {
+                       list_del(&obj->batch_pool_link);
+                       vma = vma_lookup(obj, &ppgtt->base);
+                       if (!IS_ERR(vma))
+                               i915_vma_close(vma);
+
+                       i915_gem_object_put(obj);
+               }
+       }
+err:
+
+       list_for_each_entry_safe(obj, on, &objects, batch_pool_link) {
+               list_del(&obj->batch_pool_link);
+               vma = vma_lookup(obj, &ppgtt->base);
+               if (!IS_ERR(vma))
+                       i915_vma_close(vma);
+
+               i915_gem_object_put(obj);
+       }
+
+       i915_ppgtt_close(&ppgtt->base);
+       i915_ppgtt_put(ppgtt);
+err_unlock:
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       return err;
+}
+
 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_ppgtt_alloc),
+               SUBTEST(igt_ppgtt_fill),
        };
 
        return i915_subtests(tests, i915);
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to