Check that i915_gem_object_get_page() retrieves the right page for any
index, walking the object in forward, backward and random order, and
that i915_gem_object_pin_map() can map the whole object.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
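A note on running these (assuming the standard i915 selftest plumbing):
the subtests are registered as mock selftests, and so can be exercised
by loading the module with the i915.mock_selftests=-1 parameter.
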
 drivers/gpu/drm/i915/i915_gem_object.c             |   1 +
 drivers/gpu/drm/i915/selftests/i915_gem_object.c   | 405 +++++++++++++++++++++
 .../gpu/drm/i915/selftests/i915_mock_selftests.h   |   1 +
 3 files changed, 407 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_object.c

diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index f222980cee34..30a704ea7e3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -409,4 +409,5 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/huge_gem_object.c"
+#include "selftests/i915_gem_object.c"
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
new file mode 100644
index 000000000000..1328332150f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_gem_device.h"
+
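+/*
+ * Arbitrary pfn offset for the fake backing store, so that a correct
+ * lookup of page n (pfn PFN_BIAS + n) is distinguishable from the
+ * identity mapping pfn == n.
+ */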
+#define PFN_BIAS 0x1000
+
+static void fake_free_pages(struct drm_i915_gem_object *obj,
+                           struct sg_table *pages)
+{
+       sg_free_table(pages);
+       kfree(pages);
+}
+
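+/*
+ * Construct a fake backing store: runs of consecutive pfns starting at
+ * PFN_BIAS, split into randomly sized sg chunks. The prng is seeded from
+ * obj->scratch so the layout is reproducible, and page index n always
+ * corresponds to pfn PFN_BIAS + n. No pages are actually allocated.
+ */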
+static struct sg_table *
+fake_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+       struct sg_table *pages;
+       struct scatterlist *sg;
+       struct rnd_state prng;
+       unsigned long pfn, rem;
+
+       prandom_seed_state(&prng, obj->scratch);
+
+       pages = kmalloc(sizeof(*pages), GFP);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+
+       rem = obj->base.size >> PAGE_SHIFT;
+       if (sg_alloc_table(pages, rem, GFP)) {
+               kfree(pages);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       pfn = PFN_BIAS;
+       for (sg = pages->sgl; sg; sg = sg_next(sg)) {
+               unsigned int len = 1 + prandom_u32_state(&prng) % rem;
+
+               sg_set_page(sg, pfn_to_page(pfn), len * PAGE_SIZE, 0);
+
+               pfn += len;
+               rem -= len;
+               if (!rem) {
+                       sg_mark_end(sg);
+                       break;
+               }
+       }
+       GEM_BUG_ON(rem);
+
+       obj->mm.madv = I915_MADV_DONTNEED;
+       return pages;
+#undef GFP
+}
+
+static void fake_put_pages(struct drm_i915_gem_object *obj,
+                          struct sg_table *pages)
+{
+       fake_free_pages(obj, pages);
+       obj->mm.dirty = false;
+       obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+       .get_pages = fake_get_pages,
+       .put_pages = fake_put_pages,
+};
+
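+/*
+ * Create a GEM object backed by fake_ops, stashing the prng seed for its
+ * sg layout in obj->scratch.
+ */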
+static struct drm_i915_gem_object *
+fake_object(struct drm_i915_private *i915, u64 size, u32 seed)
+{
+       struct drm_i915_gem_object *obj;
+
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+       if (overflows_type(size, obj->base.size))
+               return ERR_PTR(-E2BIG);
+
+       obj = i915_gem_object_alloc(i915);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       drm_gem_private_object_init(&i915->drm, &obj->base, size);
+       i915_gem_object_init(obj, &fake_ops);
+
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->cache_level = I915_CACHE_NONE;
+
+       obj->scratch = seed;
+
+       return obj;
+}
+
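+/*
+ * The walk orders below share a signature so that the test can iterate
+ * over them; forward and backward ignore the prng.
+ */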
+static unsigned int *order_forward(unsigned int count, struct rnd_state *prng)
+{
+       unsigned int *order;
+       unsigned int i;
+
+       order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+       if (!order)
+               return NULL;
+
+       for (i = 0; i < count; i++)
+               order[i] = i;
+
+       return order;
+}
+
+static unsigned int *order_backward(unsigned int count, struct rnd_state *prng)
+{
+       unsigned int *order;
+       unsigned int i;
+
+       order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+       if (!order)
+               return NULL;
+
+       for (i = 0; i < count; i++)
+               order[i] = count - i - 1;
+
+       return order;
+}
+
+static unsigned int *order_random(unsigned int count, struct rnd_state *prng)
+{
+       return i915_random_order(count, prng);
+}
+
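+/*
+ * Walk every page of the object in forward, backward and random order,
+ * checking that i915_gem_object_get_page() returns the page with the
+ * expected pfn for each index.
+ */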
+static int igt_gem_object_get_page(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       const struct phase {
+               const char *name;
+               unsigned int *(*order)(unsigned int, struct rnd_state *);
+               unsigned int flags;
+       } phases[] = {
+               { "forward", order_forward },
+               { "backward", order_backward },
+               { "random", order_random },
+               {}
+       }, *p;
+       I915_RND_STATE(prng);
+
+       for (p = phases; p->name; p++) {
+               const unsigned int npages = 1024;
+               struct drm_i915_gem_object *obj;
+               unsigned int *order;
+               unsigned int i;
+               int err;
+
+               order = p->order(npages, &prng);
+               if (!order)
+                       return -ENOMEM;
+
+               obj = fake_object(i915,
+                                 npages * PAGE_SIZE,
+                                 prandom_u32_state(&prng));
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       goto err_free;
+               }
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto err_put;
+
+               for (i = 0; i < npages; i++) {
+                       unsigned int idx = order[i];
+                       struct page *page;
+
+                       page = i915_gem_object_get_page(obj, idx);
+                       if (page_to_pfn(page) != idx + PFN_BIAS) {
+                               pr_err("object->page[%d:%d] lookup failed, direction %s, found pfn %lu, expected %u\n",
+                                      i, idx, p->name, page_to_pfn(page), idx + PFN_BIAS);
+                               err = -EINVAL;
+                               goto err_unpin;
+                       }
+               }
+
+err_unpin:
+               i915_gem_object_unpin_pages(obj);
+err_put:
+               i915_gem_object_put(obj);
+err_free:
+               kfree(order);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+#define FAULT BIT(0)
+
+static void map_free_pages(struct sg_table *st)
+{
+       struct scatterlist *sg;
+
+       for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+               if (sg_page(sg))
+                       __free_pages(sg_page(sg), get_order(sg->length));
+       }
+
+       sg_free_table(st);
+       kfree(st);
+}
+
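+/*
+ * Allocate one block of each page order in [0, MAX_ORDER) and tag the
+ * first word of every page with its order, so that a linear mapping of
+ * the whole object can later be verified page by page.
+ */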
+static struct sg_table *
+map_get_pages(struct drm_i915_gem_object *obj)
+{
+       struct sg_table *pages;
+       struct scatterlist *sg;
+       unsigned int order;
+
+       if (obj->scratch & FAULT)
+               return ERR_PTR(-EFAULT);
+
+       pages = kmalloc(sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+
+       if (sg_alloc_table(pages, MAX_ORDER, GFP_KERNEL)) {
+               kfree(pages);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       sg = pages->sgl;
+       for (order = 0; order < MAX_ORDER; order++) {
+               struct page *page;
+               unsigned int *vaddr;
+               unsigned int n;
+
+               page = alloc_pages(GFP_KERNEL, order);
+               if (!page) {
+                       sg_set_page(sg, NULL, 0, 0);
+                       sg_mark_end(sg);
+                       map_free_pages(pages); /* frees the sg_table too */
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               vaddr = kmap(page);
+               for (n = 0; n < 1 << order; n++)
+                       vaddr[n * PAGE_SIZE / sizeof(*vaddr)] = order;
+               kunmap(page);
+
+               sg_set_page(sg, page, PAGE_SIZE << order, 0);
+               sg = sg_next(sg);
+       }
+       GEM_BUG_ON(sg);
+
+       obj->mm.madv = I915_MADV_DONTNEED;
+       return pages;
+}
+
+static void map_put_pages(struct drm_i915_gem_object *obj,
+                         struct sg_table *pages)
+{
+       map_free_pages(pages);
+       obj->mm.dirty = false;
+       obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops map_ops = {
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+       .get_pages = map_get_pages,
+       .put_pages = map_put_pages,
+};
+
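+/* An object sized to hold one block of each order; see map_get_pages(). */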
+static struct drm_i915_gem_object *
+map_object(struct drm_i915_private *i915,
+          unsigned int flags)
+{
+       struct drm_i915_gem_object *obj;
+       unsigned int n, size;
+
+       obj = i915_gem_object_alloc(i915);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       size = 0;
+       for (n = 0; n < MAX_ORDER; n++) /* lazy! */
+               size += PAGE_SIZE << n;
+
+       drm_gem_private_object_init(&i915->drm, &obj->base, size);
+       i915_gem_object_init(obj, &map_ops);
+
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->cache_level = I915_CACHE_NONE;
+
+       obj->scratch = flags;
+
+       return obj;
+}
+
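+/*
+ * Map the entire object and compare its contents against the pattern
+ * written by map_get_pages(). The "sync-fault" phase injects -EFAULT
+ * from get_pages and checks that the error is propagated; the final
+ * phase checks that mapping still works afterwards.
+ */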
+static int igt_gem_object_pin_map(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct drm_i915_gem_object *obj;
+       const struct {
+               const char *name;
+               unsigned int flags;
+       } phases[] = {
+               { "sync" },
+               { "sync-fault", FAULT },
+               { "sync-after-fault" },
+               {},
+       }, *p;
+       unsigned int *vaddr;
+       unsigned int order;
+       int err = 0;
+
+       for (p = phases; p->name; p++) {
+               obj = map_object(i915, p->flags);
+               if (IS_ERR(obj))
+                       return PTR_ERR(obj);
+
+               vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+
+               if (p->flags & FAULT) {
+                       if (vaddr != ERR_PTR(-EFAULT)) {
+                               pr_err("Expected fault injection!\n");
+                               if (!IS_ERR(vaddr))
+                                       i915_gem_object_unpin_map(obj);
+                               err = -EINVAL;
+                       }
+                       goto err;
+               }
+
+               if (IS_ERR(vaddr)) {
+                       err = PTR_ERR(vaddr);
+                       goto err;
+               }
+
+               for (order = 0; order < MAX_ORDER; order++) {
+                       unsigned int n;
+
+                       for (n = 0; n < 1 << order; n++) {
+                               if (vaddr[n * PAGE_SIZE / sizeof(*vaddr)] != order) {
+                                       pr_err("invalid mapping at order %d, page %d: found %d\n",
+                                              order, n, vaddr[n * PAGE_SIZE / sizeof(*vaddr)]);
+                                       err = -EINVAL;
+                                       goto err_unmap;
+                               }
+                       }
+
+                       vaddr += (PAGE_SIZE << order) / sizeof(*vaddr);
+               }
+
+err_unmap:
+               i915_gem_object_unpin_map(obj);
+err:
+               i915_gem_object_put(obj);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+int i915_gem_object_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_gem_object_get_page),
+               SUBTEST(igt_gem_object_pin_map),
+       };
+       struct drm_i915_private *i915;
+       int err;
+
+       i915 = mock_gem_device();
+       if (!i915)
+               return -ENOMEM;
+
+       err = i915_subtests(tests, i915);
+
+       drm_dev_unref(&i915->drm);
+       return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 3c862db41c2c..5cae61db5b56 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -14,6 +14,7 @@ selftest(uncore, intel_uncore_mock_selftests)
 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
 selftest(requests, i915_gem_request_mock_selftests)
 selftest(gem, i915_gem_mock_selftests)
+selftest(object, i915_gem_object_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
 selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
-- 
2.11.0
