Signed-off-by: Matthew Auld <matthew.a...@intel.com>
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 121 +++++++++++++++++-
 1 file changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index c36cef61ce3c..4bac15363020 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -9,6 +9,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
@@ -970,7 +971,7 @@ static int gpu_write(struct i915_vma *vma,
                               vma->size >> PAGE_SHIFT, val);
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
        unsigned int needs_flush;
        unsigned long n;
@@ -1002,6 +1003,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
        return err;
 }
 
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+       unsigned long n;
+       int err;
+
+       i915_gem_object_lock(obj);
+       err = i915_gem_object_set_to_wc_domain(obj, false);
+       i915_gem_object_unlock(obj);
+       if (err)
+               return err;
+
+       err = i915_gem_object_pin_pages(obj);
+       if (err)
+               return err;
+
+       for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+               u32 __iomem *base;
+               u32 read_val;
+
+               base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+               read_val = ioread32(base + dword);
+               io_mapping_unmap_atomic(base);
+               if (read_val != val) {
+                       pr_err("n=%lu base[%u]=%u, val=%u\n",
+                              n, dword, read_val, val);
+                       err = -EINVAL;
+                       break;
+               }
+       }
+
+       i915_gem_object_unpin_pages(obj);
+       return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+       if (i915_gem_object_has_struct_page(obj))
+               return __cpu_check_shmem(obj, dword, val);
+       else if (i915_gem_object_is_lmem(obj))
+               return __cpu_check_lmem(obj, dword, val);
+
+       return -ENODEV;
+}
+
 static int __igt_write_huge(struct i915_gem_context *ctx,
                            struct intel_engine_cs *engine,
                            struct drm_i915_gem_object *obj,
@@ -1382,6 +1428,78 @@ static int igt_ppgtt_gemfs_huge(void *arg)
        return err;
 }
 
+static int igt_ppgtt_lmem_huge(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       static const unsigned int sizes[] = {
+               SZ_64K,
+               SZ_512K,
+               SZ_1M,
+               SZ_2M,
+       };
+       int i;
+       int err;
+
+       if (!HAS_LMEM(i915)) {
+               pr_info("device lacks LMEM support, skipping\n");
+               return 0;
+       }
+
+       /*
+        * Sanity check that the HW uses huge pages correctly through LMEM
+        * -- ensure that our writes land in the right place.
+        */
+
+       for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+               unsigned int size = sizes[i];
+
+               obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       if (err == -E2BIG) {
+                               pr_info("object too big for region!\n");
+                               return 0;
+                       }
+
+                       return err;
+               }
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto out_put;
+
+               if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+                       pr_info("LMEM unable to allocate huge-page(s) with size=%u\n",
+                               size);
+                       goto out_unpin;
+               }
+
+               err = igt_write_huge(ctx, obj);
+               if (err) {
+                       pr_err("LMEM write-huge failed with size=%u\n", size);
+                       goto out_unpin;
+               }
+
+               i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+               i915_gem_object_put(obj);
+       }
+
+       return 0;
+
+out_unpin:
+       i915_gem_object_unpin_pages(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       if (err == -ENOMEM)
+               err = 0;
+
+       return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
        struct i915_gem_context *ctx = arg;
@@ -1732,6 +1850,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_ppgtt_exhaust_huge),
                SUBTEST(igt_ppgtt_gemfs_huge),
                SUBTEST(igt_ppgtt_internal_huge),
+               SUBTEST(igt_ppgtt_lmem_huge),
        };
        struct drm_file *file;
        struct i915_gem_context *ctx;
-- 
2.20.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to