[Intel-gfx] [PATCH 6/6] drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests

2019-10-18 Thread Matthew Auld
Ditch the dubious static list of sizes to enumerate, in favour of
choosing a random size within the limits of each backing store. With
repeated CI runs this should give us a wider range of object sizes, and
in turn more page-size combinations, while using less machine time.
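
For reference, the selection boils down to masking a random value down to page-aligned values below roughly twice the maximum page size, then OR-ing in the minimum. A standalone sketch of the same idea (not the kernel helper itself; 4K pages and libc rand() are assumed purely for illustration):

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT 12                 /* assumed 4K pages for this sketch */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Pick a page-aligned size in [min_page_size, 2 * max_page_size), which is
 * what GENMASK_ULL(ilog2(max_page_size), PAGE_SHIFT) gives the kernel helper.
 */
static uint32_t pick_random_size(uint32_t min_page_size, uint32_t max_page_size)
{
        /* max | (max - 1) == 2 * max - 1 for a power-of-two max */
        uint32_t mask = (max_page_size | (max_page_size - 1)) & ~(PAGE_SIZE - 1);
        uint32_t size = (uint32_t)rand() & mask;

        /* guarantee at least one min_page_size chunk of backing store */
        if (size < min_page_size)
                size |= min_page_size;

        return size;
}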

Signed-off-by: Matthew Auld 
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 198 +-
 1 file changed, 94 insertions(+), 104 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index d4892769b739..3f7ac4473f33 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1314,20 +1314,33 @@ static int igt_ppgtt_exhaust_huge(void *arg)
return err;
 }
 
+static u32 igt_random_size(struct rnd_state *prng,
+  u32 min_page_size,
+  u32 max_page_size)
+{
+   u32 size;
+   u32 mask;
+
+   GEM_BUG_ON(!is_power_of_2(min_page_size));
+   GEM_BUG_ON(!is_power_of_2(max_page_size));
+   GEM_BUG_ON(min_page_size < PAGE_SIZE);
+   GEM_BUG_ON(min_page_size > max_page_size);
+
+   mask = GENMASK_ULL(ilog2(max_page_size), PAGE_SHIFT);
+   size = prandom_u32_state(prng) & mask;
+   if (size < min_page_size)
+   size |= min_page_size;
+
+   return size;
+}
+
 static int igt_ppgtt_internal_huge(void *arg)
 {
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
-   static const unsigned int sizes[] = {
-   SZ_64K,
-   SZ_128K,
-   SZ_256K,
-   SZ_512K,
-   SZ_1M,
-   SZ_2M,
-   };
-   int i;
+   I915_RND_STATE(prng);
+   u32 size;
int err;
 
/*
@@ -1335,42 +1348,36 @@ static int igt_ppgtt_internal_huge(void *arg)
 * -- ensure that our writes land in the right place.
 */
 
-   for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-   unsigned int size = sizes[i];
-
-   obj = i915_gem_object_create_internal(i915, size);
-   if (IS_ERR(obj))
-   return PTR_ERR(obj);
+   size = igt_random_size(&prng, SZ_64K, SZ_2M);
 
-   err = i915_gem_object_pin_pages(obj);
-   if (err)
-   goto out_put;
-
-   if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
-   pr_info("internal unable to allocate huge-page(s) with 
size=%u\n",
-   size);
-   goto out_unpin;
-   }
+   obj = i915_gem_object_create_internal(i915, size);
+   if (IS_ERR(obj))
+   return PTR_ERR(obj);
 
-   err = igt_write_huge(ctx, obj);
-   if (err) {
-   pr_err("internal write-huge failed with size=%u\n",
-  size);
-   goto out_unpin;
-   }
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   goto out_put;
 
-   i915_gem_object_unpin_pages(obj);
-   __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-   i915_gem_object_put(obj);
+   if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+   pr_info("%s unable to allocate huge-page(s) with size=%u\n",
+   __func__, size);
+   err = -ENOMEM;
+   goto out_unpin;
}
 
-   return 0;
+   err = igt_write_huge(ctx, obj);
+   if (err)
+   pr_err("%s write-huge failed with size=%u\n", __func__, size);
 
 out_unpin:
i915_gem_object_unpin_pages(obj);
+   __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 out_put:
i915_gem_object_put(obj);
 
+   if (err == -ENOMEM)
+   err = 0;
+
return err;
 }
 
@@ -1384,14 +1391,8 @@ static int igt_ppgtt_gemfs_huge(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
-   static const unsigned int sizes[] = {
-   SZ_2M,
-   SZ_4M,
-   SZ_8M,
-   SZ_16M,
-   SZ_32M,
-   };
-   int i;
+   I915_RND_STATE(prng);
+   u32 size;
int err;
 
/*
@@ -1400,46 +1401,40 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 */
 
if (!igt_can_allocate_thp(i915)) {
-   pr_info("missing THP support, skipping\n");
+   pr_info("%s missing THP support, skipping\n", __func__);
return 0;
}
 
-   for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-   unsigned int size = sizes[i];
-
-   obj = i915_gem_object_create_shmem(i915, size);
- 

[Intel-gfx] [PATCH 5/6] drm/i915/selftests: extend coverage to include LMEM huge-pages

2019-10-18 Thread Matthew Auld
Add LMEM objects to the list of backends we test for huge-GTT-pages.
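
Since LMEM pages cannot be kmap'd like shmem pages, the CPU-side verification goes through the region's io-mapping instead. A condensed sketch of that readback path (an illustrative helper name; pinning and domain management trimmed; the io-map helper comes from patch 3/6):

/* Condensed sketch, not the full selftest: read dword 'dword' back from
 * every page of an LMEM object and compare against the expected value.
 */
static int check_lmem_dword(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
        unsigned long n;

        for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
                u32 __iomem *base;
                u32 read_val;

                base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
                read_val = ioread32(base + dword);
                io_mapping_unmap_atomic(base);

                if (read_val != val)
                        return -EINVAL;
        }

        return 0;
}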

Signed-off-by: Matthew Auld 
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 121 +-
 1 file changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index f27772f6779a..d4892769b739 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -9,6 +9,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
@@ -981,7 +982,7 @@ static int gpu_write(struct intel_context *ce,
   vma->size >> PAGE_SHIFT, val);
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
unsigned int needs_flush;
unsigned long n;
@@ -1013,6 +1014,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, 
u32 dword, u32 val)
return err;
 }
 
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   if (i915_gem_object_has_struct_page(obj))
+   return __cpu_check_shmem(obj, dword, val);
+   else if (i915_gem_object_is_lmem(obj))
+   return __cpu_check_lmem(obj, dword, val);
+
+   return -ENODEV;
+}
+
 static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
@@ -1397,6 +1443,78 @@ static int igt_ppgtt_gemfs_huge(void *arg)
return err;
 }
 
+static int igt_ppgtt_lmem_huge(void *arg)
+{
+   struct i915_gem_context *ctx = arg;
+   struct drm_i915_private *i915 = ctx->i915;
+   struct drm_i915_gem_object *obj;
+   static const unsigned int sizes[] = {
+   SZ_64K,
+   SZ_512K,
+   SZ_1M,
+   SZ_2M,
+   };
+   int i;
+   int err;
+
+   if (!HAS_LMEM(i915)) {
+   pr_info("device lacks LMEM support, skipping\n");
+   return 0;
+   }
+
+   /*
+* Sanity check that the HW uses huge pages correctly through LMEM
+* -- ensure that our writes land in the right place.
+*/
+
+   for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+   unsigned int size = sizes[i];
+
+   obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+   if (IS_ERR(obj)) {
+   err = PTR_ERR(obj);
+   if (err == -E2BIG) {
+   pr_info("object too big for region!\n");
+   return 0;
+   }
+
+   return err;
+   }
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   goto out_put;
+
+   if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+   pr_info("LMEM unable to allocate huge-page(s) with 
size=%u\n",
+   size);
+   goto out_unpin;
+   }
+
+   err = igt_write_huge(ctx, obj);
+   if (err) {
+   pr_err("LMEM write-huge failed with size=%u\n", size);
+   goto out_unpin;
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+   i915_gem_object_put(obj);
+   }
+
+   return 0;
+
+out_unpin:
+   i915_gem_object_unpin_pages(obj);
+out_put:
+   i915_gem_object_put(obj);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   

[Intel-gfx] [PATCH 2/6] drm/i915: setup io-mapping for LMEM

2019-10-18 Thread Matthew Auld
From: Abdiel Janulgue 

Signed-off-by: Abdiel Janulgue 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/intel_region_lmem.c | 28 ++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c 
b/drivers/gpu/drm/i915/intel_region_lmem.c
index 7a3f96e1f766..051069664074 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -36,8 +36,32 @@ lmem_create_object(struct intel_memory_region *mem,
return obj;
 }
 
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+   io_mapping_fini(&mem->iomap);
+   intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+   int ret;
+
+   if (!io_mapping_init_wc(&mem->iomap,
+   mem->io_start,
+   resource_size(&mem->region)))
+   return -EIO;
+
+   ret = intel_memory_region_init_buddy(mem);
+   if (ret)
+   io_mapping_fini(&mem->iomap);
+
+   return ret;
+}
+
 const struct intel_memory_region_ops intel_region_lmem_ops = {
-   .init = intel_memory_region_init_buddy,
-   .release = intel_memory_region_release_buddy,
+   .init = region_lmem_init,
+   .release = region_lmem_release,
.create_object = lmem_create_object,
 };
-- 
2.20.1


[Intel-gfx] [PATCH 4/6] drm/i915/selftests: add write-dword test for LMEM

2019-10-18 Thread Matthew Auld
A simple test that writes dwords across an object, using various engines in
a randomized order, and checks from the CPU that our writes land.
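
In outline: pin the object's vma, then, until the selftest timeout fires, pick an engine in a pre-shuffled order together with a random dword offset, write it from the GPU and immediately verify it from the CPU. A trimmed sketch of the main loop (setup and teardown as in the patch; igt_cpu_check() is the LMEM readback added here):

        /* Trimmed sketch of the main loop from the patch; vma is already
         * pinned, prng/order/count/engines set up as in igt_gpu_write().
         */
        do {
                u32 rng = prandom_u32_state(&prng);
                u32 dword = offset_in_page(rng) / 4;    /* random dword in a page */

                /* walk the engines in the pre-randomised order */
                ce = engines->engines[order[i] % engines->num_engines];
                i = (i + 1) % (count * count);
                if (!ce || !intel_engine_can_store_dword(ce->engine))
                        continue;

                err = igt_gpu_write_dw(ce, vma, dword, rng);    /* GPU writes */
                if (err)
                        break;

                err = igt_cpu_check(obj, dword, rng);           /* CPU verifies */
                if (err)
                        break;
        } while (!__igt_timeout(end_time, NULL));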

Signed-off-by: Matthew Auld 
---
 .../drm/i915/selftests/intel_memory_region.c  | 167 ++
 1 file changed, 167 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c 
b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 292489371842..8a2d1bded47b 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -7,13 +7,16 @@
 
 #include "../i915_selftest.h"
 
+
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 #include "mock_region.h"
 
+#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
 #include "selftests/igt_flush_test.h"
@@ -256,6 +259,125 @@ static int igt_mock_contiguous(void *arg)
return err;
 }
 
+static int igt_gpu_write_dw(struct intel_context *ce,
+   struct i915_vma *vma,
+   u32 dword,
+   u32 value)
+{
+   return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+  vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+struct drm_i915_gem_object *obj)
+{
+   struct drm_i915_private *i915 = ctx->i915;
+   struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+   struct i915_gem_engines *engines;
+   struct i915_gem_engines_iter it;
+   struct intel_context *ce;
+   I915_RND_STATE(prng);
+   IGT_TIMEOUT(end_time);
+   unsigned int count;
+   struct i915_vma *vma;
+   int *order;
+   int i, n;
+   int err = 0;
+
+   GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+   n = 0;
+   count = 0;
+   for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+   count++;
+   if (!intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   n++;
+   }
+   i915_gem_context_unlock_engines(ctx);
+   if (!n)
+   return 0;
+
+   order = i915_random_order(count * count, &prng);
+   if (!order)
+   return -ENOMEM;
+
+   vma = i915_vma_instance(obj, vm, NULL);
+   if (IS_ERR(vma)) {
+   err = PTR_ERR(vma);
+   goto out_free;
+   }
+
+   err = i915_vma_pin(vma, 0, 0, PIN_USER);
+   if (err)
+   goto out_free;
+
+   i = 0;
+   engines = i915_gem_context_lock_engines(ctx);
+   do {
+   u32 rng = prandom_u32_state(&prng);
+   u32 dword = offset_in_page(rng) / 4;
+
+   ce = engines->engines[order[i] % engines->num_engines];
+   i = (i + 1) % (count * count);
+   if (!ce || !intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   err = igt_gpu_write_dw(ce, vma, dword, rng);
+   if (err)
+   break;
+
+   err = igt_cpu_check(obj, dword, rng);
+   if (err)
+   break;
+   } while (!__igt_timeout(end_time, NULL));
+   i915_gem_context_unlock_engines(ctx);
+
+out_free:
+   kfree(order);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   return err;
+}
+
 static int igt_lmem_create(void *arg)
 {
struct drm_i915_private *i915 = arg;
@@ -277,6 +399,50 @@ static int igt_lmem_create(void *arg)
return err;
 }
 
+static int igt_lmem_write_gpu(void *arg)
+{
+   struct drm_i915_private *i915 = arg;

[Intel-gfx] [PATCH 1/6] drm/i915: support creating LMEM objects

2019-10-18 Thread Matthew Auld
We currently define LMEM, or local memory, as just another memory
region, like system memory or stolen, which we can expose to userspace
and which the CPU can map via some BAR.
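
With this in place, allocating and identifying an LMEM object becomes a one-liner each; a usage sketch (lmem_smoke is an illustrative name, error paths trimmed):

/* Usage sketch for the new interface; not part of the patch itself. */
static int lmem_smoke(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;

        if (!HAS_LMEM(i915))
                return 0;       /* nothing to do on !LMEM platforms */

        obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        GEM_BUG_ON(!i915_gem_object_is_lmem(obj));

        /* ... pin pages, use the object ... */

        i915_gem_object_put(obj);
        return 0;
}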

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/Makefile |  2 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  | 29 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  | 23 ++
 drivers/gpu/drm/i915/i915_drv.h   |  3 ++
 drivers/gpu/drm/i915/intel_region_lmem.c  | 43 +++
 drivers/gpu/drm/i915/intel_region_lmem.h  | 11 +
 .../drm/i915/selftests/i915_live_selftests.h  |  1 +
 .../drm/i915/selftests/intel_memory_region.c  | 40 +
 8 files changed, 152 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e791d9323b51..02e65f3bd35c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -118,6 +118,7 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+   gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
@@ -146,6 +147,7 @@ i915-y += \
  i915_scheduler.o \
  i915_trace_points.o \
  i915_vma.o \
+ intel_region_lmem.o \
  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index ..a53e4463570d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .get_pages = i915_gem_object_get_pages_buddy,
+   .put_pages = i915_gem_object_put_pages_buddy,
+   .release = i915_gem_object_release_memory_region,
+};
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+   return obj->ops == &i915_gem_lmem_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags)
+{
+   return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+size, flags);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index ..ebc15fe24f58
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include 
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index db3c3a025a03..7e72eb832ba2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -101,6 +101,8 @@
 #include "i915_vma.h"
 #include "i915_irq.h"
 
+#include "intel_region_lmem.h"
+
 #include "intel_gvt.h"
 
 /* General customization:
@@ -1786,6 +1788,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
 
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c 
b/drivers/gpu/drm/i915/intel_region_lmem.c
new file mode 100644
index ..7a3f96e1f766
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "intel_region_lmem.h"
+
+static struct drm_i915_gem_object *
+lmem_create_objec

[Intel-gfx] [PATCH 3/6] drm/i915/lmem: support kernel mapping

2019-10-18 Thread Matthew Auld
From: Abdiel Janulgue 

We can create LMEM objects, but we also need to support mapping them
into kernel space for internal use.
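
Typical use of the new helpers, once the object's pages are pinned, looks roughly like the following (lmem_write_dword is an illustrative name):

/* Illustrative helper: poke a value into the first dword of page n of an
 * LMEM object through the region's io-mapping. The caller holds a pages
 * pin so the dma address is stable.
 */
static void lmem_write_dword(struct drm_i915_gem_object *obj,
                             unsigned long n, u32 val)
{
        u32 __iomem *vaddr;

        vaddr = i915_gem_object_lmem_io_map_page_atomic(obj, n);
        iowrite32(val, vaddr);
        io_mapping_unmap_atomic(vaddr);
}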

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Signed-off-by: Steve Hampson 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  |  36 ++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |   8 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |  22 +++-
 .../drm/i915/selftests/intel_memory_region.c  | 113 ++
 5 files changed, 180 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index a53e4463570d..dd69c00434f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,11 +9,47 @@
 #include "i915_drv.h"
 
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,
 };
 
+/* XXX: Time to vfunc your life up? */
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+   resource_size_t offset;
+
+   GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
return obj->ops == &i915_gem_lmem_obj_ops;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index ebc15fe24f58..31a6462bdbb6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -13,6 +13,14 @@ struct drm_i915_gem_object;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+   unsigned long n);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a387e3ee728b..96008374a412 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -31,10 +31,11 @@ struct i915_lut_handle {
 struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY   BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT   BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM  BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY   BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT   BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(5)
 
/* Interface between the GEM object and its backing storage.
 * get_pages() is called once prior to the use of the associated set
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b0ec0959c13f..cf7f5a3cb210 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 struct sg_table *pages,
@@ -172,7 +173,9 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object 
*obj)
void *ptr;
 
ptr = p

Re: [Intel-gfx] [PATCH] drm/i915: Split memory_region initialisation into its own file

2019-10-18 Thread Matthew Auld
On Fri, 18 Oct 2019 at 13:48, Chris Wilson  wrote:
>
> Pull the memory region bookkeeping into its own file. Let's start clean and
> see how long it lasts!
>
> Signed-off-by: Chris Wilson 
> Cc: Matthew Auld 

Much better,
Reviewed-by: Matthew Auld 

[Intel-gfx] [PATCH v3 1/3] drm/i915: enumerate and init each supported region

2019-10-18 Thread Matthew Auld
From: Abdiel Janulgue 

Nothing to enumerate yet...

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Reviewed-by: Chris Wilson 
---
 drivers/gpu/drm/i915/i915_drv.h   |  7 +++
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 56 ---
 drivers/gpu/drm/i915/intel_device_info.h  |  2 +
 drivers/gpu/drm/i915/intel_memory_region.h| 16 ++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 ++
 5 files changed, 80 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 40e923b0c2c8..faf645802710 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -684,6 +684,8 @@ struct i915_gem_mm {
 */
struct vfsmount *gemfs;
 
+   struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -1783,6 +1785,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
 /* Having GuC is not the same as using GuC */
@@ -2001,6 +2005,9 @@ int __must_check i915_gem_evict_for_node(struct 
i915_address_space *vm,
 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915);
+int i915_gem_init_memory_regions(struct drm_i915_private *i915);
+
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7b15bb891970..7b8e30c72f86 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2744,6 +2744,52 @@ int i915_init_ggtt(struct drm_i915_private *i915)
return 0;
 }
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915)
+{
+   int i;
+
+   i915_gem_cleanup_stolen(i915);
+
+   for (i = 0; i < INTEL_REGION_UNKNOWN; i++) {
+   struct intel_memory_region *region = i915->mm.regions[i];
+
+   if (region)
+   intel_memory_region_put(region);
+   }
+}
+
+int i915_gem_init_memory_regions(struct drm_i915_private *i915)
+{
+   int err, i;
+
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   /* XXX: stolen will become a region at some point */
+   err = i915_gem_init_stolen(i915);
+   if (err)
+   return err;
+
+   for (i = 0; i < INTEL_REGION_UNKNOWN; i++) {
+   struct intel_memory_region *mem = ERR_PTR(-ENODEV);
+
+   if (!HAS_REGION(i915, BIT(i)))
+   continue;
+
+   if (IS_ERR(mem)) {
+   err = PTR_ERR(mem);
+   goto out_cleanup;
+   }
+   }
+
+   return 0;
+
+out_cleanup:
+   i915_gem_cleanup_memory_regions(i915);
+   return err;
+}
+
 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
struct i915_vma *vma, *vn;
@@ -2781,6 +2827,8 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
 {
struct pagevec *pvec;
 
+   i915_gem_cleanup_memory_regions(i915);
+
fini_aliasing_ppgtt(&i915->ggtt);
 
ggtt_cleanup_hw(&i915->ggtt);
@@ -2790,8 +2838,6 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
-   i915_gem_cleanup_stolen(i915);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3240,11 +3286,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
 
-   /*
-* Initialise stolen early so that we may reserve preallocated
-* objects for the BIOS to KMS transition.
-*/
-   ret = i915_gem_init_stolen(dev_priv);
+   ret = i915_gem_init_memory_regions(dev_priv);
if (ret)
goto out_gtt_cleanup;
 
diff --git a/drivers/gpu/drm/i915/intel_device_info.h 
b/drivers/gpu/drm/i915/intel_device_info.h
index 0cdc2465534b..e9940f932d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -160,6 +160,8 @@ struct intel_device_info {
 
unsigned int page_sizes; /* page sizes supported by the HW */
 
+   u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
 
u8 pipe_mask;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h 
b/drivers/gpu/drm/i915/intel_memory_region.

[Intel-gfx] [PATCH v3 3/3] drm/i915: treat stolen as a region

2019-10-18 Thread Matthew Auld
Convert stolen memory over to a region object. Still leaves open the
question of what to do with pre-allocated objects...
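
The shape of the conversion, condensed: stolen keeps its drm_mm allocator, but setup, teardown and object creation are now reached through an intel_memory_region_ops vtable, and the old entry point becomes a thin wrapper over the region API:

/* Condensed from the patch below, for orientation only. */
static const struct intel_memory_region_ops i915_region_stolen_ops = {
        .init = init_stolen,            /* wraps i915_gem_init_stolen() */
        .release = release_stolen,      /* wraps i915_gem_cleanup_stolen() */
        .create_object = _i915_gem_object_create_stolen,
};

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915, resource_size_t size)
{
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
                                             size, I915_BO_ALLOC_CONTIGUOUS);
}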

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
Reviewed-by: Chris Wilson 
---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 65 +++---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c| 14 +
 drivers/gpu/drm/i915/i915_pci.c|  2 +-
 drivers/gpu/drm/i915/intel_memory_region.c |  1 +
 drivers/gpu/drm/i915/intel_memory_region.h |  3 +
 6 files changed, 65 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index c76260ce13e3..57cd8bc2657c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
 
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private 
*dev_priv,
return 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
 {
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private 
*i915,
}
 }
 
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -539,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object 
*obj)
 
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+   if (obj->mm.region)
+   i915_gem_object_release_memory_region(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -548,8 +552,9 @@ static const struct drm_i915_gem_object_ops 
i915_gem_object_stolen_ops = {
 };
 
 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-  struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+   struct drm_mm_node *stolen,
+   struct intel_memory_region *mem)
 {
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -571,6 +576,9 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
if (err)
goto cleanup;
 
+   if (mem)
+   i915_gem_object_init_memory_region(obj, mem, 0);
+
return obj;
 
 cleanup:
@@ -579,10 +587,12 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return ERR_PTR(err);
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+  resource_size_t size,
+  unsigned int flags)
 {
+   struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -603,7 +613,7 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
goto err_free;
}
 
-   obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+   obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
if (IS_ERR(obj))
goto err_remove;
 
@@ -616,6 +626,43 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return obj;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+   return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+   i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+   .init = init_stolen,
+   .release = release_stolen,
+   .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private 
*i915)
+{
+   return intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+   

[Intel-gfx] [PATCH v3 2/3] drm/i915: treat shmem as a region

2019-10-18 Thread Matthew Auld
Convert shmem to an intel_memory_region.
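
One behavioural detail worth noting: the early bail-out in shmem_get_pages() is now bounded by the shmem region's size rather than totalram_pages(), roughly:

/* Sketch of the changed check in shmem_get_pages(); mem is obj->mm.region.
 * If the object cannot possibly be backed by the region, bail early.
 */
if (obj->base.size > resource_size(&mem->region))
        return -ENOMEM;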

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
Reviewed-by: Chris Wilson 
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h|  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  7 +-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 79 ++-
 drivers/gpu/drm/i915/i915_drv.h   |  2 +
 drivers/gpu/drm/i915/i915_gem.c   |  9 ---
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 15 
 drivers/gpu/drm/i915/i915_pci.c   | 29 +--
 drivers/gpu/drm/i915/intel_memory_region.c|  8 ++
 drivers/gpu/drm/i915/intel_memory_region.h| 13 ++-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  7 +-
 11 files changed, 131 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c5e14c9c805c..85921796851f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -25,10 +25,11 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
  const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+resource_size_t size);
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
-  const void *data, size_t size);
+  const void *data, resource_size_t size);
 
 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
 void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c 
b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object 
*obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
 
-   if (!IS_ERR_OR_NULL(pages))
+   if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+   i915_gem_object_release_memory_region(obj);
+   }
mutex_unlock(&obj->mm.lock);
return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index d3f7733bc7ed..2f7bcfb9c964 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -6,6 +6,7 @@
 #include "intel_memory_region.h"
 #include "i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 void
 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
@@ -165,5 +166,9 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
 
-   return mem->ops->create_object(mem, size, flags);
+   obj = mem->ops->create_object(mem, size, flags);
+   if (!IS_ERR(obj))
+   trace_i915_gem_object_create(obj);
+
+   return obj;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..be68b76e13b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_gemfs.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
 static int shmem_get_pages(struct drm_i915_gem_object *obj)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+   struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 * If there's no chance of allocating enough pages for the whole
 * object, bail early.
 */
-   if (page_count > totalram_pages())
+   if (obj->base.size > resource_size(&mem->region))
re

[Intel-gfx] [PATCH v2 2/3] drm/i915: treat shmem as a region

2019-10-18 Thread Matthew Auld
Convert shmem to an intel_memory_region.

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h|  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  7 +-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 79 ++-
 drivers/gpu/drm/i915/i915_drv.h   |  2 +
 drivers/gpu/drm/i915/i915_gem.c   |  9 ---
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 15 
 drivers/gpu/drm/i915/i915_pci.c   | 29 +--
 drivers/gpu/drm/i915/intel_memory_region.c| 10 +++
 drivers/gpu/drm/i915/intel_memory_region.h| 28 ++-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  7 +-
 11 files changed, 149 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c5e14c9c805c..85921796851f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -25,10 +25,11 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
  const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+resource_size_t size);
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
-  const void *data, size_t size);
+  const void *data, resource_size_t size);
 
 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
 void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c 
b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object 
*obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
 
-   if (!IS_ERR_OR_NULL(pages))
+   if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+   i915_gem_object_release_memory_region(obj);
+   }
mutex_unlock(&obj->mm.lock);
return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index d3f7733bc7ed..2f7bcfb9c964 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -6,6 +6,7 @@
 #include "intel_memory_region.h"
 #include "i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 void
 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
@@ -165,5 +166,9 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
 
-   return mem->ops->create_object(mem, size, flags);
+   obj = mem->ops->create_object(mem, size, flags);
+   if (!IS_ERR(obj))
+   trace_i915_gem_object_create(obj);
+
+   return obj;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..be68b76e13b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_gemfs.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
 static int shmem_get_pages(struct drm_i915_gem_object *obj)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+   struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 * If there's no chance of allocating enough pages for the whole
 * object, bail early.
 */
-   if (page_count > totalram_pages())
+   if (obj->base.size > resource_size(&mem->region))
re

[Intel-gfx] [PATCH v2 3/3] drm/i915: treat stolen as a region

2019-10-18 Thread Matthew Auld
Convert stolen memory over to a region object. Still leaves open the
question of what to do with pre-allocated objects...

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 65 +++---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c| 14 +
 drivers/gpu/drm/i915/i915_pci.c|  2 +-
 4 files changed, 61 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index c76260ce13e3..57cd8bc2657c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
 
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private 
*dev_priv,
return 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
 {
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private 
*i915,
}
 }
 
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -539,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object 
*obj)
 
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+   if (obj->mm.region)
+   i915_gem_object_release_memory_region(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -548,8 +552,9 @@ static const struct drm_i915_gem_object_ops 
i915_gem_object_stolen_ops = {
 };
 
 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-  struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+   struct drm_mm_node *stolen,
+   struct intel_memory_region *mem)
 {
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -571,6 +576,9 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
if (err)
goto cleanup;
 
+   if (mem)
+   i915_gem_object_init_memory_region(obj, mem, 0);
+
return obj;
 
 cleanup:
@@ -579,10 +587,12 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return ERR_PTR(err);
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+  resource_size_t size,
+  unsigned int flags)
 {
+   struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -603,7 +613,7 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
goto err_free;
}
 
-   obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+   obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
if (IS_ERR(obj))
goto err_remove;
 
@@ -616,6 +626,43 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return obj;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+   return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+   i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+   .init = init_stolen,
+   .release = release_stolen,
+   .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private 
*i915)
+{
+   return intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ resource_size(&intel_graphics_stolen_res),
+ 

[Intel-gfx] [PATCH v2 1/3] drm/i915: enumerate and init each supported region

2019-10-18 Thread Matthew Auld
From: Abdiel Janulgue 

Nothing to enumerate yet...

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/i915_drv.h   |  7 +++
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 56 ---
 drivers/gpu/drm/i915/intel_device_info.h  |  2 +
 drivers/gpu/drm/i915/intel_memory_region.h|  4 ++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 ++
 5 files changed, 68 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 40e923b0c2c8..faf645802710 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -684,6 +684,8 @@ struct i915_gem_mm {
 */
struct vfsmount *gemfs;
 
+   struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -1783,6 +1785,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
 /* Having GuC is not the same as using GuC */
@@ -2001,6 +2005,9 @@ int __must_check i915_gem_evict_for_node(struct 
i915_address_space *vm,
 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915);
+int i915_gem_init_memory_regions(struct drm_i915_private *i915);
+
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7b15bb891970..7b8e30c72f86 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2744,6 +2744,52 @@ int i915_init_ggtt(struct drm_i915_private *i915)
return 0;
 }
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915)
+{
+   int i;
+
+   i915_gem_cleanup_stolen(i915);
+
+   for (i = 0; i < INTEL_REGION_UNKNOWN; i++) {
+   struct intel_memory_region *region = i915->mm.regions[i];
+
+   if (region)
+   intel_memory_region_put(region);
+   }
+}
+
+int i915_gem_init_memory_regions(struct drm_i915_private *i915)
+{
+   int err, i;
+
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   /* XXX: stolen will become a region at some point */
+   err = i915_gem_init_stolen(i915);
+   if (err)
+   return err;
+
+   for (i = 0; i < INTEL_REGION_UNKNOWN; i++) {
+   struct intel_memory_region *mem = ERR_PTR(-ENODEV);
+
+   if (!HAS_REGION(i915, BIT(i)))
+   continue;
+
+   if (IS_ERR(mem)) {
+   err = PTR_ERR(mem);
+   goto out_cleanup;
+   }
+   }
+
+   return 0;
+
+out_cleanup:
+   i915_gem_cleanup_memory_regions(i915);
+   return err;
+}
+
 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
struct i915_vma *vma, *vn;
@@ -2781,6 +2827,8 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
 {
struct pagevec *pvec;
 
+   i915_gem_cleanup_memory_regions(i915);
+
fini_aliasing_ppgtt(&i915->ggtt);
 
ggtt_cleanup_hw(&i915->ggtt);
@@ -2790,8 +2838,6 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
-   i915_gem_cleanup_stolen(i915);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3240,11 +3286,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
 
-   /*
-* Initialise stolen early so that we may reserve preallocated
-* objects for the BIOS to KMS transition.
-*/
-   ret = i915_gem_init_stolen(dev_priv);
+   ret = i915_gem_init_memory_regions(dev_priv);
if (ret)
goto out_gtt_cleanup;
 
diff --git a/drivers/gpu/drm/i915/intel_device_info.h 
b/drivers/gpu/drm/i915/intel_device_info.h
index 0cdc2465534b..e9940f932d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -160,6 +160,8 @@ struct intel_device_info {
 
unsigned int page_sizes; /* page sizes supported by the HW */
 
+   u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
 
u8 pipe_mask;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h 
b/drivers/gpu/drm/i915/intel_memory_region.h
index 52c141b6108c..2c165a7a5ab4

[Intel-gfx] [PATCH 2/4] drm/i915: enumerate and init each supported region

2019-10-17 Thread Matthew Auld
From: Abdiel Janulgue 

Nothing to enumerate yet...

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/i915_drv.h   |  5 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 56 ---
 drivers/gpu/drm/i915/intel_memory_region.h|  4 ++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 ++
 4 files changed, 64 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2dab731c1e80..b983187b45cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -684,6 +684,8 @@ struct i915_gem_mm {
 */
struct vfsmount *gemfs;
 
+   struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -2003,6 +2005,9 @@ int __must_check i915_gem_evict_for_node(struct 
i915_address_space *vm,
 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915);
+int i915_gem_init_memory_regions(struct drm_i915_private *i915);
+
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7b15bb891970..7b8e30c72f86 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2744,6 +2744,52 @@ int i915_init_ggtt(struct drm_i915_private *i915)
return 0;
 }
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915)
+{
+   int i;
+
+   i915_gem_cleanup_stolen(i915);
+
+   for (i = 0; i < INTEL_REGION_UNKNOWN; i++) {
+   struct intel_memory_region *region = i915->mm.regions[i];
+
+   if (region)
+   intel_memory_region_put(region);
+   }
+}
+
+int i915_gem_init_memory_regions(struct drm_i915_private *i915)
+{
+   int err, i;
+
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   /* XXX: stolen will become a region at some point */
+   err = i915_gem_init_stolen(i915);
+   if (err)
+   return err;
+
+   for (i = 0; i < INTEL_REGION_UNKNOWN; i++) {
+   struct intel_memory_region *mem = ERR_PTR(-ENODEV);
+
+   if (!HAS_REGION(i915, BIT(i)))
+   continue;
+
+   if (IS_ERR(mem)) {
+   err = PTR_ERR(mem);
+   goto out_cleanup;
+   }
+   }
+
+   return 0;
+
+out_cleanup:
+   i915_gem_cleanup_memory_regions(i915);
+   return err;
+}
+
 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
struct i915_vma *vma, *vn;
@@ -2781,6 +2827,8 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
 {
struct pagevec *pvec;
 
+   i915_gem_cleanup_memory_regions(i915);
+
fini_aliasing_ppgtt(&i915->ggtt);
 
ggtt_cleanup_hw(&i915->ggtt);
@@ -2790,8 +2838,6 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
-   i915_gem_cleanup_stolen(i915);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3240,11 +3286,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
 
-   /*
-* Initialise stolen early so that we may reserve preallocated
-* objects for the BIOS to KMS transition.
-*/
-   ret = i915_gem_init_stolen(dev_priv);
+   ret = i915_gem_init_memory_regions(dev_priv);
if (ret)
goto out_gtt_cleanup;
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h 
b/drivers/gpu/drm/i915/intel_memory_region.h
index 52c141b6108c..2c165a7a5ab4 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -18,6 +18,10 @@ struct drm_i915_gem_object;
 struct intel_memory_region;
 struct sg_table;
 
+enum intel_region_id {
+   INTEL_REGION_UNKNOWN = 0, /* Should be last */
+};
+
 #define I915_ALLOC_MIN_PAGE_SIZE  BIT(0)
 #define I915_ALLOC_CONTIGUOUS BIT(1)
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c 
b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9c22d41b30cd..bc5e3c67409f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -74,6 +74,8 @@ static void mock_device_release(struct drm_device *dev)
 
i915_gemfs_fini(i915);
 
+   i915_gem_cleanup_memory_regions(i915);
+
drm_mode_config_cleanup(&i915->drm);
 
drm_dev_fini(&i915->drm);
@@ -196,6 +198,10

[Intel-gfx] [PATCH 1/4] drm/i915: Add memory region information to device_info

2019-10-17 Thread Matthew Auld
From: Abdiel Janulgue 

Exposes available regions for the platform. Needed in later patches.
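
As a sketch of how the new field and macro are meant to be consumed (the concrete region bits only appear later in the series, so REGION_EXAMPLE below is purely illustrative):

/* Illustrative only: a platform advertises its regions in the device info... */
#define REGION_EXAMPLE BIT(0)   /* placeholder; the real region bits come later */

static const struct intel_device_info example_info = {
        .memory_regions = REGION_EXAMPLE,
        /* ... rest of the platform description ... */
};

/* ...and the driver gates region-specific setup on the mask: */
static bool has_example_region(struct drm_i915_private *i915)
{
        return HAS_REGION(i915, REGION_EXAMPLE);
}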

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.h  | 2 ++
 drivers/gpu/drm/i915/intel_device_info.h | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 88956f37d96c..2dab731c1e80 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1783,6 +1783,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
 /* Having GuC is not the same as using GuC */
diff --git a/drivers/gpu/drm/i915/intel_device_info.h 
b/drivers/gpu/drm/i915/intel_device_info.h
index 0cdc2465534b..e9940f932d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -160,6 +160,8 @@ struct intel_device_info {
 
unsigned int page_sizes; /* page sizes supported by the HW */
 
+   u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
 
u8 pipe_mask;
-- 
2.20.1


[Intel-gfx] [PATCH 4/4] drm/i915: treat stolen as a region

2019-10-17 Thread Matthew Auld
Convert stolen memory over to a region object. Still leaves open the
question of what to do with pre-allocated objects...

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 65 +++---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c| 14 +
 drivers/gpu/drm/i915/i915_pci.c|  2 +-
 4 files changed, 61 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index c76260ce13e3..0b6c45edbf96 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
 
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private 
*dev_priv,
return 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
 {
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private 
*i915,
}
 }
 
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -539,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object 
*obj)
 
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+   if (obj->mm.region)
+   i915_gem_object_release_memory_region(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -548,8 +552,9 @@ static const struct drm_i915_gem_object_ops 
i915_gem_object_stolen_ops = {
 };
 
 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-  struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+   struct drm_mm_node *stolen,
+   struct intel_memory_region *mem)
 {
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -571,6 +576,9 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
if (err)
goto cleanup;
 
+   if (mem)
+   i915_gem_object_init_memory_region(obj, mem, 0);
+
return obj;
 
 cleanup:
@@ -579,10 +587,12 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return ERR_PTR(err);
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+  resource_size_t size,
+  unsigned int flags)
 {
+   struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -603,7 +613,7 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
goto err_free;
}
 
-   obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+   obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
if (IS_ERR(obj))
goto err_remove;
 
@@ -616,6 +626,43 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return obj;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+   return 
i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+   i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+   .init = init_stolen,
+   .release = release_stolen,
+   .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private 
*i915)
+{
+   return intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ 
resource_size(&intel_graphics_stolen_res),
+ 

[Intel-gfx] [PATCH 3/4] drm/i915: treat shmem as a region

2019-10-17 Thread Matthew Auld
Convert shmem to an intel_memory_region.

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  7 +-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 74 ++-
 drivers/gpu/drm/i915/i915_drv.h   |  2 +
 drivers/gpu/drm/i915/i915_gem.c   |  9 ---
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 15 
 drivers/gpu/drm/i915/i915_pci.c   | 29 ++--
 drivers/gpu/drm/i915/intel_memory_region.c| 10 +++
 drivers/gpu/drm/i915/intel_memory_region.h| 28 ++-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  7 +-
 10 files changed, 143 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c 
b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object 
*obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
 
-   if (!IS_ERR_OR_NULL(pages))
+   if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+   i915_gem_object_release_memory_region(obj);
+   }
mutex_unlock(&obj->mm.lock);
return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index d3f7733bc7ed..2f7bcfb9c964 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -6,6 +6,7 @@
 #include "intel_memory_region.h"
 #include "i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 void
 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
@@ -165,5 +166,9 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
 
-   return mem->ops->create_object(mem, size, flags);
+   obj = mem->ops->create_object(mem, size, flags);
+   if (!IS_ERR(obj))
+   trace_i915_gem_object_create(obj);
+
+   return obj;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..c3bde41de505 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_gemfs.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
 static int shmem_get_pages(struct drm_i915_gem_object *obj)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+   struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 * If there's no chance of allocating enough pages for the whole
 * object, bail early.
 */
-   if (page_count > totalram_pages())
+   if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
 
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 
 static void shmem_release(struct drm_i915_gem_object *obj)
 {
+   i915_gem_object_release_memory_region(obj);
+
fput(obj->base.filp);
 }
 
@@ -434,9 +439,9 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.release = shmem_release,
 };
 
-static int create_shmem(struct drm_i915_private *i915,
-   struct drm_gem_object *obj,
-   size_t size)
+static int __create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ size_t size)
 {
unsigned long flags = VM_NORESERVE;
struct file *filp;
@@ -455,31 +460,23 @@ static int create_shmem(struct drm_i915_private *i915,
return 0;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
+static struct drm_i915_gem_object *
+create_shmem(struct intel_memory_region *mem,
+resource_size_t size,
+unsigned int flags)
 {
+   

Re: [Intel-gfx] [PATCH] drm/i915: Remove leftover vma->obj->pages_pin_count on insert/remove

2019-10-15 Thread Matthew Auld

On 15/10/2019 11:01, Chris Wilson wrote:

We now do the page pin count upfront in vma_get_pages/vma_put_pages, so
that we do the allocations before we enter the vm->mutex. Then our vma
page references are tracked in vma->pages_count and the extra
obj->pages_pin_count being performed in i915_vma_insert/i915_vma_remove
is redundant, and worse throws off the shrinker's logic on when it can
free an object by unbinding it.

Could you also add Reported-by: Daniele Ceraolo Spurio 




Reported-by: Matthew Auld 
Signed-off-by: Chris Wilson 
Cc: Matthew Auld 

Reviewed-by: Matthew Auld 

Re: [Intel-gfx] [PATCH] drm/i915: Drop obj.page_pin_count after a failed vma->set_pages()

2019-10-15 Thread Matthew Auld
On Tue, 15 Oct 2019 at 10:39, Chris Wilson  wrote:
>
> Before we attempt to set_pages on the vma, we claim a
> obj.pages_pin_count for it. If we subsequently fail to set the pages on
> the vma, we need to drop our pinning before returning the error.
>
> Reported-by: Matthew Auld 
> Signed-off-by: Chris Wilson 
> Cc: Matthew Auld 
Reviewed-by: Matthew Auld 

Re: [Intel-gfx] [PATCH v2] drm/i915/selftests: Fixup naked 64b divide

2019-10-14 Thread Matthew Auld

On 13/10/2019 12:45, Chris Wilson wrote:

drivers/gpu/drm/i915/intel_memory_region.o: in function `igt_mock_contiguous':
drivers/gpu/drm/i915/selftests/intel_memory_region.c:166: undefined reference 
to `__umoddi3'

v2: promote target to u64 for consistency across all builds

Reported-by: kbuild test robot 
Fixes: 2f0b97ca0211 ("drm/i915/region: support contiguous allocations")
Signed-off-by: Chris Wilson 
Cc: Matthew Auld 

Reviewed-by: Matthew Auld 

Re: [Intel-gfx] [PATCH] drm/i915/selftests: Serialise write to scratch with its vma binding

2019-10-11 Thread Matthew Auld
On Fri, 11 Oct 2019 at 20:36, Chris Wilson  wrote:
>
> Add the missing serialisation on the request for a write into a vma to
> wait until that vma is bound before being executed by the GPU.
>
> Signed-off-by: Chris Wilson 
> Cc: Matthew Auld 
Reviewed-by: Matthew Auld 

Re: [Intel-gfx] [PATCH i-g-t] debugfs: Define DROP_RCU

2019-10-11 Thread Matthew Auld
On Fri, 11 Oct 2019 at 19:37, Chris Wilson  wrote:
>
> Corresponding kernel commit 54895010a893 ("drm/i915: Add an rcu_barrier
> option to i915_drop_caches")
>
> Signed-off-by: Chris Wilson 
> Cc: Matthew Auld 
Reviewed-by: Matthew Auld 

Re: [Intel-gfx] [PATCH] drm/i915: Add an rcu_barrier option to i915_drop_caches

2019-10-11 Thread Matthew Auld
On Fri, 11 Oct 2019 at 18:38, Chris Wilson  wrote:
>
> Sometimes a test has to wait for RCU to complete a grace period and
> perform its callbacks, for example waiting for a close(fd) to actually
> perform the fput(filp) and so trigger all the callbacks such as closing
> GEM contexts. There is no trivial means of triggering an RCU barrier
> from userspace, so add one for our convenience in
> debugfs/i915_drop_caches
>
> Signed-off-by: Chris Wilson 
> Cc: Matthew Auld 
Reviewed-by: Matthew Auld 

Re: [Intel-gfx] [PATCH] drm/i915/gt: Give engine->kernel_context distinct timeline lock classes

2019-10-08 Thread Matthew Auld
On Tue, 8 Oct 2019 at 19:59, Chris Wilson  wrote:
>
> Assign a separate lockclass to the perma-pinned timelines of the
> kernel_context, such that we can use them from within the user timelines
> should we ever need to inject GPU operations to fixup faults during
> request construction.
>
> Signed-off-by: Chris Wilson 
> Cc: Tvrtko Ursulin 
> Cc: Matthew Auld 
Reviewed-by: Matthew Auld 

[Intel-gfx] [PATCH 3/3] drm/i915/region: support volatile objects

2019-10-08 Thread Matthew Auld
Volatile objects are marked as DONTNEED while pinned, therefore once
unpinned the backing store can be discarded. This is limited to kernel
internal objects.
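
As a rough sketch of the intended flow (hypothetical caller, not part of this
patch; the object is created volatile by i915_gem_object_create_internal()):

	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, SZ_64K);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Contents are only valid while the pages are pinned. */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	/* ... write/read through the object ... */

	/* Once unpinned, the shrinker is free to discard the backing store. */
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
	return err;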

Signed-off-by: Matthew Auld 
Signed-off-by: CQ Tang 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
Reviewed-by: Chris Wilson 
---
 drivers/gpu/drm/i915/gem/i915_gem_internal.c| 17 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h  | 12 
 .../gpu/drm/i915/gem/i915_gem_object_types.h|  9 -
 drivers/gpu/drm/i915/gem/i915_gem_pages.c   |  6 ++
 drivers/gpu/drm/i915/gem/i915_gem_region.c  | 17 -
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 
 drivers/gpu/drm/i915/intel_memory_region.c  |  5 +
 drivers/gpu/drm/i915/intel_memory_region.h  |  6 ++
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c   |  5 ++---
 9 files changed, 68 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c 
b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..5ae694c24df4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ static int i915_gem_object_get_pages_internal(struct 
drm_i915_gem_object *obj)
goto err;
}
 
-   /* Mark the pages as dontneed whilst they are still pinned. As soon
-* as they are unpinned they are allowed to be reaped by the shrinker,
-* and the caller is expected to repopulate - the contents of this
-* object are only valid whilst active and pinned.
-*/
-   obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct 
drm_i915_gem_object *obj,
internal_free_pages(pages);
 
obj->mm.dirty = false;
-   obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -188,6 +180,15 @@ i915_gem_object_create_internal(struct drm_i915_private 
*i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
+   /*
+* Mark the object as volatile, such that the pages are marked as
+* dontneed whilst they are still pinned. As soon as they are unpinned
+* they are allowed to be reaped by the shrinker, and the caller is
+* expected to repopulate - the contents of this object are only valid
+* whilst active and pinned.
+*/
+   i915_gem_object_set_volatile(obj);
+
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfd16d65630f..c5e14c9c805c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -145,6 +145,18 @@ i915_gem_object_is_contiguous(const struct 
drm_i915_gem_object *obj)
return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
 }
 
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+   return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+   obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 unsigned long flags)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c6a712cf7d7a..a387e3ee728b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -121,7 +121,8 @@ struct drm_i915_gem_object {
 
unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+#define I915_BO_ALLOC_VOLATILE   BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 
/*
 * Is the object to be mapped as read-only to the GPU
@@ -172,6 +173,12 @@ struct drm_i915_gem_object {
 * List of memory region blocks allocated for this object.
 */
struct list_head blocks;
+   /**
+* Element within memory_region->objects or region->purgeable
+* if the object is marked as DONTNEED. Access is protected by
+* region->obj_lock.
+*/
+   struct list_head region_link;
 
struct sg_table *pages;
void *mapping;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 2e941f093a20..b0ec0959c13f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i

[Intel-gfx] [PATCH 0/3] intel_memory_region bits

2019-10-08 Thread Matthew Auld
First block of patches from the 'LMEM basics' series.

Matthew Auld (3):
  drm/i915: introduce intel_memory_region
  drm/i915/region: support contiguous allocations
  drm/i915/region: support volatile objects

 drivers/gpu/drm/i915/Makefile |   2 +
 drivers/gpu/drm/i915/gem/i915_gem_internal.c  |  17 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.h|  18 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  20 ++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |   6 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 169 +++
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  29 ++
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  97 +-
 drivers/gpu/drm/i915/i915_drv.h   |   1 +
 drivers/gpu/drm/i915/intel_memory_region.c| 203 +
 drivers/gpu/drm/i915/intel_memory_region.h|  90 ++
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   5 +-
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 280 ++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   1 +
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 +
 17 files changed, 995 insertions(+), 19 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h

-- 
2.20.1


[Intel-gfx] [PATCH 2/3] drm/i915/region: support contiguous allocations

2019-10-08 Thread Matthew Auld
Some kernel internal objects may need to be allocated as a contiguous
block; also, thinking ahead, the various kernel io_mapping interfaces seem
to expect it, although this is purely a limitation in the kernel
API... so perhaps something to be improved.
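
For illustration, a kernel-internal user that needs a physically contiguous
allocation would pass the new flag (hypothetical caller; 'mem' and 'size' are
assumed to come from elsewhere):

	obj = i915_gem_object_create_region(mem, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* The backing store is a single contiguous block within the region. */
	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));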

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
Cc: Michael J Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h|   6 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  15 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h|   3 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  71 
 drivers/gpu/drm/i915/intel_memory_region.c|   9 +-
 drivers/gpu/drm/i915/intel_memory_region.h|   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 165 ++
 drivers/gpu/drm/i915/selftests/mock_region.c  |   2 +-
 9 files changed, 239 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 086a9bf5adcc..dfd16d65630f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -139,6 +139,12 @@ i915_gem_object_is_readonly(const struct 
drm_i915_gem_object *obj)
return obj->base.vma_node.readonly;
 }
 
+static inline bool
+i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
+{
+   return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 unsigned long flags)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 11390586cfe1..c6a712cf7d7a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -119,6 +119,10 @@ struct drm_i915_gem_object {
 
I915_SELFTEST_DECLARE(struct list_head st_link);
 
+   unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
/*
 * Is the object to be mapped as read-only to the GPU
 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index fad3a94641e9..89708b3fa03d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -23,10 +23,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 {
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
-   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
resource_size_t size = obj->base.size;
resource_size_t prev_end;
struct i915_buddy_block *block;
+   unsigned int flags;
struct sg_table *st;
struct scatterlist *sg;
unsigned int sg_page_sizes;
@@ -41,6 +41,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
return -ENOMEM;
}
 
+   flags = I915_ALLOC_MIN_PAGE_SIZE;
+   if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+   flags |= I915_ALLOC_CONTIGUOUS;
+
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
if (ret)
goto err_free_sg;
@@ -55,7 +59,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
list_for_each_entry(block, blocks, link) {
u64 block_size, offset;
 
-   block_size = i915_buddy_block_size(&mem->mm, block);
+   block_size = min_t(u64, size,
+  i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
 
GEM_BUG_ON(overflows_type(block_size, sg->length));
@@ -96,10 +101,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem)
+   struct intel_memory_region *mem,
+   unsigned long flags)
 {
INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
+   obj->flags |= flags;
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
@@ -120,6 +127,8 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
 * future.
 */
 
+   GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
if (!mem)
return ERR_PTR(-ENODEV);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h 
b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@ void i915_gem_object_

[Intel-gfx] [PATCH 1/3] drm/i915: introduce intel_memory_region

2019-10-08 Thread Matthew Auld
Support memory regions, as defined by a given (start, end), and allow
creating GEM objects which are backed by said region. The immediate goal
here is to have something to represent our device memory, but later on
we also want to represent every memory domain with a region, so stolen,
shmem, and of course device. At some point we are probably going to want
to use a common struct here, such that we are better aligned with, say, TTM.
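
As a rough usage sketch (hypothetical caller; 'mem' is an intel_memory_region,
e.g. one returned by intel_memory_region_create()):

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_region(mem, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* obj is now backed by pages allocated from 'mem'. */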

Signed-off-by: Matthew Auld 
Signed-off-by: Abdiel Janulgue 
Signed-off-by: Niranjana Vishwanathapura 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/Makefile |   2 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 145 +
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  28 +++
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  78 +++
 drivers/gpu/drm/i915/i915_drv.h   |   1 +
 drivers/gpu/drm/i915/intel_memory_region.c| 191 ++
 drivers/gpu/drm/i915/intel_memory_region.h|  83 
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 115 +++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   1 +
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 ++
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 ++
 13 files changed, 729 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a6006aa715ff..e791d9323b51 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -51,6 +51,7 @@ i915-y += i915_drv.o \
  i915_utils.o \
  intel_csr.o \
  intel_device_info.o \
+ intel_memory_region.o \
  intel_pch.o \
  intel_pm.o \
  intel_runtime_pm.o \
@@ -121,6 +122,7 @@ gem-y += \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+   gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c00b4f077f9e..11390586cfe1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -160,6 +160,15 @@ struct drm_i915_gem_object {
atomic_t pages_pin_count;
atomic_t shrink_pin;
 
+   /**
+* Memory region for this object.
+*/
+   struct intel_memory_region *region;
+   /**
+* List of memory region blocks allocated for this object.
+*/
+   struct list_head blocks;
+
struct sg_table *pages;
void *mapping;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index ..fad3a94641e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+   struct sg_table *pages)
+{
+   __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+   obj->mm.dirty = false;
+   sg_free_table(pages);
+   kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+   struct intel_memory_region *mem = obj->mm.region;
+   struct list_head *blocks = &obj->mm.blocks;
+   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
+   resource_size_t size = obj->base.size;
+   resource_size_t prev_end;
+   struct i915_buddy_block *block;
+   struct sg_table *st;
+   struct scatterlist *sg;
+   unsigned int sg_page_sizes;
+   int ret;
+
+   st = kmalloc(sizeof(*st), GFP_KERNEL);
+   if (!st)
+   return -ENOMEM;
+
+   if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+   kfree(st);
+   return -ENOMEM;
+   }
+
+   ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+   if (ret)
+   goto err_free_sg;
+
+   GEM_BUG_ON(list_empty(blocks));
+
+   sg = st->sgl;
+   st->nents = 0;
+   

Re: [Intel-gfx] [PATCH] drm/i915/selftests: Assign the intel_runtime_pm pointer for mock_uncore

2019-10-08 Thread Matthew Auld
On Tue, 8 Oct 2019 at 15:50, Chris Wilson  wrote:
>
> Couple up our mock_uncore to know about the fake global device and its
> runtime powermanagement.
>
> Signed-off-by: Chris Wilson 
> Cc: Matthew Auld 

Thanks,
Reviewed-by: Matthew Auld 

Re: [Intel-gfx] [PATCH v3 02/21] drm/i915: introduce intel_memory_region

2019-10-08 Thread Matthew Auld

On 08/10/2019 09:59, Chris Wilson wrote:

Quoting Matthew Auld (2019-10-04 18:04:33)

+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+   struct intel_memory_region *mem = obj->mm.region;
+   struct list_head *blocks = &obj->mm.blocks;
+   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
+   resource_size_t size = obj->base.size;
+   resource_size_t prev_end;
+   struct i915_buddy_block *block;
+   struct sg_table *st;
+   struct scatterlist *sg;
+   unsigned int sg_page_sizes;
+   unsigned long i;
+   int ret;
+
+   st = kmalloc(sizeof(*st), GFP_KERNEL);
+   if (!st)
+   return -ENOMEM;
+
+   if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+   kfree(st);
+   return -ENOMEM;
+   }
+
+   ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+   if (ret)
+   goto err_free_sg;
+
+   GEM_BUG_ON(list_empty(blocks));
+
+   sg = st->sgl;
+   st->nents = 0;
+   sg_page_sizes = 0;
+   i = 0;
+
+   list_for_each_entry(block, blocks, link) {
+   u64 block_size, offset;
+
+   block_size = i915_buddy_block_size(&mem->mm, block);
+   offset = i915_buddy_block_offset(block);
+
+   GEM_BUG_ON(overflows_type(block_size, sg->length));
+
+   if (!i || offset != prev_end ||
+   add_overflows_t(typeof(sg->length), sg->length, 
block_size)) {
+   if (i) {


i is only being here to detect the start.
prev_end = (resource_size_t)-1;
Then I don't have to worry about i overflowing, although that seems
harmless, and as ridiculous as that may be.
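
Something like this, just to sketch the suggested rework (not the posted
patch):

	prev_end = (resource_size_t)-1;
	list_for_each_entry(block, blocks, link) {
		u64 block_size = i915_buddy_block_size(&mem->mm, block);
		u64 offset = i915_buddy_block_offset(block);

		if (offset != prev_end ||
		    add_overflows_t(typeof(sg->length), sg->length, block_size)) {
			if (prev_end != (resource_size_t)-1) {
				sg_page_sizes |= sg->length;
				sg = __sg_next(sg);
			}

			sg_dma_address(sg) = mem->region.start + offset;
			sg_dma_len(sg) = block_size;
			sg->length = block_size;
			st->nents++;
		} else {
			sg->length += block_size;
			sg_dma_len(sg) += block_size;
		}

		prev_end = offset + block_size;
	}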


+   sg_page_sizes |= sg->length;
+   sg = __sg_next(sg);
+   }
+
+   sg_dma_address(sg) = mem->region.start + offset;
+   sg_dma_len(sg) = block_size;
+
+   sg->length = block_size;
+
+   st->nents++;
+   } else {
+   sg->length += block_size;
+   sg_dma_len(sg) += block_size;
+   }
+
+   prev_end = offset + block_size;
+   i++;
+   };
+
+   sg_page_sizes |= sg->length;
+   sg_mark_end(sg);
+   i915_sg_trim(st);
+
+   __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+   return 0;
+
+err_free_sg:
+   sg_free_table(st);
+   kfree(st);
+   return ret;
+}



+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+   struct drm_i915_gem_object *obj;
+
+   if (!mem)
+   return ERR_PTR(-ENODEV);
+
+   size = round_up(size, mem->min_page_size);
+
+   GEM_BUG_ON(!size);
+   GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
+


Put the FIXME here from the start


+   if (size >> PAGE_SHIFT > INT_MAX)
+   return ERR_PTR(-E2BIG);
+
+   if (overflows_type(size, obj->base.size))
+   return ERR_PTR(-E2BIG);
+
+   return mem->ops->create_object(mem, size, flags);
+}


I'm happy with this. But I would like a discussion on why
resource_size_t was chosen and for that rationale to be captured in the
code comments. At the end of the day, we need to bump obj->base.size to
suit.


No good reason, I guess it just stems from using "struct resource 
region". Maybe for the object_create interface we should just use u64?


On that topic should we also stop using "struct resource" here, since 
that's starting to get confusing, maybe use u64 there also?



-Chris



[Intel-gfx] [PATCH v3 17/21] drm/i915: error capture with no ggtt slot

2019-10-04 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

If the aperture is not available in HW we can't use a ggtt slot and wc
copy, so fall back to regular kmap.
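
Roughly, the fallback takes this shape (a sketch only; the exact locals and
iteration are assumptions, the real change is in the diff below):

	struct page *page;

	for_each_sgt_page(page, iter, vma->pages) {
		void *s;

		s = kmap(page);
		ret = compress_page(compress, s, dst);
		kunmap(page);

		if (ret)
			break;
	}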

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 19 
 drivers/gpu/drm/i915/i915_gpu_error.c | 66 ++-
 2 files changed, 65 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ad123b84880d..235028ad684f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2661,7 +2661,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
 static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
 {
ggtt_release_guc_top(ggtt);
-   drm_mm_remove_node(&ggtt->error_capture);
+   if (drm_mm_node_allocated(&ggtt->error_capture))
+   drm_mm_remove_node(&ggtt->error_capture);
 }
 
 static int init_ggtt(struct i915_ggtt *ggtt)
@@ -2692,13 +2693,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
if (ret)
return ret;
 
-   /* Reserve a mappable slot for our lockless error capture */
-   ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
-   if (ret)
-   return ret;
+   if (ggtt->mappable_end) {
+   /* Reserve a mappable slot for our lockless error capture */
+   ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
&ggtt->error_capture,
+ PAGE_SIZE, 0, 
I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+   if (ret)
+   return ret;
+   }
 
/*
 * The upper portion of the GuC address space has a sizeable hole
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c 
b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5cf4eed5add8..26c3d9c75ab5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -40,6 +40,7 @@
 #include "display/intel_overlay.h"
 
 #include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
 
 #include "i915_drv.h"
 #include "i915_gpu_error.h"
@@ -235,6 +236,7 @@ struct compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
+   bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -292,7 +294,7 @@ static int compress_page(struct compress *c,
struct z_stream_s *zstream = &c->zstream;
 
zstream->next_in = src;
-   if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+   if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
 
@@ -367,6 +369,7 @@ static void err_compression_marker(struct 
drm_i915_error_state_buf *m)
 
 struct compress {
struct pagevec pool;
+   bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -389,7 +392,7 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
 
-   if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+   if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
 
@@ -970,7 +973,6 @@ i915_error_object_create(struct drm_i915_private *i915,
struct drm_i915_error_object *dst;
unsigned long num_pages;
struct sgt_iter iter;
-   dma_addr_t dma;
int ret;
 
might_sleep();
@@ -996,17 +998,54 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
 
+   compress->wc = i915_gem_object_is_lmem(vma->obj) ||
+  drm_mm_node_allocated(&ggtt->error_capture);
+
ret = -EINVAL;
-   for_each_sgt_daddr(dma, iter, vma->pages) {
+   if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+   dma_addr_t dma;
 
-   ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+   for_each_sgt_daddr(dma, iter, vma->pages) {
+   ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+I915_CACHE_NONE, 0);
 
-   s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
-   ret = compress_page(compress, (void  __force *)s, dst);
-   io_mapping_unmap(s);
-   if (ret)
-   break;
+   

[Intel-gfx] [PATCH v3 13/21] drm/i915: treat stolen as a region

2019-10-04 Thread Matthew Auld
Convert stolen memory over to a region object. This still leaves open the
question of what to do with pre-allocated objects...

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 65 +++---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c| 14 +
 drivers/gpu/drm/i915/i915_pci.c|  2 +-
 4 files changed, 61 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 2a34355b738f..a452f992e71c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
 
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private 
*dev_priv,
return 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
 {
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private 
*i915,
}
 }
 
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -539,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object 
*obj)
 
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+   if (obj->mm.region)
+   i915_gem_object_release_memory_region(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -548,8 +552,9 @@ static const struct drm_i915_gem_object_ops 
i915_gem_object_stolen_ops = {
 };
 
 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-  struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+   struct drm_mm_node *stolen,
+   struct intel_memory_region *mem)
 {
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -566,6 +571,9 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
 
+   if (mem)
+   i915_gem_object_init_memory_region(obj, mem, 0);
+
if (i915_gem_object_pin_pages(obj))
goto cleanup;
 
@@ -576,10 +584,12 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return NULL;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+  resource_size_t size,
+  unsigned int flags)
 {
+   struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -598,7 +608,7 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
if (ret)
goto err_free;
 
-   obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+   obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
if (obj == NULL) {
ret = -ENOMEM;
goto err_remove;
@@ -613,6 +623,43 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return ERR_PTR(ret);
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+   return 
i915_gem_object_create_region(dev_priv->mm.regions[INTEL_MEMORY_STOLEN],
+size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+   i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+   .init = init_stolen,
+   .release = release_stolen,
+   .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private 
*i915)
+{
+   return intel_memory_region_create(i915,
+

[Intel-gfx] [PATCH v3 18/21] drm/i915: Don't try to place HWS in non-existing mappable region

2019-10-04 Thread Matthew Auld
From: Michal Wajdeczko 

HWS placement restrictions can't just rely on HAS_LLC flag.

Signed-off-by: Michal Wajdeczko 
Signed-off-by: Matthew Auld 
Cc: Daniele Ceraolo Spurio 
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c 
b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 80fd072ac719..f6799ef9915a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -513,7 +513,8 @@ static int pin_ggtt_status_page(struct intel_engine_cs 
*engine,
unsigned int flags;
 
flags = PIN_GLOBAL;
-   if (!HAS_LLC(engine->i915))
+   if (!HAS_LLC(engine->i915) &&
+   i915_ggtt_has_aperture(&engine->i915->ggtt))
/*
 * On g33, we cannot place HWS above 256MiB, so
 * restrict its pinning to the low mappable arena.
-- 
2.20.1


[Intel-gfx] [PATCH v3 14/21] drm/i915: define i915_ggtt_has_aperture

2019-10-04 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

The following patches in the series will use it to avoid certain
operations when the mappable aperture is not available in HW.
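
The intended guard pattern in those later patches looks roughly like
(illustrative only):

	if (i915_ggtt_has_aperture(ggtt)) {
		/* aperture-backed path, e.g. mapping through ggtt->iomap */
	} else {
		/* skip the operation or fall back to a non-aperture path */
	}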

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_gtt.h | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h 
b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0a18fdfe63ff..62b586c4cf71 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -570,6 +570,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
 int i915_init_ggtt(struct drm_i915_private *dev_priv);
 void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
 
+static inline bool i915_ggtt_has_aperture(struct i915_ggtt *ggtt)
+{
+   return ggtt->mappable_end > 0;
+}
+
 int i915_ppgtt_init_hw(struct intel_gt *gt);
 
 struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-- 
2.20.1


[Intel-gfx] [PATCH v3 12/21] drm/i915: treat shmem as a region

2019-10-04 Thread Matthew Auld
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 14 +++-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 74 ++-
 drivers/gpu/drm/i915/i915_drv.h   |  2 +
 drivers/gpu/drm/i915/i915_gem.c   |  9 ---
 drivers/gpu/drm/i915/i915_gem_gtt.c   |  3 +-
 drivers/gpu/drm/i915/i915_pci.c   | 29 ++--
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 +-
 8 files changed, 99 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c 
b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object 
*obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
 
-   if (!IS_ERR_OR_NULL(pages))
+   if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+   i915_gem_object_release_memory_region(obj);
+   }
mutex_unlock(&obj->mm.lock);
return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 663254b3da21..0c03c2c40dbc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -6,6 +6,7 @@
 #include "intel_memory_region.h"
 #include "i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 void
 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
@@ -146,11 +147,22 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
 
+   /*
+* There is a prevalence of the assumption that we fit the object's
+* page count inside a 32bit _signed_ variable. Let's document this and
+* catch if we ever need to fix it. In the meantime, if you do spot
+* such a local variable, please consider fixing!
+*/
+
if (size >> PAGE_SHIFT > INT_MAX)
return ERR_PTR(-E2BIG);
 
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
 
-   return mem->ops->create_object(mem, size, flags);
+   obj = mem->ops->create_object(mem, size, flags);
+   if (!IS_ERR(obj))
+   trace_i915_gem_object_create(obj);
+
+   return obj;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..f468a3424df4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_gemfs.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
 static int shmem_get_pages(struct drm_i915_gem_object *obj)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+   struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 * If there's no chance of allocating enough pages for the whole
 * object, bail early.
 */
-   if (page_count > totalram_pages())
+   if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
 
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 
 static void shmem_release(struct drm_i915_gem_object *obj)
 {
+   i915_gem_object_release_memory_region(obj);
+
fput(obj->base.filp);
 }
 
@@ -434,9 +439,9 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.release = shmem_release,
 };
 
-static int create_shmem(struct drm_i915_private *i915,
-   struct drm_gem_object *obj,
-   size_t size)
+static int __create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ size_t size)
 {
unsigned long flags = VM_NORESERVE;
struct file *filp;
@@ -455,31 +46

[Intel-gfx] [PATCH v3 21/21] HAX drm/i915: add the fake lmem region

2019-10-04 Thread Matthew Auld
Intended for upstream testing so that we can still exercise the LMEM
plumbing and !HAS_MAPPABLE_APERTURE paths. Smoke tested on Skull Canyon
device. This works by allocating an intel_memory_region for a reserved
portion of system memory, which we treat like LMEM. For the LMEMBAR we
steal the aperture and 1:1 map it to the stolen region.

To enable, simply set i915_fake_lmem_start= on the kernel cmdline to the
start of the reserved region (see memmap=). The size of the region we can
use is determined by the size of the mappable aperture, so the size of the
reserved region should be >= mappable_end.

eg. memmap=2G$16G i915_fake_lmem_start=0x4

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 arch/x86/kernel/early-quirks.c | 26 +++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c   |  3 +
 drivers/gpu/drm/i915/i915_drv.c|  8 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c|  3 +
 drivers/gpu/drm/i915/intel_memory_region.h |  6 ++
 drivers/gpu/drm/i915/intel_region_lmem.c   | 90 ++
 drivers/gpu/drm/i915/intel_region_lmem.h   |  5 ++
 include/drm/i915_drm.h |  3 +
 8 files changed, 144 insertions(+)

diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6f6b1d04dadf..9b04655e3926 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -603,6 +603,32 @@ static void __init intel_graphics_quirks(int num, int 
slot, int func)
}
 }
 
+struct resource intel_graphics_fake_lmem_res __ro_after_init = 
DEFINE_RES_MEM(0, 0);
+EXPORT_SYMBOL(intel_graphics_fake_lmem_res);
+
+static int __init early_i915_fake_lmem_init(char *s)
+{
+   u64 start;
+   int ret;
+
+   if (*s == '=')
+   s++;
+
+   ret = kstrtoull(s, 16, &start);
+   if (ret)
+   return ret;
+
+   intel_graphics_fake_lmem_res.start = start;
+   intel_graphics_fake_lmem_res.end = SZ_2G; /* Placeholder; depends on 
aperture size */
+
+   printk(KERN_INFO "Intel graphics fake LMEM starts at %pa\n",
+  &intel_graphics_fake_lmem_res.start);
+
+   return 0;
+}
+
+early_param("i915_fake_lmem_start", early_i915_fake_lmem_init);
+
 static void __init force_disable_hpet(int num, int slot, int func)
 {
 #ifdef CONFIG_HPET_TIMER
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index fd8b63dd154d..b48f95ba336f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -23,6 +23,7 @@ void __iomem *i915_gem_object_lmem_io_map_page(struct 
drm_i915_gem_object *obj,
resource_size_t offset;
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
 }
@@ -33,6 +34,7 @@ void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct 
drm_i915_gem_object
resource_size_t offset;
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
 }
@@ -46,6 +48,7 @@ void __iomem *i915_gem_object_lmem_io_map(struct 
drm_i915_gem_object *obj,
GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 024da582ba0f..fdd548299156 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1526,6 +1526,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
 
+   /* Check if we support fake LMEM -- enable for live selftests */
+   if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live &&
+   intel_graphics_fake_lmem_res.start) {
+   mkwrite_device_info(dev_priv)->memory_regions =
+   REGION_SMEM | REGION_LMEM | REGION_STOLEN;
+   GEM_BUG_ON(!HAS_LMEM(dev_priv));
+   }
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 235028ad684f..146b2c0573e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2778,6 +2778,9 @@ int i915_gem_init_memory_regions(struct drm_i915_private 
*i915)
case INTEL_STOLEN:
mem = i915_gem_stolen_setup(i915);
break;
+   case INTEL_LMEM:
+   mem = intel_se

[Intel-gfx] [PATCH v3 04/21] drm/i915/region: support volatile objects

2019-10-04 Thread Matthew Auld
Volatile objects are marked as DONTNEED while pinned, therefore once
unpinned the backing store can be discarded. This is limited to kernel
internal objects.

Signed-off-by: Matthew Auld 
Signed-off-by: CQ Tang 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_internal.c| 17 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h  | 12 
 .../gpu/drm/i915/gem/i915_gem_object_types.h|  9 -
 drivers/gpu/drm/i915/gem/i915_gem_pages.c   |  6 ++
 drivers/gpu/drm/i915/gem/i915_gem_region.c  | 13 +
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 
 drivers/gpu/drm/i915/intel_memory_region.c  |  4 
 drivers/gpu/drm/i915/intel_memory_region.h  |  5 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c   |  5 ++---
 9 files changed, 63 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c 
b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..5ae694c24df4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ static int i915_gem_object_get_pages_internal(struct 
drm_i915_gem_object *obj)
goto err;
}
 
-   /* Mark the pages as dontneed whilst they are still pinned. As soon
-* as they are unpinned they are allowed to be reaped by the shrinker,
-* and the caller is expected to repopulate - the contents of this
-* object are only valid whilst active and pinned.
-*/
-   obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct 
drm_i915_gem_object *obj,
internal_free_pages(pages);
 
obj->mm.dirty = false;
-   obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -188,6 +180,15 @@ i915_gem_object_create_internal(struct drm_i915_private 
*i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
+   /*
+* Mark the object as volatile, such that the pages are marked as
+* dontneed whilst they are still pinned. As soon as they are unpinned
+* they are allowed to be reaped by the shrinker, and the caller is
+* expected to repopulate - the contents of this object are only valid
+* whilst active and pinned.
+*/
+   i915_gem_object_set_volatile(obj);
+
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfd16d65630f..c5e14c9c805c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -145,6 +145,18 @@ i915_gem_object_is_contiguous(const struct 
drm_i915_gem_object *obj)
return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
 }
 
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+   return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+   obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 unsigned long flags)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c6a712cf7d7a..a387e3ee728b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -121,7 +121,8 @@ struct drm_i915_gem_object {
 
unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+#define I915_BO_ALLOC_VOLATILE   BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 
/*
 * Is the object to be mapped as read-only to the GPU
@@ -172,6 +173,12 @@ struct drm_i915_gem_object {
 * List of memory region blocks allocated for this object.
 */
struct list_head blocks;
+   /**
+* Element within memory_region->objects or region->purgeable
+* if the object is marked as DONTNEED. Access is protected by
+* region->obj_lock.
+*/
+   struct list_head region_link;
 
struct sg_table *pages;
void *mapping;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 2e941f093a20..b0ec0959c13f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -18,6 +

[Intel-gfx] [PATCH v3 15/21] drm/i915: do not map aperture if it is not available.

2019-10-04 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

Skip both setup and cleanup of the aperture mapping if the HW doesn't
have an aperture bar.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 32 ++---
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 44900596fec6..ad123b84880d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2823,7 +2823,9 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
i915_address_space_fini(&ggtt->vm);
 
arch_phys_wc_del(ggtt->mtrr);
-   io_mapping_fini(&ggtt->iomap);
+
+   if (ggtt->iomap.size)
+   io_mapping_fini(&ggtt->iomap);
 }
 
 /**
@@ -3034,10 +3036,13 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
 
/* TODO: We're not aware of mappable constraints on gen8 yet */
-   ggtt->gmadr =
-   (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
-pci_resource_len(pdev, 2));
-   ggtt->mappable_end = resource_size(&ggtt->gmadr);
+   /* FIXME: We probably need to add this to device_info or runtime_info */
+   if (!HAS_LMEM(dev_priv)) {
+   ggtt->gmadr =
+   (struct resource) 
DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+pci_resource_len(pdev, 
2));
+   ggtt->mappable_end = resource_size(&ggtt->gmadr);
+   }
 
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3260,14 +3265,17 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
 
-   if (!io_mapping_init_wc(&ggtt->iomap,
-   ggtt->gmadr.start,
-   ggtt->mappable_end)) {
-   ggtt->vm.cleanup(&ggtt->vm);
-   return -EIO;
-   }
+   if (ggtt->mappable_end) {
+   if (!io_mapping_init_wc(&ggtt->iomap,
+   ggtt->gmadr.start,
+   ggtt->mappable_end)) {
+   ggtt->vm.cleanup(&ggtt->vm);
+   return -EIO;
+   }
 
-   ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+   ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+   }
 
i915_ggtt_init_fences(ggtt);
 
-- 
2.20.1


[Intel-gfx] [PATCH v3 00/21] LMEM basics

2019-10-04 Thread Matthew Auld
The basic LMEM bits, minus the uAPI, pruning, etc. The goal is to support
basic LMEM object creation within the kernel. From there we can start with the
dumb buffer support, and then the other display related bits.

Quick respin with a bunch of minor tweaks + rebasing on the now merged
struct_mutex removal patches.

Abdiel Janulgue (4):
  drm/i915: Add memory region information to device_info
  drm/i915: setup io-mapping for LMEM
  drm/i915/lmem: support kernel mapping
  drm/i915: enumerate and init each supported region

CQ Tang (1):
  drm/i915/stolen: make the object creation interface consistent

Daniele Ceraolo Spurio (4):
  drm/i915: define i915_ggtt_has_aperture
  drm/i915: do not map aperture if it is not available.
  drm/i915: set num_fence_regs to 0 if there is no aperture
  drm/i915: error capture with no ggtt slot

Matthew Auld (11):
  drm/i915: introduce intel_memory_region
  drm/i915/region: support contiguous allocations
  drm/i915/region: support volatile objects
  drm/i915: support creating LMEM objects
  drm/i915/selftests: add write-dword test for LMEM
  drm/i915/selftests: extend coverage to include LMEM huge-pages
  drm/i915: treat shmem as a region
  drm/i915: treat stolen as a region
  drm/i915: don't allocate the ring in stolen if we lack aperture
  drm/i915/selftests: check for missing aperture
  HAX drm/i915: add the fake lmem region

Michal Wajdeczko (1):
  drm/i915: Don't try to place HWS in non-existing mappable region

 arch/x86/kernel/early-quirks.c|  26 +
 drivers/gpu/drm/i915/Makefile |   4 +
 drivers/gpu/drm/i915/display/intel_display.c  |   2 +-
 drivers/gpu/drm/i915/display/intel_fbdev.c|   4 +-
 drivers/gpu/drm/i915/display/intel_overlay.c  |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_internal.c  |  17 +-
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  |  68 ++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |  31 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h|  18 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  29 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |  28 +-
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |   5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 168 +
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  29 +
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c |  74 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c|  99 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h|   3 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 218 ++-
 .../i915/gem/selftests/i915_gem_coherency.c   |   5 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c|   6 +
 drivers/gpu/drm/i915/gt/intel_engine_cs.c |   3 +-
 drivers/gpu/drm/i915/gt/intel_gt.c|   2 +-
 drivers/gpu/drm/i915/gt/intel_rc6.c   |   8 +-
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c|   6 +-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  14 +-
 drivers/gpu/drm/i915/i915_drv.c   |   8 +
 drivers/gpu/drm/i915/i915_drv.h   |  13 +
 drivers/gpu/drm/i915/i915_gem.c   |   9 -
 drivers/gpu/drm/i915/i915_gem_fence_reg.c |   6 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 117 +++-
 drivers/gpu/drm/i915/i915_gem_gtt.h   |   5 +
 drivers/gpu/drm/i915/i915_gpu_error.c |  66 +-
 drivers/gpu/drm/i915/i915_pci.c   |  29 +-
 drivers/gpu/drm/i915/intel_device_info.h  |   2 +
 drivers/gpu/drm/i915/intel_memory_region.c| 212 ++
 drivers/gpu/drm/i915/intel_memory_region.h| 125 
 drivers/gpu/drm/i915/intel_region_lmem.c  | 157 +
 drivers/gpu/drm/i915/intel_region_lmem.h  |  16 +
 drivers/gpu/drm/i915/selftests/i915_gem.c |   4 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   8 +-
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 602 ++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   9 +-
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 ++
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 +
 include/drm/i915_drm.h|   3 +
 47 files changed, 2186 insertions(+), 151 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h

-- 
2.20.1


[Intel-gfx] [PATCH v3 19/21] drm/i915: don't allocate the ring in stolen if we lack aperture

2019-10-04 Thread Matthew Auld
Since we have no way to access it from the CPU. For such cases just
fall back to internal objects.

Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c 
b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 311fdc0a21bc..9ed6f52d178f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1273,7 +1273,9 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt 
*ggtt, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
 
-   obj = i915_gem_object_create_stolen(i915, size);
+   obj = ERR_PTR(-ENODEV);
+   if (i915_ggtt_has_aperture(ggtt))
+   obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
-- 
2.20.1


[Intel-gfx] [PATCH v3 01/21] drm/i915/stolen: make the object creation interface consistent

2019-10-04 Thread Matthew Auld
From: CQ Tang 

Our other backends return an actual error value upon failure. Do the
same for stolen objects, which currently just return NULL on failure.
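
For callers this means switching from NULL checks to the usual
ERR_PTR()/IS_ERR() convention. A minimal sketch of the resulting pattern
(mirroring the stolen-then-internal fallback used in the hunks below; obj,
i915 and size are assumed locals):

  obj = i915_gem_object_create_stolen(i915, size);
  if (IS_ERR(obj))
          obj = i915_gem_object_create_internal(i915, size);
  if (IS_ERR(obj))
          return PTR_ERR(obj);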

Signed-off-by: CQ Tang 
Signed-off-by: Matthew Auld 
Cc: Chris Wilson 
Reviewed-by: Chris Wilson 
---
 drivers/gpu/drm/i915/display/intel_display.c |  2 +-
 drivers/gpu/drm/i915/display/intel_fbdev.c   |  4 +--
 drivers/gpu/drm/i915/display/intel_overlay.c |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c   | 36 +++-
 drivers/gpu/drm/i915/gt/intel_gt.c   |  2 +-
 drivers/gpu/drm/i915/gt/intel_rc6.c  |  8 ++---
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c   |  2 +-
 7 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c 
b/drivers/gpu/drm/i915/display/intel_display.c
index 5acc39f32d0c..c15975a410bc 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3066,7 +3066,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 base_aligned,
 base_aligned,
 size_aligned);
-   if (!obj)
+   if (IS_ERR(obj))
return false;
 
switch (plane_config->tiling) {
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c 
b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 97cde017670a..3d1061470e76 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
 * important and we should probably use that space with FBC or other
 * features. */
-   obj = NULL;
+   obj = ERR_PTR(-ENODEV);
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
-   if (obj == NULL)
+   if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c 
b/drivers/gpu/drm/i915/display/intel_overlay.c
index daea112cbb87..2360f19f9694 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1291,7 +1291,7 @@ static int get_registers(struct intel_overlay *overlay, 
bool use_phys)
int err;
 
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
-   if (obj == NULL)
+   if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index fad98a921cde..2a34355b738f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -585,28 +585,32 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
int ret;
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
-   return NULL;
+   return ERR_PTR(-ENODEV);
 
if (size == 0)
-   return NULL;
+   return ERR_PTR(-EINVAL);
 
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
-   return NULL;
+   return ERR_PTR(-ENOMEM);
 
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
-   if (ret) {
-   kfree(stolen);
-   return NULL;
-   }
+   if (ret)
+   goto err_free;
 
obj = _i915_gem_object_create_stolen(dev_priv, stolen);
-   if (obj)
-   return obj;
+   if (obj == NULL) {
+   ret = -ENOMEM;
+   goto err_remove;
+   }
 
+   return obj;
+
+err_remove:
i915_gem_stolen_remove_node(dev_priv, stolen);
+err_free:
kfree(stolen);
-   return NULL;
+   return ERR_PTR(ret);
 }
 
 struct drm_i915_gem_object *
@@ -622,7 +626,7 @@ i915_gem_object_create_stolen_for_preallocated(struct 
drm_i915_private *dev_priv
int ret;
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
-   return NULL;
+   return ERR_PTR(-ENODEV);
 
DRM_DEBUG_DRIVER("creating preallocated stolen object: 
stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
 _offset, _offset, );
@@ -631,11 +635,11 @@ i915_gem_object_create_stolen_for_preallocated(struct 
drm_i915_private *dev_priv
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
-   return NULL;
+   return ERR_PTR(-EINVAL);
 

[Intel-gfx] [PATCH v3 11/21] drm/i915: enumerate and init each supported region

2019-10-04 Thread Matthew Auld
From: Abdiel Janulgue 

Nothing to enumerate yet...

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/i915_drv.h   |  3 +
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 70 +--
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 ++
 3 files changed, 72 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ff8e21e585a3..0233265405b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2351,6 +2351,9 @@ int __must_check i915_gem_evict_for_node(struct 
i915_address_space *vm,
 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915);
+int i915_gem_init_memory_regions(struct drm_i915_private *i915);
+
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7b15bb891970..450ec8779252 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2744,6 +2744,66 @@ int i915_init_ggtt(struct drm_i915_private *i915)
return 0;
 }
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915)
+{
+   int i;
+
+   i915_gem_cleanup_stolen(i915);
+
+   for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+   struct intel_memory_region *region = i915->mm.regions[i];
+
+   if (region)
+   intel_memory_region_put(region);
+   }
+}
+
+int i915_gem_init_memory_regions(struct drm_i915_private *i915)
+{
+   int err, i;
+
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   /* XXX: stolen will become a region at some point */
+   err = i915_gem_init_stolen(i915);
+   if (err)
+   return err;
+
+   for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+   struct intel_memory_region *mem = NULL;
+   u32 type;
+
+   if (!HAS_REGION(i915, BIT(i)))
+   continue;
+
+   type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+   switch (type) {
+   default:
+   break;
+   }
+
+   if (IS_ERR(mem)) {
+   err = PTR_ERR(mem);
+   DRM_ERROR("Failed to setup region(%d) type=%d\n", err, 
type);
+   goto out_cleanup;
+   }
+
+   mem->id = intel_region_map[i];
+   mem->type = type;
+   mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+
+   i915->mm.regions[i] = mem;
+   }
+
+   return 0;
+
+out_cleanup:
+   i915_gem_cleanup_memory_regions(i915);
+   return err;
+}
+
 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
struct i915_vma *vma, *vn;
@@ -2781,6 +2841,8 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
 {
struct pagevec *pvec;
 
+   i915_gem_cleanup_memory_regions(i915);
+
fini_aliasing_ppgtt(&i915->ggtt);

ggtt_cleanup_hw(&i915->ggtt);
@@ -2790,8 +2852,6 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
-   i915_gem_cleanup_stolen(i915);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3240,11 +3300,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
 
-   /*
-* Initialise stolen early so that we may reserve preallocated
-* objects for the BIOS to KMS transition.
-*/
-   ret = i915_gem_init_stolen(dev_priv);
+   ret = i915_gem_init_memory_regions(dev_priv);
if (ret)
goto out_gtt_cleanup;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c 
b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 8264d38fa690..6d47f0aeac0c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -74,6 +74,8 @@ static void mock_device_release(struct drm_device *dev)
 
i915_gemfs_fini(i915);
 
+   i915_gem_cleanup_memory_regions(i915);
+
drm_mode_config_cleanup(&i915->drm);

drm_dev_fini(&i915->drm);
@@ -196,6 +198,10 @@ struct drm_i915_private *mock_gem_device(void)
 
WARN_ON(i915_gemfs_init(i915));
 
+   err = i915_gem_init_memory_regions(i915);
+   if (err)
+   goto err_context;
+
return i915;
 
 err_context:
-- 
2.20.1


[Intel-gfx] [PATCH v3 03/21] drm/i915/region: support contiguous allocations

2019-10-04 Thread Matthew Auld
Some kernel internal objects may need to be allocated as a contiguous
block. Thinking ahead, the various kernel io_mapping interfaces also seem
to expect it, although this is purely a limitation in the kernel API, so
perhaps something to be improved.
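
As a rough sketch of how a kernel user might request contiguous backing
under this interface (illustrative only; mem is an assumed
intel_memory_region and obj a local pointer):

  obj = i915_gem_object_create_region(mem, SZ_2M, I915_BO_ALLOC_CONTIGUOUS);
  if (IS_ERR(obj))
          return PTR_ERR(obj);

  /* the backing store is now a single physically contiguous block */
  GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));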

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
Cc: Michael J Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h|   6 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  15 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h|   3 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  71 
 drivers/gpu/drm/i915/intel_memory_region.c|   9 +-
 drivers/gpu/drm/i915/intel_memory_region.h|   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 165 ++
 drivers/gpu/drm/i915/selftests/mock_region.c  |   2 +-
 9 files changed, 239 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 086a9bf5adcc..dfd16d65630f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -139,6 +139,12 @@ i915_gem_object_is_readonly(const struct 
drm_i915_gem_object *obj)
return obj->base.vma_node.readonly;
 }
 
+static inline bool
+i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
+{
+   return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 unsigned long flags)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 11390586cfe1..c6a712cf7d7a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -119,6 +119,10 @@ struct drm_i915_gem_object {
 
I915_SELFTEST_DECLARE(struct list_head st_link);
 
+   unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
/*
 * Is the object to be mapped as read-only to the GPU
 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 5fc6e4540f82..04cb9f72945e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -23,10 +23,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 {
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
-   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
resource_size_t size = obj->base.size;
resource_size_t prev_end;
struct i915_buddy_block *block;
+   unsigned int flags;
struct sg_table *st;
struct scatterlist *sg;
unsigned int sg_page_sizes;
@@ -42,6 +42,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
return -ENOMEM;
}
 
+   flags = I915_ALLOC_MIN_PAGE_SIZE;
+   if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+   flags |= I915_ALLOC_CONTIGUOUS;
+
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
if (ret)
goto err_free_sg;
@@ -56,7 +60,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
list_for_each_entry(block, blocks, link) {
u64 block_size, offset;
 
-   block_size = i915_buddy_block_size(&mem->mm, block);
+   block_size = min_t(u64, size,
+  i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
 
GEM_BUG_ON(overflows_type(block_size, sg->length));
@@ -98,10 +103,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem)
+   struct intel_memory_region *mem,
+   unsigned long flags)
 {
INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
+   obj->flags = flags;
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
@@ -116,6 +123,8 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
 {
struct drm_i915_gem_object *obj;
 
+   GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
if (!mem)
return ERR_PTR(-ENODEV);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h 
b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@ void i915_gem_object_

[Intel-gfx] [PATCH v3 06/21] drm/i915: support creating LMEM objects

2019-10-04 Thread Matthew Auld
We currently define LMEM, or local memory, as just another memory
region, like system memory or stolen, which we can expose to userspace
and map to the CPU via some BAR.
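
A minimal usage sketch, assuming the platform actually advertises an LMEM
region (i915, obj and size are assumed locals; not part of this diff):

  if (!HAS_LMEM(i915))
          return -ENODEV;

  obj = i915_gem_object_create_lmem(i915, size, 0);
  if (IS_ERR(obj))
          return PTR_ERR(obj);

  GEM_BUG_ON(!i915_gem_object_is_lmem(obj));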

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/Makefile |  2 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  | 29 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  | 23 ++
 drivers/gpu/drm/i915/i915_drv.h   |  5 +++
 drivers/gpu/drm/i915/intel_memory_region.c| 10 +
 drivers/gpu/drm/i915/intel_memory_region.h| 30 +
 drivers/gpu/drm/i915/intel_region_lmem.c  | 43 +++
 drivers/gpu/drm/i915/intel_region_lmem.h  | 11 +
 .../drm/i915/selftests/i915_live_selftests.h  |  1 +
 .../drm/i915/selftests/intel_memory_region.c  | 40 +
 10 files changed, 194 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6f0817bcc688..3154874e32b0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -117,6 +117,7 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+   gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
@@ -145,6 +146,7 @@ i915-y += \
  i915_scheduler.o \
  i915_trace_points.o \
  i915_vma.o \
+ intel_region_lmem.o \
  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index ..aadd5d5a69da
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .get_pages = i915_gem_object_get_pages_buddy,
+   .put_pages = i915_gem_object_put_pages_buddy,
+   .release = i915_gem_object_release_memory_region,
+};
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
return obj->ops == &i915_gem_lmem_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags)
+{
+   return i915_gem_object_create_region(i915->mm.regions[INTEL_MEMORY_LMEM],
+size, flags);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index ..ebc15fe24f58
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include 
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b7be46faef2d..ff8e21e585a3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -100,6 +100,8 @@
 #include "i915_vma.h"
 #include "i915_irq.h"
 
+#include "intel_region_lmem.h"
+
 #include "intel_gvt.h"
 
 /* General customization:
@@ -679,6 +681,8 @@ struct i915_gem_mm {
 */
struct vfsmount *gemfs;
 
+   struct intel_memory_region *regions[INTEL_MEMORY_UNKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -2130,6 +2134,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
 
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c 
b/drivers/gpu/drm/i915/intel_memory_region.c
index fe808899cbf8..920f9590f410 100644
--- a/drivers/gpu/drm/i91

[Intel-gfx] [PATCH v3 16/21] drm/i915: set num_fence_regs to 0 if there is no aperture

2019-10-04 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

We can't fence anything without aperture.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Stuart Summers 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_fence_reg.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c 
b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 487b7261f7ed..911aab45fce3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -831,8 +831,10 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 
detect_bit_6_swizzle(i915);
 
-   if (INTEL_GEN(i915) >= 7 &&
-   !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+   if (!i915_ggtt_has_aperture(ggtt))
+   num_fences = 0;
+   else if (INTEL_GEN(i915) >= 7 &&
+!(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (INTEL_GEN(i915) >= 4 ||
 IS_I945G(i915) || IS_I945GM(i915) ||
-- 
2.20.1


[Intel-gfx] [PATCH v3 08/21] drm/i915/lmem: support kernel mapping

2019-10-04 Thread Matthew Auld
From: Abdiel Janulgue 

We can create LMEM objects, but we also need to support mapping them
into kernel space for internal use.
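
A sketch of the intended use, mirroring the selftests later in the series
(obj is an assumed LMEM object whose pages are pinned around the access; n
is a page index):

  u32 __iomem *vaddr;
  u32 val;
  int err;

  err = i915_gem_object_pin_pages(obj);
  if (err)
          return err;

  /* read one dword back from page n of the LMEM object */
  vaddr = i915_gem_object_lmem_io_map_page_atomic(obj, n);
  val = ioread32(vaddr);
  io_mapping_unmap_atomic(vaddr);

  i915_gem_object_unpin_pages(obj);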

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Signed-off-by: Steve Hampson 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  |  36 ++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |   8 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |  22 +++-
 .../drm/i915/selftests/intel_memory_region.c  | 115 ++
 5 files changed, 182 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index aadd5d5a69da..fd8b63dd154d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,11 +9,47 @@
 #include "i915_drv.h"
 
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,
 };
 
+/* XXX: Time to vfunc your life up? */
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+   resource_size_t offset;
+
+   GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
return obj->ops == &i915_gem_lmem_obj_ops;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index ebc15fe24f58..31a6462bdbb6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -13,6 +13,14 @@ struct drm_i915_gem_object;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+   unsigned long n);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a387e3ee728b..96008374a412 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -31,10 +31,11 @@ struct i915_lut_handle {
 struct drm_i915_gem_object_ops {
unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGEBIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY   BIT(2)
-#define I915_GEM_OBJECT_NO_GGTTBIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM  BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY   BIT(3)
+#define I915_GEM_OBJECT_NO_GGTTBIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(5)
 
/* Interface between the GEM object and its backing storage.
 * get_pages() is called once prior to the use of the associated set
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b0ec0959c13f..cf7f5a3cb210 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 struct sg_table *pages,
@@ -172,7 +173,9 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object 
*obj)
void *ptr;
 
ptr = p

[Intel-gfx] [PATCH v3 02/21] drm/i915: introduce intel_memory_region

2019-10-04 Thread Matthew Auld
Support memory regions, as defined by a given (start, end), and allow
creating GEM objects which are backed by said region. The immediate goal
here is to have something to represent our device memory, but later on
we also want to represent every memory domain with a region, so stolen,
shmem, and of course device. At some point we are probably going to want
to use a common struct here, such that we are better aligned with, say, TTM.
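
As a rough sketch of the resulting API, using the mock region from the
selftests in this patch (start, size, obj_size, mem and obj are assumed
locals; illustrative only):

  /* a region spanning [start, start + size), backing GEM objects */
  mem = mock_region_create(i915, start, size, PAGE_SIZE, 0);
  if (IS_ERR(mem))
          return PTR_ERR(mem);

  obj = i915_gem_object_create_region(mem, obj_size, 0);
  if (IS_ERR(obj)) {
          intel_memory_region_put(mem);
          return PTR_ERR(obj);
  }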

Signed-off-by: Matthew Auld 
Signed-off-by: Abdiel Janulgue 
Signed-off-by: Niranjana Vishwanathapura 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/Makefile |   2 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 134 
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  28 +++
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  78 +++
 drivers/gpu/drm/i915/i915_drv.h   |   1 +
 drivers/gpu/drm/i915/intel_memory_region.c| 191 ++
 drivers/gpu/drm/i915/intel_memory_region.h|  83 
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 115 +++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   1 +
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 ++
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 ++
 13 files changed, 718 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 06e1876d0250..6f0817bcc688 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
  i915_utils.o \
  intel_csr.o \
  intel_device_info.o \
+ intel_memory_region.o \
  intel_pch.o \
  intel_pm.o \
  intel_runtime_pm.o \
@@ -120,6 +121,7 @@ gem-y += \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+   gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c00b4f077f9e..11390586cfe1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -160,6 +160,15 @@ struct drm_i915_gem_object {
atomic_t pages_pin_count;
atomic_t shrink_pin;
 
+   /**
+* Memory region for this object.
+*/
+   struct intel_memory_region *region;
+   /**
+* List of memory region blocks allocated for this object.
+*/
+   struct list_head blocks;
+
struct sg_table *pages;
void *mapping;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index ..5fc6e4540f82
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+   struct sg_table *pages)
+{
+   __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+   obj->mm.dirty = false;
+   sg_free_table(pages);
+   kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+   struct intel_memory_region *mem = obj->mm.region;
+   struct list_head *blocks = &obj->mm.blocks;
+   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
+   resource_size_t size = obj->base.size;
+   resource_size_t prev_end;
+   struct i915_buddy_block *block;
+   struct sg_table *st;
+   struct scatterlist *sg;
+   unsigned int sg_page_sizes;
+   unsigned long i;
+   int ret;
+
+   st = kmalloc(sizeof(*st), GFP_KERNEL);
+   if (!st)
+   return -ENOMEM;
+
+   if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+   kfree(st);
+   return -ENOMEM;
+   }
+
+   ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+   if (ret)
+   goto err_free_sg;
+
+   GEM_BUG_ON(list_empty(blocks));
+
+   sg = st->sgl;
+  

[Intel-gfx] [PATCH v3 20/21] drm/i915/selftests: check for missing aperture

2019-10-04 Thread Matthew Auld
We may be missing support for the mappable aperture on some platforms.

Signed-off-by: Matthew Auld 
Cc: Daniele Ceraolo Spurio 
---
 .../drm/i915/gem/selftests/i915_gem_coherency.c|  5 -
 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c |  6 ++
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c   | 14 ++
 drivers/gpu/drm/i915/selftests/i915_gem.c  |  4 
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c  |  3 +++
 5 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 549810f70aeb..e82255526782 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -259,7 +259,10 @@ static bool always_valid(struct drm_i915_private *i915)
 
 static bool needs_fence_registers(struct drm_i915_private *i915)
 {
-   return !intel_gt_is_wedged(&i915->gt);
+   if (intel_gt_is_wedged(&i915->gt))
+   return false;
+
+   return i915->ggtt.num_fences;
 }
 
 static bool needs_mi_store_dword(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index cfa52c525691..468603fa6218 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -301,6 +301,9 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
 
+   if (!i915_ggtt_has_aperture(&i915->ggtt))
+   return 0;
+
/* We want to check the page mapping and fencing of a large object
 * mmapped through the GTT. The object we create is larger than can
 * possibly be mmaped as a whole, and so we must use partial GGTT vma.
@@ -431,6 +434,9 @@ static int igt_smoke_tiling(void *arg)
IGT_TIMEOUT(end);
int err;
 
+   if (!i915_ggtt_has_aperture(&i915->ggtt))
+   return 0;
+
/*
 * igt_partial_tiling() does an exhastive check of partial tiling
 * chunking, but will undoubtably run out of time. Here, we do a
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c 
b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index e8a40df79bd0..c7896d4bf7ae 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1149,8 +1149,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
+   unsigned int pin_flags;
int err;
 
+   if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+   return 0;
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
 
@@ -1186,10 +1190,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
goto out_obj;
}
 
-   err = i915_vma_pin(arg.vma, 0, 0,
-  i915_vma_is_ggtt(arg.vma) ?
-  PIN_GLOBAL | PIN_MAPPABLE :
-  PIN_USER);
+   pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
+   if (flags & EXEC_OBJECT_NEEDS_FENCE)
+   pin_flags |= PIN_MAPPABLE;
+
+   err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c 
b/drivers/gpu/drm/i915/selftests/i915_gem.c
index bfa40a5b6d98..6f80a92c967b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -42,6 +42,10 @@ static void trash_stolen(struct drm_i915_private *i915)
unsigned long page;
u32 prng = 0x12345678;
 
+   /* XXX: fsck. needs some more thought... */
+   if (!i915_ggtt_has_aperture(ggtt))
+   return;
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.start + page;
u32 __iomem *s;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index ebe735df6504..4037e2cdca43 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1148,6 +1148,9 @@ static int igt_ggtt_page(void *arg)
unsigned int *order, n;
int err;
 
+   if (!i915_ggtt_has_aperture(ggtt))
+   return 0;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
-- 
2.20.1


[Intel-gfx] [PATCH v3 09/21] drm/i915/selftests: add write-dword test for LMEM

2019-10-04 Thread Matthew Auld
Simple test writing to dwords across an object, using various engines in
a randomized order, checking from the CPU that our writes land.

Signed-off-by: Matthew Auld 
---
 .../drm/i915/selftests/intel_memory_region.c  | 167 ++
 1 file changed, 167 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c 
b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 48244d885521..76c0ab0aa78c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -7,13 +7,16 @@
 
 #include "../i915_selftest.h"
 
+
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 #include "mock_region.h"
 
+#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
 #include "selftests/igt_flush_test.h"
@@ -254,6 +257,125 @@ static int igt_mock_contiguous(void *arg)
return err;
 }
 
+static int igt_gpu_write_dw(struct intel_context *ce,
+   struct i915_vma *vma,
+   u32 dword,
+   u32 value)
+{
+   return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+  vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+struct drm_i915_gem_object *obj)
+{
+   struct drm_i915_private *i915 = ctx->i915;
+   struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+   struct i915_gem_engines *engines;
+   struct i915_gem_engines_iter it;
+   struct intel_context *ce;
+   I915_RND_STATE(prng);
+   IGT_TIMEOUT(end_time);
+   unsigned int count;
+   struct i915_vma *vma;
+   int *order;
+   int i, n;
+   int err = 0;
+
+   GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+   n = 0;
+   count = 0;
+   for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+   count++;
+   if (!intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   n++;
+   }
+   i915_gem_context_unlock_engines(ctx);
+   if (!n)
+   return 0;
+
+   order = i915_random_order(count * count, &prng);
+   if (!order)
+   return -ENOMEM;
+
+   vma = i915_vma_instance(obj, vm, NULL);
+   if (IS_ERR(vma)) {
+   err = PTR_ERR(vma);
+   goto out_free;
+   }
+
+   err = i915_vma_pin(vma, 0, 0, PIN_USER);
+   if (err)
+   goto out_free;
+
+   i = 0;
+   engines = i915_gem_context_lock_engines(ctx);
+   do {
+   u32 rng = prandom_u32_state(&prng);
+   u32 dword = offset_in_page(rng) / 4;
+
+   ce = engines->engines[order[i] % engines->num_engines];
+   i = (i + 1) % (count * count);
+   if (!ce || !intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   err = igt_gpu_write_dw(ce, vma, dword, rng);
+   if (err)
+   break;
+
+   err = igt_cpu_check(obj, dword, rng);
+   if (err)
+   break;
+   } while (!__igt_timeout(end_time, NULL));
+   i915_gem_context_unlock_engines(ctx);
+
+out_free:
+   kfree(order);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   return err;
+}
+
 static int igt_lmem_create(void *arg)
 {
struct drm_i915_private *i915 = arg;
@@ -275,6 +397,50 @@ static int igt_lmem_create(void *arg)
return err;
 }
 
+static int igt_lmem_write_gpu(void *arg)
+{
+   struct drm_i915_private *i915 = arg;

[Intel-gfx] [PATCH v3 10/21] drm/i915/selftests: extend coverage to include LMEM huge-pages

2019-10-04 Thread Matthew Auld
Signed-off-by: Matthew Auld 
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 121 +-
 1 file changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index f27772f6779a..d4892769b739 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -9,6 +9,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
@@ -981,7 +982,7 @@ static int gpu_write(struct intel_context *ce,
   vma->size >> PAGE_SHIFT, val);
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
unsigned int needs_flush;
unsigned long n;
@@ -1013,6 +1014,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, 
u32 dword, u32 val)
return err;
 }
 
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   if (i915_gem_object_has_struct_page(obj))
+   return __cpu_check_shmem(obj, dword, val);
+   else if (i915_gem_object_is_lmem(obj))
+   return __cpu_check_lmem(obj, dword, val);
+
+   return -ENODEV;
+}
+
 static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
@@ -1397,6 +1443,78 @@ static int igt_ppgtt_gemfs_huge(void *arg)
return err;
 }
 
+static int igt_ppgtt_lmem_huge(void *arg)
+{
+   struct i915_gem_context *ctx = arg;
+   struct drm_i915_private *i915 = ctx->i915;
+   struct drm_i915_gem_object *obj;
+   static const unsigned int sizes[] = {
+   SZ_64K,
+   SZ_512K,
+   SZ_1M,
+   SZ_2M,
+   };
+   int i;
+   int err;
+
+   if (!HAS_LMEM(i915)) {
+   pr_info("device lacks LMEM support, skipping\n");
+   return 0;
+   }
+
+   /*
+* Sanity check that the HW uses huge pages correctly through LMEM
+* -- ensure that our writes land in the right place.
+*/
+
+   for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+   unsigned int size = sizes[i];
+
+   obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+   if (IS_ERR(obj)) {
+   err = PTR_ERR(obj);
+   if (err == -E2BIG) {
+   pr_info("object too big for region!\n");
+   return 0;
+   }
+
+   return err;
+   }
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   goto out_put;
+
+   if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+   pr_info("LMEM unable to allocate huge-page(s) with 
size=%u\n",
+   size);
+   goto out_unpin;
+   }
+
+   err = igt_write_huge(ctx, obj);
+   if (err) {
+   pr_err("LMEM write-huge failed with size=%u\n", size);
+   goto out_unpin;
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+   i915_gem_object_put(obj);
+   }
+
+   return 0;
+
+out_unpin:
+   i915_gem_object_unpin_pages(obj);
+out_put:
+   i915_gem_object_put(obj);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
 

[Intel-gfx] [PATCH v3 05/21] drm/i915: Add memory region information to device_info

2019-10-04 Thread Matthew Auld
From: Abdiel Janulgue 

Exposes available regions for the platform. Shared memory will
always be available.
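
A sketch of how the new field is meant to be consumed (REGION_LMEM and the
HAS_LMEM() wrapper come from later patches in this series; illustrative
only):

  /* skip LMEM-only paths on platforms without a local-memory region */
  if (!HAS_REGION(i915, REGION_LMEM))
          return 0;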

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.h  | 2 ++
 drivers/gpu/drm/i915/intel_device_info.h | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dcbc6e6394fc..b7be46faef2d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2129,6 +2129,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
 /* Having GuC is not the same as using GuC */
diff --git a/drivers/gpu/drm/i915/intel_device_info.h 
b/drivers/gpu/drm/i915/intel_device_info.h
index 0cdc2465534b..e9940f932d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -160,6 +160,8 @@ struct intel_device_info {
 
unsigned int page_sizes; /* page sizes supported by the HW */
 
+   u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
 
u8 pipe_mask;
-- 
2.20.1


[Intel-gfx] [PATCH v3 07/21] drm/i915: setup io-mapping for LMEM

2019-10-04 Thread Matthew Auld
From: Abdiel Janulgue 

Signed-off-by: Abdiel Janulgue 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/intel_region_lmem.c | 28 ++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c 
b/drivers/gpu/drm/i915/intel_region_lmem.c
index 7a3f96e1f766..051069664074 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -36,8 +36,32 @@ lmem_create_object(struct intel_memory_region *mem,
return obj;
 }
 
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
io_mapping_fini(&mem->iomap);
+   intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+   int ret;
+
+   if (!io_mapping_init_wc(&mem->iomap,
+   mem->io_start,
+   resource_size(&mem->region)))
+   return -EIO;
+
+   ret = intel_memory_region_init_buddy(mem);
+   if (ret)
+   io_mapping_fini(&mem->iomap);
+
+   return ret;
+}
+
 const struct intel_memory_region_ops intel_region_lmem_ops = {
-   .init = intel_memory_region_init_buddy,
-   .release = intel_memory_region_release_buddy,
+   .init = region_lmem_init,
+   .release = region_lmem_release,
.create_object = lmem_create_object,
 };
-- 
2.20.1


[Intel-gfx] [PATCH v2 19/22] drm/i915: don't allocate the ring in stolen if we lack aperture

2019-10-03 Thread Matthew Auld
Since we have no way to access it from the CPU. For such cases just
fall back to internal objects.

Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c 
b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index e220c09c6f32..c48f1d20af5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1273,7 +1273,9 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt 
*ggtt, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
 
-   obj = i915_gem_object_create_stolen(i915, size);
+   obj = ERR_PTR(-ENODEV);
+   if (HAS_MAPPABLE_APERTURE(i915))
+   obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
-- 
2.20.1


[Intel-gfx] [PATCH v2 20/22] drm/i915/selftests: fallback to using the gpu to trash stolen

2019-10-03 Thread Matthew Auld
If we lack a mappable aperture, opt for nuking stolen memory with the
blitter engine.

Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/selftests/i915_gem.c | 95 +++
 1 file changed, 80 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c 
b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 37593831b539..c4d7599af4f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -6,6 +6,8 @@
 
 #include 
 
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
 #include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
@@ -34,14 +36,25 @@ static int switch_to_context(struct drm_i915_private *i915,
return 0;
 }
 
-static void trash_stolen(struct drm_i915_private *i915)
+static void trash_stolen_cpu(struct drm_i915_private *i915)
 {
struct i915_ggtt *ggtt = &i915->ggtt;
const u64 slot = ggtt->error_capture.start;
const resource_size_t size = resource_size(&i915->dsm);
+   intel_wakeref_t wakeref;
unsigned long page;
u32 prng = 0x12345678;
 
+   /*
+* As a final sting in the tail, invalidate stolen. Under a real S4,
+* stolen is lost and needs to be refilled on resume. However, under
+* CI we merely do S4-device testing (as full S4 is too unreliable
+* for automated testing across a cluster), so to simulate the effect
+* of stolen being trashed across S4, we trash it ourselves.
+*/
+
+   wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.start + page;
u32 __iomem *s;
@@ -58,24 +71,53 @@ static void trash_stolen(struct drm_i915_private *i915)
}
 
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+
+   intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-static void simulate_hibernate(struct drm_i915_private *i915)
+static int trash_stolen_gpu(struct i915_gem_context *ctx)
 {
-   intel_wakeref_t wakeref;
+   struct drm_i915_private *i915 = ctx->vm->i915;
+   const resource_size_t size = resource_size(&i915->dsm);
+   struct intel_memory_region *clone;
+   struct drm_i915_gem_object *obj;
+   struct intel_context *ce;
+   u32 prng = 0x12345678;
+   int err;
 
-   wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+   if (!HAS_ENGINE(i915, BCS0))
+   return 0;
 
-   /*
-* As a final sting in the tail, invalidate stolen. Under a real S4,
-* stolen is lost and needs to be refilled on resume. However, under
-* CI we merely do S4-device testing (as full S4 is too unreliable
-* for automated testing across a cluster), so to simulate the effect
-* of stolen being trashed across S4, we trash it ourselves.
-*/
-   trash_stolen(i915);
+   if (!size)
+   return 0;
 
-   intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+   clone = mock_region_create(i915, i915->dsm.start, size, PAGE_SIZE, 0);
+   if (IS_ERR(clone))
+   return PTR_ERR(clone);
+
+   obj = i915_gem_object_create_region(clone, size, 0);
+   if (IS_ERR(obj)) {
+   err = PTR_ERR(obj);
+   goto out_region;
+   }
+
+   ce = i915_gem_context_get_engine(ctx, BCS0);
+   if (IS_ERR(ce)) {
+   err = PTR_ERR(ce);
+   goto out_put;
+   }
+
+   mutex_lock(&i915->drm.struct_mutex);
+   err = i915_gem_object_fill_blt(obj, ce, prng);
+   mutex_unlock(&i915->drm.struct_mutex);
+
+   intel_context_put(ce);
+out_put:
+   i915_gem_object_put(obj);
+out_region:
+   intel_memory_region_put(clone);
+
+   return err;
 }
 
 static int pm_prepare(struct drm_i915_private *i915)
@@ -148,6 +190,21 @@ static int igt_gem_suspend(void *arg)
if (err)
goto out;
 
+   /*
+* If we lack the mappable aperture we can't really access stolen from
+* the cpu, but we can always trash it from the gpu, we just need to do
+* so early, before we start suspending stuff. We shouldn't see any
+* hangs doing this so early, since things like ring state won't be
+* allocated in stolen if we can't access it from the cpu. Although if
+* that's the case maybe there is not much point in bothering with this
+* anyway...
+*/
+   if (!HAS_MAPPABLE_APERTURE(i915)) {
+   err = trash_stolen_gpu(ctx);
+   if (err)
+   goto out;
+   }
+
err = pm_prepare(i915);
if (err)
goto out;
@@ -155,7 +212,8 @@ static int igt_gem_suspend(void *arg)
pm_suspend(i915);
 
/* Here be dragons! Note that with S3RST any S3 may become S4! */
-   sim

[Intel-gfx] [PATCH v2 21/22] drm/i915/selftests: check for missing aperture

2019-10-03 Thread Matthew Auld
We may be missing support for the mappable aperture on some platforms.

Signed-off-by: Matthew Auld 
Cc: Daniele Ceraolo Spurio 
---
 .../drm/i915/gem/selftests/i915_gem_coherency.c|  5 -
 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c |  6 ++
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c   | 14 ++
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c  |  3 +++
 4 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 0ff7a89aadca..07faeada86eb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -246,7 +246,10 @@ static bool always_valid(struct drm_i915_private *i915)
 
 static bool needs_fence_registers(struct drm_i915_private *i915)
 {
-   return !intel_gt_is_wedged(&i915->gt);
+   if (intel_gt_is_wedged(&i915->gt))
+   return false;
+
+   return i915->ggtt.num_fences;
 }
 
 static bool needs_mi_store_dword(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index aefe557527f8..cb880d73ef73 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -301,6 +301,9 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return 0;
+
/* We want to check the page mapping and fencing of a large object
 * mmapped through the GTT. The object we create is larger than can
 * possibly be mmaped as a whole, and so we must use partial GGTT vma.
@@ -433,6 +436,9 @@ static int igt_smoke_tiling(void *arg)
IGT_TIMEOUT(end);
int err;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return 0;
+
/*
 * igt_partial_tiling() does an exhastive check of partial tiling
 * chunking, but will undoubtably run out of time. Here, we do a
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c 
b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 9c0c8441c22a..0378e59c803e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1189,8 +1189,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
+   unsigned int pin_flags;
int err;
 
+   if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+   return 0;
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
 
@@ -1227,10 +1231,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
goto out_obj;
}
 
-   err = i915_vma_pin(arg.vma, 0, 0,
-  i915_vma_is_ggtt(arg.vma) ?
-  PIN_GLOBAL | PIN_MAPPABLE :
-  PIN_USER);
+   pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
+   if (flags & EXEC_OBJECT_NEEDS_FENCE)
+   pin_flags |= PIN_MAPPABLE;
+
+   err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3a84d1083289..6a9cb3bb5962 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1152,6 +1152,9 @@ static int igt_ggtt_page(void *arg)
unsigned int *order, n;
int err;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return 0;
+
mutex_lock(>drm.struct_mutex);
 
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-- 
2.20.1


[Intel-gfx] [PATCH v2 08/22] drm/i915/lmem: support kernel mapping

2019-10-03 Thread Matthew Auld
From: Abdiel Janulgue 

We can create LMEM objects, but we also need to support mapping them
into kernel space for internal use.

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Signed-off-by: Steve Hampson 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  |  36 ++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |   8 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |  22 +++-
 .../drm/i915/selftests/intel_memory_region.c  | 118 ++
 5 files changed, 185 insertions(+), 8 deletions(-)
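
For a rough idea of how the new helpers are meant to be consumed, here is a
minimal sketch (illustrative only, not code from the patch; the helper name is
made up, and the object's pages are assumed to already be pinned by the caller)
that reads a single dword back through the per-page atomic mapping:

static u32 lmem_peek_dword(struct drm_i915_gem_object *obj,
			   unsigned long n, unsigned int dword)
{
	u32 __iomem *vaddr;
	u32 val;

	/* Map page n of the LMEM object, read one dword, then unmap. */
	vaddr = i915_gem_object_lmem_io_map_page_atomic(obj, n);
	val = ioread32(vaddr + dword);
	io_mapping_unmap_atomic(vaddr);

	return val;
}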

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 26a23304df32..1a045858b3b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,11 +9,47 @@
 #include "i915_drv.h"
 
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,
 };
 
+/* XXX: Time to vfunc your life up? */
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct 
drm_i915_gem_object *obj,
+ unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+   resource_size_t offset;
+
+   GEM_BUG_ON(!(obj->flags & I915_BO_ALLOC_CONTIGUOUS));
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
struct intel_memory_region *region = obj->mm.region;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index ebc15fe24f58..31a6462bdbb6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -13,6 +13,14 @@ struct drm_i915_gem_object;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+   unsigned long n);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 9a8579b67357..b75a14a61d24 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -30,10 +30,11 @@ struct i915_lut_handle {
 struct drm_i915_gem_object_ops {
unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGEBIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY   BIT(2)
-#define I915_GEM_OBJECT_NO_GGTTBIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM  BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY   BIT(3)
+#define I915_GEM_OBJECT_NO_GGTTBIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(5)
 
/* Interface between the GEM object and its backing storage.
 * get_pages() is called once prior to the use of the associated set
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b0ec0959c13f..cf7f5a3cb210 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 struct sg_table *pages,
@@ -172,7 +173,9 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object 
*obj)
   

[Intel-gfx] [PATCH v2 10/22] drm/i915/selftests: extend coverage to include LMEM huge-pages

2019-10-03 Thread Matthew Auld
Signed-off-by: Matthew Auld 
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 121 +-
 1 file changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1772d4cbf3d2..85de3a6fd7a8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -9,6 +9,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
@@ -971,7 +972,7 @@ static int gpu_write(struct intel_context *ce,
   vma->size >> PAGE_SHIFT, val);
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 
val)
 {
unsigned int needs_flush;
unsigned long n;
@@ -1003,6 +1004,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, 
u32 dword, u32 val)
return err;
 }
 
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 
val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   if (i915_gem_object_has_struct_page(obj))
+   return __cpu_check_shmem(obj, dword, val);
+   else if (i915_gem_object_is_lmem(obj))
+   return __cpu_check_lmem(obj, dword, val);
+
+   return -ENODEV;
+}
+
 static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
@@ -1387,6 +1433,78 @@ static int igt_ppgtt_gemfs_huge(void *arg)
return err;
 }
 
+static int igt_ppgtt_lmem_huge(void *arg)
+{
+   struct i915_gem_context *ctx = arg;
+   struct drm_i915_private *i915 = ctx->i915;
+   struct drm_i915_gem_object *obj;
+   static const unsigned int sizes[] = {
+   SZ_64K,
+   SZ_512K,
+   SZ_1M,
+   SZ_2M,
+   };
+   int i;
+   int err;
+
+   if (!HAS_LMEM(i915)) {
+   pr_info("device lacks LMEM support, skipping\n");
+   return 0;
+   }
+
+   /*
+* Sanity check that the HW uses huge pages correctly through LMEM
+* -- ensure that our writes land in the right place.
+*/
+
+   for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+   unsigned int size = sizes[i];
+
+   obj = i915_gem_object_create_lmem(i915, size, 
I915_BO_ALLOC_CONTIGUOUS);
+   if (IS_ERR(obj)) {
+   err = PTR_ERR(obj);
+   if (err == -E2BIG) {
+   pr_info("object too big for region!\n");
+   return 0;
+   }
+
+   return err;
+   }
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   goto out_put;
+
+   if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+   pr_info("LMEM unable to allocate huge-page(s) with 
size=%u\n",
+   size);
+   goto out_unpin;
+   }
+
+   err = igt_write_huge(ctx, obj);
+   if (err) {
+   pr_err("LMEM write-huge failed with size=%u\n", size);
+   goto out_unpin;
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+   i915_gem_object_put(obj);
+   }
+
+   return 0;
+
+out_unpin:
+   i915_gem_object_unpin_pages(obj);
+out_put:
+   i915_gem_object_put(obj);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
 

[Intel-gfx] [PATCH v2 22/22] HAX drm/i915: add the fake lmem region

2019-10-03 Thread Matthew Auld
Intended for upstream testing so that we can still exercise the LMEM
plumbing and !HAS_MAPPABLE_APERTURE paths. Smoke tested on a Skull Canyon
device. This works by allocating an intel_memory_region for a reserved
portion of system memory, which we treat like LMEM. For the LMEMBAR we
steal the aperture and map it 1:1 to the stolen region.

To enable, simply set i915_fake_lmem_start= on the kernel cmdline with the
start of the reserved region (see memmap=). The size of the region we can
use is determined by the size of the mappable aperture, so the size of the
reserved region should be >= mappable_end.

e.g. memmap=2G$16G i915_fake_lmem_start=0x4
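
To spell out the pairing between the two parameters (an illustrative reading,
not taken verbatim from the patch): memmap=<size>$<start> reserves <size>
bytes of system memory at physical address <start>, and i915_fake_lmem_start=
should then be given that same start address in hex. So a 2G reservation
placed at the 16G mark would look roughly like:

    memmap=2G$16G i915_fake_lmem_start=0x400000000

since 16GiB expressed in hex is 0x400000000.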

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 arch/x86/kernel/early-quirks.c | 26 +++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c   |  3 +
 drivers/gpu/drm/i915/i915_drv.c|  8 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c|  3 +
 drivers/gpu/drm/i915/intel_memory_region.h |  6 ++
 drivers/gpu/drm/i915/intel_region_lmem.c   | 90 ++
 drivers/gpu/drm/i915/intel_region_lmem.h   |  5 ++
 include/drm/i915_drm.h |  3 +
 8 files changed, 144 insertions(+)

diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6f6b1d04dadf..9b04655e3926 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -603,6 +603,32 @@ static void __init intel_graphics_quirks(int num, int 
slot, int func)
}
 }
 
+struct resource intel_graphics_fake_lmem_res __ro_after_init = 
DEFINE_RES_MEM(0, 0);
+EXPORT_SYMBOL(intel_graphics_fake_lmem_res);
+
+static int __init early_i915_fake_lmem_init(char *s)
+{
+   u64 start;
+   int ret;
+
+   if (*s == '=')
+   s++;
+
+   ret = kstrtoull(s, 16, );
+   if (ret)
+   return ret;
+
+   intel_graphics_fake_lmem_res.start = start;
+   intel_graphics_fake_lmem_res.end = SZ_2G; /* Placeholder; depends on 
aperture size */
+
+   printk(KERN_INFO "Intel graphics fake LMEM starts at %pa\n",
+  &intel_graphics_fake_lmem_res.start);
+
+   return 0;
+}
+
+early_param("i915_fake_lmem_start", early_i915_fake_lmem_init);
+
 static void __init force_disable_hpet(int num, int slot, int func)
 {
 #ifdef CONFIG_HPET_TIMER
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 1a045858b3b2..a8c3d52eb44c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -23,6 +23,7 @@ void __iomem *i915_gem_object_lmem_io_map_page(struct 
drm_i915_gem_object *obj,
resource_size_t offset;
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
 }
@@ -33,6 +34,7 @@ void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct 
drm_i915_gem_object
resource_size_t offset;
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
 }
@@ -46,6 +48,7 @@ void __iomem *i915_gem_object_lmem_io_map(struct 
drm_i915_gem_object *obj,
GEM_BUG_ON(!(obj->flags & I915_BO_ALLOC_CONTIGUOUS));
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3306c6bb515a..e53e70b4fd39 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1526,6 +1526,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
 
+   /* Check if we support fake LMEM -- enable for live selftests */
+   if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live &&
+   intel_graphics_fake_lmem_res.start) {
+   mkwrite_device_info(dev_priv)->memory_regions =
+   REGION_SMEM | REGION_LMEM | REGION_STOLEN;
+   GEM_BUG_ON(!HAS_LMEM(dev_priv));
+   }
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 096f50f1dda5..e7832dc1e7d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2778,6 +2778,9 @@ int i915_gem_init_memory_regions(struct drm_i915_private 
*i915)
case INTEL_STOLEN:
mem = i915_gem_stolen_setup(i915);
break;
+   case INTEL_LMEM:
+   mem

[Intel-gfx] [PATCH v2 15/22] drm/i915: do not map aperture if it is not available.

2019-10-03 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

Skip both setup and cleanup of the aperture mapping if the HW doesn't
have an aperture BAR.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 34 ++---
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 209686c23d21..64bf47eaa2a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2827,7 +2827,9 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
mutex_unlock(&i915->drm.struct_mutex);
 
arch_phys_wc_del(ggtt->mtrr);
-   io_mapping_fini(&ggtt->iomap);
+
+   if (ggtt->iomap.size)
+   io_mapping_fini(&ggtt->iomap);
 }
 
 /**
@@ -3038,10 +3040,13 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
 
/* TODO: We're not aware of mappable constraints on gen8 yet */
-   ggtt->gmadr =
-   (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
-pci_resource_len(pdev, 2));
-   ggtt->mappable_end = resource_size(&ggtt->gmadr);
+   /* FIXME: We probably need to add do device_info or runtime_info */
+   if (!HAS_LMEM(dev_priv)) {
+   ggtt->gmadr =
+   (struct resource) 
DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+pci_resource_len(pdev, 
2));
+   ggtt->mappable_end = resource_size(&ggtt->gmadr);
+   }
 
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3267,15 +3272,18 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
 
-   if (!io_mapping_init_wc(&ggtt->iomap,
-   ggtt->gmadr.start,
-   ggtt->mappable_end)) {
-   ggtt->vm.cleanup(&ggtt->vm);
-   ret = -EIO;
-   goto out;
-   }
+   if (ggtt->mappable_end) {
+   if (!io_mapping_init_wc(&ggtt->iomap,
+   ggtt->gmadr.start,
+   ggtt->mappable_end)) {
+   ggtt->vm.cleanup(&ggtt->vm);
+   ret = -EIO;
+   goto out;
+   }
 
-   ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+   ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+   }
 
i915_ggtt_init_fences(ggtt);
 
-- 
2.20.1


[Intel-gfx] [PATCH v2 07/22] drm/i915: setup io-mapping for LMEM

2019-10-03 Thread Matthew Auld
From: Abdiel Janulgue 

Signed-off-by: Abdiel Janulgue 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/intel_region_lmem.c | 28 ++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c 
b/drivers/gpu/drm/i915/intel_region_lmem.c
index 7a3f96e1f766..051069664074 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -36,8 +36,32 @@ lmem_create_object(struct intel_memory_region *mem,
return obj;
 }
 
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+   io_mapping_fini(&mem->iomap);
+   intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+   int ret;
+
+   if (!io_mapping_init_wc(&mem->iomap,
+   mem->io_start,
+   resource_size(&mem->region)))
+   return -EIO;
+
+   ret = intel_memory_region_init_buddy(mem);
+   if (ret)
+   io_mapping_fini(&mem->iomap);
+
+   return ret;
+}
+
 const struct intel_memory_region_ops intel_region_lmem_ops = {
-   .init = intel_memory_region_init_buddy,
-   .release = intel_memory_region_release_buddy,
+   .init = region_lmem_init,
+   .release = region_lmem_release,
.create_object = lmem_create_object,
 };
-- 
2.20.1


[Intel-gfx] [PATCH v2 16/22] drm/i915: set num_fence_regs to 0 if there is no aperture

2019-10-03 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

We can't fence anything without an aperture.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Stuart Summers 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_fence_reg.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
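
Callers that need fencing are then expected to key off the resulting fence
count rather than assume fences exist; a minimal sketch of the guard,
mirroring the selftest hunks quoted elsewhere in this series:

	/* Sketch: skip fence-dependent work when no fence registers exist. */
	if (!i915->ggtt.num_fences)
		return 0;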

diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c 
b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 615a9f4ef30c..0f1507285128 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -828,8 +828,10 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 
detect_bit_6_swizzle(i915);
 
-   if (INTEL_GEN(i915) >= 7 &&
-   !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+   if (!ggtt->mappable_end)
+   num_fences = 0;
+   else if (INTEL_GEN(i915) >= 7 &&
+!(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (INTEL_GEN(i915) >= 4 ||
 IS_I945G(i915) || IS_I945GM(i915) ||
-- 
2.20.1


[Intel-gfx] [PATCH v2 18/22] drm/i915: Don't try to place HWS in non-existing mappable region

2019-10-03 Thread Matthew Auld
From: Michal Wajdeczko 

HWS placement restrictions can't just rely on the HAS_LLC flag.

Signed-off-by: Michal Wajdeczko 
Cc: Daniele Ceraolo Spurio 
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c 
b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 80fd072ac719..c64f91bbdf3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -513,7 +513,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs 
*engine,
unsigned int flags;
 
flags = PIN_GLOBAL;
-   if (!HAS_LLC(engine->i915))
+   if (!HAS_LLC(engine->i915) && HAS_MAPPABLE_APERTURE(engine->i915))
/*
 * On g33, we cannot place HWS above 256MiB, so
 * restrict its pinning to the low mappable arena.
-- 
2.20.1


[Intel-gfx] [PATCH v2 12/22] drm/i915: treat shmem as a region

2019-10-03 Thread Matthew Auld
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 14 +++-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 68 ++-
 drivers/gpu/drm/i915/i915_drv.h   |  2 +
 drivers/gpu/drm/i915/i915_gem.c   |  9 ---
 drivers/gpu/drm/i915/i915_gem_gtt.c   |  3 +-
 drivers/gpu/drm/i915/i915_pci.c   | 29 +---
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 +-
 8 files changed, 95 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c 
b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object 
*obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
 
-   if (!IS_ERR_OR_NULL(pages))
+   if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+   i915_gem_object_release_memory_region(obj);
+   }
mutex_unlock(&obj->mm.lock);
return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 663254b3da21..0c03c2c40dbc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -6,6 +6,7 @@
 #include "intel_memory_region.h"
 #include "i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 void
 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
@@ -146,11 +147,22 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
 
+   /*
+* There is a prevalence of the assumption that we fit the object's
+* page count inside a 32bit _signed_ variable. Let's document this and
+* catch if we ever need to fix it. In the meantime, if you do spot
+* such a local variable, please consider fixing!
+*/
+
if (size >> PAGE_SHIFT > INT_MAX)
return ERR_PTR(-E2BIG);
 
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
 
-   return mem->ops->create_object(mem, size, flags);
+   obj = mem->ops->create_object(mem, size, flags);
+   if (!IS_ERR(obj))
+   trace_i915_gem_object_create(obj);
+
+   return obj;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..554504349eb0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_gemfs.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
 static int shmem_get_pages(struct drm_i915_gem_object *obj)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+   struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 * If there's no chance of allocating enough pages for the whole
 * object, bail early.
 */
-   if (page_count > totalram_pages())
+   if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
 
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 
 static void shmem_release(struct drm_i915_gem_object *obj)
 {
+   i915_gem_object_release_memory_region(obj);
+
fput(obj->base.filp);
 }
 
@@ -434,7 +439,7 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.release = shmem_release,
 };
 
-static int create_shmem(struct drm_i915_private *i915,
+static int __create_shmem(struct drm_i915_private *i915,
struct drm_gem_object *obj,
size_t size)
 {
@@ -455,31 +460,23 @@ static int create_shmem(struct drm_i915_private *i915,
return 0;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i91

[Intel-gfx] [PATCH v2 09/22] drm/i915/selftests: add write-dword test for LMEM

2019-10-03 Thread Matthew Auld
A simple test that writes dwords across an object, using the various
engines in a randomized order, and then checks from the CPU that our
writes landed.

Signed-off-by: Matthew Auld 
---
 .../drm/i915/selftests/intel_memory_region.c  | 171 ++
 1 file changed, 171 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c 
b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 1ad755e75e57..afb248f175df 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -7,13 +7,16 @@
 
 #include "../i915_selftest.h"
 
+
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 #include "mock_region.h"
 
+#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
 #include "selftests/igt_flush_test.h"
@@ -255,6 +258,125 @@ static int igt_mock_contiguous(void *arg)
return err;
 }
 
+static int igt_gpu_write_dw(struct intel_context *ce,
+   struct i915_vma *vma,
+   u32 dword,
+   u32 value)
+{
+   return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+  vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+struct drm_i915_gem_object *obj)
+{
+   struct drm_i915_private *i915 = ctx->i915;
+   struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+   struct i915_gem_engines *engines;
+   struct i915_gem_engines_iter it;
+   struct intel_context *ce;
+   I915_RND_STATE(prng);
+   IGT_TIMEOUT(end_time);
+   unsigned int count;
+   struct i915_vma *vma;
+   int *order;
+   int i, n;
+   int err = 0;
+
+   GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+   n = 0;
+   count = 0;
+   for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+   count++;
+   if (!intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   n++;
+   }
+   i915_gem_context_unlock_engines(ctx);
+   if (!n)
+   return 0;
+
+   order = i915_random_order(count * count, &prng);
+   if (!order)
+   return -ENOMEM;
+
+   vma = i915_vma_instance(obj, vm, NULL);
+   if (IS_ERR(vma)) {
+   err = PTR_ERR(vma);
+   goto out_free;
+   }
+
+   err = i915_vma_pin(vma, 0, 0, PIN_USER);
+   if (err)
+   goto out_free;
+
+   i = 0;
+   engines = i915_gem_context_lock_engines(ctx);
+   do {
+   u32 rng = prandom_u32_state(&prng);
+   u32 dword = offset_in_page(rng) / 4;
+
+   ce = engines->engines[order[i] % engines->num_engines];
+   i = (i + 1) % (count * count);
+   if (!ce || !intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   err = igt_gpu_write_dw(ce, vma, dword, rng);
+   if (err)
+   break;
+
+   err = igt_cpu_check(obj, dword, rng);
+   if (err)
+   break;
+   } while (!__igt_timeout(end_time, NULL));
+   i915_gem_context_unlock_engines(ctx);
+
+out_free:
+   kfree(order);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   return err;
+}
+
 static int igt_lmem_create(void *arg)
 {
struct drm_i915_private *i915 = arg;
@@ -276,6 +398,54 @@ static int igt_lmem_create(void *arg)
return err;
 }
 
+static int igt_lmem_write_gpu(void *arg)
+{
+   struct drm_i915_private *i915 = arg;

[Intel-gfx] [PATCH v2 17/22] drm/i915: error capture with no ggtt slot

2019-10-03 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

If the aperture is not available in HW we can't use a GGTT slot and WC
copy, so fall back to a regular kmap.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 19 
 drivers/gpu/drm/i915/i915_gpu_error.c | 65 ++-
 2 files changed, 64 insertions(+), 20 deletions(-)
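
The quoted hunk below is truncated before the non-WC branch, so as a rough
sketch of the fallback idea only (not the literal patch): when there is no
reserved GGTT slot, an object backed by struct pages can be kmapped page by
page inside i915_error_object_create() and fed to compress_page() with
compress->wc left false.

	/* Sketch of the kmap fallback (illustrative; assumes struct pages). */
	struct page *page;

	for_each_sgt_page(page, iter, vma->pages) {
		void *s = kmap(page);

		ret = compress_page(compress, s, dst);
		kunmap(page);
		if (ret)
			break;
	}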

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 64bf47eaa2a1..096f50f1dda5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2661,7 +2661,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
 static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
 {
ggtt_release_guc_top(ggtt);
-   drm_mm_remove_node(&ggtt->error_capture);
+   if (drm_mm_node_allocated(&ggtt->error_capture))
+   drm_mm_remove_node(&ggtt->error_capture);
 }
 
 static int init_ggtt(struct i915_ggtt *ggtt)
@@ -2692,13 +2693,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
if (ret)
return ret;
 
-   /* Reserve a mappable slot for our lockless error capture */
-   ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
-   if (ret)
-   return ret;
+   if (ggtt->mappable_end) {
+   /* Reserve a mappable slot for our lockless error capture */
+   ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, 
&ggtt->error_capture,
+ PAGE_SIZE, 0, 
I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+   if (ret)
+   return ret;
+   }
 
/*
 * The upper portion of the GuC address space has a sizeable hole
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c 
b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6384a06aa5bf..c6c96f0c6b28 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -40,6 +40,7 @@
 #include "display/intel_overlay.h"
 
 #include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
 
 #include "i915_drv.h"
 #include "i915_gpu_error.h"
@@ -235,6 +236,7 @@ struct compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
+   bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -292,7 +294,7 @@ static int compress_page(struct compress *c,
struct z_stream_s *zstream = &c->zstream;
 
zstream->next_in = src;
-   if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+   if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
 
@@ -367,6 +369,7 @@ static void err_compression_marker(struct 
drm_i915_error_state_buf *m)
 
 struct compress {
struct pagevec pool;
+   bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -389,7 +392,7 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
 
-   if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+   if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
 
@@ -970,7 +973,6 @@ i915_error_object_create(struct drm_i915_private *i915,
struct drm_i915_error_object *dst;
unsigned long num_pages;
struct sgt_iter iter;
-   dma_addr_t dma;
int ret;
 
might_sleep();
@@ -996,17 +998,54 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
 
+   compress->wc = i915_gem_object_is_lmem(vma->obj) ||
+  drm_mm_node_allocated(&ggtt->error_capture);
+
ret = -EINVAL;
-   for_each_sgt_daddr(dma, iter, vma->pages) {
+   if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+   dma_addr_t dma;
 
-   ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+   for_each_sgt_daddr(dma, iter, vma->pages) {
+   ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+I915_CACHE_NONE, 0);
 
-   s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
-   ret = compress_page(compress, (void  __force *)s, dst);
-   io_mapping_unmap(s);
-   if (ret)
-   break;
+   

[Intel-gfx] [PATCH v2 11/22] drm/i915: enumerate and init each supported region

2019-10-03 Thread Matthew Auld
From: Abdiel Janulgue 

Nothing to enumerate yet...

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/i915_drv.h   |  3 +
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 70 +--
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 ++
 3 files changed, 72 insertions(+), 7 deletions(-)
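
The switch in i915_gem_init_memory_regions() is intentionally left empty at
this point; later patches plug in the individual backends. As a sketch of how
a case slots in once a backend exists (drawn from the shape of the later hunks
in this series, so treat it as illustrative rather than part of this patch):

	switch (type) {
	case INTEL_STOLEN:
		mem = i915_gem_stolen_setup(i915);
		break;
	default:
		break;
	}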

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a2673f41d93b..26966b34c731 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2387,6 +2387,9 @@ int __must_check i915_gem_evict_for_node(struct 
i915_address_space *vm,
 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915);
+int i915_gem_init_memory_regions(struct drm_i915_private *i915);
+
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e62e9d1a1307..f1e42c2e18d7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2744,6 +2744,66 @@ int i915_init_ggtt(struct drm_i915_private *i915)
return 0;
 }
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915)
+{
+   int i;
+
+   i915_gem_cleanup_stolen(i915);
+
+   for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+   struct intel_memory_region *region = i915->mm.regions[i];
+
+   if (region)
+   intel_memory_region_put(region);
+   }
+}
+
+int i915_gem_init_memory_regions(struct drm_i915_private *i915)
+{
+   int err, i;
+
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   /* XXX: stolen will become a region at some point */
+   err = i915_gem_init_stolen(i915);
+   if (err)
+   return err;
+
+   for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+   struct intel_memory_region *mem = NULL;
+   u32 type;
+
+   if (!HAS_REGION(i915, BIT(i)))
+   continue;
+
+   type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+   switch (type) {
+   default:
+   break;
+   }
+
+   if (IS_ERR(mem)) {
+   err = PTR_ERR(mem);
+   DRM_ERROR("Failed to setup region(%d) type=%d\n", err, 
type);
+   goto out_cleanup;
+   }
+
+   mem->id = intel_region_map[i];
+   mem->type = type;
+   mem->instance = 
MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+
+   i915->mm.regions[i] = mem;
+   }
+
+   return 0;
+
+out_cleanup:
+   i915_gem_cleanup_memory_regions(i915);
+   return err;
+}
+
 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -2785,6 +2845,8 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
 {
struct pagevec *pvec;
 
+   i915_gem_cleanup_memory_regions(i915);
+
fini_aliasing_ppgtt(&i915->ggtt);
 
ggtt_cleanup_hw(&i915->ggtt);
@@ -2794,8 +2856,6 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
-   i915_gem_cleanup_stolen(i915);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3251,11 +3311,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
 
-   /*
-* Initialise stolen early so that we may reserve preallocated
-* objects for the BIOS to KMS transition.
-*/
-   ret = i915_gem_init_stolen(dev_priv);
+   ret = i915_gem_init_memory_regions(dev_priv);
if (ret)
goto out_gtt_cleanup;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c 
b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9809b17083de..4ceaaa529da7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -82,6 +82,8 @@ static void mock_device_release(struct drm_device *dev)
 
i915_gemfs_fini(i915);
 
+   i915_gem_cleanup_memory_regions(i915);
+
drm_mode_config_cleanup(&i915->drm);
 
drm_dev_fini(&i915->drm);
@@ -218,6 +220,10 @@ struct drm_i915_private *mock_gem_device(void)
 
WARN_ON(i915_gemfs_init(i915));
 
+   err = i915_gem_init_memory_regions(i915);
+   if (err)
+   goto err_context;
+
return i915;
 
 err_context:
-- 
2.20.1


[Intel-gfx] [PATCH v2 14/22] drm/i915: define HAS_MAPPABLE_APERTURE

2019-10-03 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

The following patches in the series will use it to avoid certain
operations when the aperture is not available in HW.

Signed-off-by: Daniele Ceraolo Spurio 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.h | 2 ++
 1 file changed, 2 insertions(+)
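
A typical guard then looks like the following (a sketch, mirroring the
selftest hunks quoted elsewhere in this series):

	if (!HAS_MAPPABLE_APERTURE(i915))
		return 0; /* nothing to test without an aperture */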

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c5cb2feda27..7824a31ee448 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2119,6 +2119,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
(INTEL_INFO(dev_priv)->display.overlay_needs_physical)
 
+#define HAS_MAPPABLE_APERTURE(dev_priv) (dev_priv->ggtt.mappable_end > 0)
+
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)(IS_I830(dev_priv) || 
IS_I845G(dev_priv))
 
-- 
2.20.1


[Intel-gfx] [PATCH v2 13/22] drm/i915: treat stolen as a region

2019-10-03 Thread Matthew Auld
Convert stolen memory over to a region object. This still leaves open the
question of what to do with pre-allocated objects...

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 66 +++---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c| 14 +
 drivers/gpu/drm/i915/i915_pci.c|  2 +-
 4 files changed, 62 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 3dd295bb61f6..a91ef9fe98cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
 
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private 
*dev_priv,
return 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
 {
if (!drm_mm_initialized(_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private 
*i915,
}
 }
 
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -539,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object 
*obj)
 
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+   if (obj->mm.region)
+   i915_gem_object_release_memory_region(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -548,8 +552,9 @@ static const struct drm_i915_gem_object_ops 
i915_gem_object_stolen_ops = {
 };
 
 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-  struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+   struct drm_mm_node *stolen,
+   struct intel_memory_region *mem)
 {
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -566,6 +571,9 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
 
+   if (mem)
+   i915_gem_object_init_memory_region(obj, mem, 0);
+
if (i915_gem_object_pin_pages(obj))
goto cleanup;
 
@@ -576,10 +584,12 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return NULL;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+  resource_size_t size,
+  unsigned int flags)
 {
+   struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -598,7 +608,7 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
if (ret)
goto err_free;
 
-   obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+   obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
if (obj == NULL) {
ret = -ENOMEM;
goto err_remove;
@@ -613,6 +623,44 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return ERR_PTR(ret);
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+
+   return 
i915_gem_object_create_region(dev_priv->mm.regions[INTEL_MEMORY_STOLEN],
+size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+   i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+   .init = init_stolen,
+   .release = release_stolen,
+   .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private 
*i915)
+{
+   return intel_memory_region_create(i915,
+

[Intel-gfx] [PATCH v2 03/22] drm/i915/region: support contiguous allocations

2019-10-03 Thread Matthew Auld
Some kernel-internal objects may need to be allocated as a contiguous
block. Also, thinking ahead, the various kernel io_mapping interfaces seem
to expect it, although this is purely a limitation of the kernel
API... so perhaps something to be improved.

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
Cc: Michael J Ruhl 
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  15 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h|   3 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  63 ---
 drivers/gpu/drm/i915/intel_memory_region.c|   9 +-
 drivers/gpu/drm/i915/intel_memory_region.h|   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 165 ++
 drivers/gpu/drm/i915/selftests/mock_region.c  |   2 +-
 8 files changed, 229 insertions(+), 35 deletions(-)
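
A caller that needs a physically contiguous object simply passes the new flag
at creation time; a minimal sketch (illustrative, with mem and size standing
in for the caller's region and requested size):

	obj = i915_gem_object_create_region(mem, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

Single-range mapping helpers later in the series (e.g.
i915_gem_object_lmem_io_map()) rely on this, hence the GEM_BUG_ON() on the
flag in that helper.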

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index cff527e79661..2960aa0c79f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -118,6 +118,10 @@ struct drm_i915_gem_object {
 
I915_SELFTEST_DECLARE(struct list_head st_link);
 
+   unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
/*
 * Is the object to be mapped as read-only to the GPU
 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 5fc6e4540f82..04cb9f72945e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -23,10 +23,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 {
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
-   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
resource_size_t size = obj->base.size;
resource_size_t prev_end;
struct i915_buddy_block *block;
+   unsigned int flags;
struct sg_table *st;
struct scatterlist *sg;
unsigned int sg_page_sizes;
@@ -42,6 +42,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
return -ENOMEM;
}
 
+   flags = I915_ALLOC_MIN_PAGE_SIZE;
+   if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+   flags |= I915_ALLOC_CONTIGUOUS;
+
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
if (ret)
goto err_free_sg;
@@ -56,7 +60,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
list_for_each_entry(block, blocks, link) {
u64 block_size, offset;
 
-   block_size = i915_buddy_block_size(&mem->mm, block);
+   block_size = min_t(u64, size,
+  i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
 
GEM_BUG_ON(overflows_type(block_size, sg->length));
@@ -98,10 +103,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem)
+   struct intel_memory_region *mem,
+   unsigned long flags)
 {
INIT_LIST_HEAD(>mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
+   obj->flags = flags;
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
@@ -116,6 +123,8 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
 {
struct drm_i915_gem_object *obj;
 
+   GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
if (!mem)
return ERR_PTR(-ENODEV);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h 
b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct 
drm_i915_gem_object *obj,
 struct sg_table *pages);
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem);
+   struct intel_memory_region *mem,
+   unsigned long flags);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1313c7f93ef8..2549c233465c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/g

[Intel-gfx] [PATCH v2 02/22] drm/i915: introduce intel_memory_region

2019-10-03 Thread Matthew Auld
Support memory regions, as defined by a given (start, end), and allow
creating GEM objects which are backed by said region. The immediate goal
here is to have something to represent our device memory, but later on
we also want to represent every memory domain with a region, i.e. stolen,
shmem, and of course device memory. At some point we are probably going to
want to use a common struct here, such that we are better aligned with,
say, TTM.

Signed-off-by: Matthew Auld 
Signed-off-by: Abdiel Janulgue 
Signed-off-by: Niranjana Vishwanathapura 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/Makefile |   2 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 134 
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  28 +++
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  73 +++
 drivers/gpu/drm/i915/i915_drv.h   |   1 +
 drivers/gpu/drm/i915/intel_memory_region.c| 191 ++
 drivers/gpu/drm/i915/intel_memory_region.h|  83 
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 118 +++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   1 +
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 ++
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 ++
 13 files changed, 716 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h
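
In rough terms the flow the selftests exercise looks like this (a sketch
mirroring the mock usage quoted elsewhere in this series; start, size and
obj_size are placeholders and error unwinding is trimmed):

	mem = mock_region_create(i915, start, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = i915_gem_object_create_region(mem, obj_size, 0);
	if (IS_ERR(obj)) {
		intel_memory_region_put(mem);
		return PTR_ERR(obj);
	}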

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d2b53b5add81..da61b0dfe105 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
  i915_utils.o \
  intel_csr.o \
  intel_device_info.o \
+ intel_memory_region.o \
  intel_pch.o \
  intel_pm.o \
  intel_runtime_pm.o \
@@ -119,6 +120,7 @@ gem-y += \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+   gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e1aab2fd1cd9..cff527e79661 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -159,6 +159,15 @@ struct drm_i915_gem_object {
atomic_t pages_pin_count;
atomic_t shrink_pin;
 
+   /**
+* Memory region for this object.
+*/
+   struct intel_memory_region *region;
+   /**
+* List of memory region blocks allocated for this object.
+*/
+   struct list_head blocks;
+
struct sg_table *pages;
void *mapping;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index ..5fc6e4540f82
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+   struct sg_table *pages)
+{
+   __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+   obj->mm.dirty = false;
+   sg_free_table(pages);
+   kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+   struct intel_memory_region *mem = obj->mm.region;
+   struct list_head *blocks = &obj->mm.blocks;
+   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
+   resource_size_t size = obj->base.size;
+   resource_size_t prev_end;
+   struct i915_buddy_block *block;
+   struct sg_table *st;
+   struct scatterlist *sg;
+   unsigned int sg_page_sizes;
+   unsigned long i;
+   int ret;
+
+   st = kmalloc(sizeof(*st), GFP_KERNEL);
+   if (!st)
+   return -ENOMEM;
+
+   if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+   kfree(st);
+   return -ENOMEM;
+   }
+
+   ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+   if (ret)
+   goto err_free_sg;
+
+   GEM_BUG_ON(list_empty(blocks));
+
+   sg = st->sgl;
+  

[Intel-gfx] [PATCH v2 05/22] drm/i915: Add memory region information to device_info

2019-10-03 Thread Matthew Auld
From: Abdiel Janulgue 

Exposes available regions for the platform. Shared memory will
always be available.

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.h  | 2 ++
 drivers/gpu/drm/i915/intel_device_info.h | 2 ++
 2 files changed, 4 insertions(+)
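
Usage is then a simple mask test, e.g. (sketch):

	if (!HAS_REGION(i915, REGION_LMEM))
		return -ENODEV; /* no device-local memory on this platform */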

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3a32f9e00905..f7b56a07a035 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2163,6 +2163,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
 /* Having GuC is not the same as using GuC */
diff --git a/drivers/gpu/drm/i915/intel_device_info.h 
b/drivers/gpu/drm/i915/intel_device_info.h
index 0cdc2465534b..e9940f932d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -160,6 +160,8 @@ struct intel_device_info {
 
unsigned int page_sizes; /* page sizes supported by the HW */
 
+   u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
 
u8 pipe_mask;
-- 
2.20.1


[Intel-gfx] [PATCH v2 04/22] drm/i915/region: support volatile objects

2019-10-03 Thread Matthew Auld
Volatile objects are marked as DONTNEED while pinned; therefore, once
unpinned, the backing store can be discarded. This is limited to
kernel-internal objects.

Signed-off-by: Matthew Auld 
Signed-off-by: CQ Tang 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_internal.c| 17 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h  |  6 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h|  9 -
 drivers/gpu/drm/i915/gem/i915_gem_pages.c   |  6 ++
 drivers/gpu/drm/i915/gem/i915_gem_region.c  | 13 +
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 
 drivers/gpu/drm/i915/intel_memory_region.c  |  4 
 drivers/gpu/drm/i915/intel_memory_region.h  |  5 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c   |  5 ++---
 9 files changed, 57 insertions(+), 20 deletions(-)
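
The i915_gem_pages.c hunk is truncated below, so as a hedged guess at the
shape of the centralised handling (previously done per-backend in the
internal backing store): the set-pages path marks a volatile object's pages
as DONTNEED, and the unset path flips them back to WILLNEED.

	/* Sketch only -- the exact placement lives in the truncated hunk. */
	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;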

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c 
b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..5e72cb1cc2d3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ static int i915_gem_object_get_pages_internal(struct 
drm_i915_gem_object *obj)
goto err;
}
 
-   /* Mark the pages as dontneed whilst they are still pinned. As soon
-* as they are unpinned they are allowed to be reaped by the shrinker,
-* and the caller is expected to repopulate - the contents of this
-* object are only valid whilst active and pinned.
-*/
-   obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct 
drm_i915_gem_object *obj,
internal_free_pages(pages);
 
obj->mm.dirty = false;
-   obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -188,6 +180,15 @@ i915_gem_object_create_internal(struct drm_i915_private 
*i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
+   /*
+* Mark the object as volatile, such that the pages are marked as
+* dontneed whilst they are still pinned. As soon as they are unpinned
+* they are allowed to be reaped by the shrinker, and the caller is
+* expected to repopulate - the contents of this object are only valid
+* whilst active and pinned.
+*/
+   obj->flags = I915_BO_ALLOC_VOLATILE;
+
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 53c7069ba3e8..0c24436049e4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -122,6 +122,12 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object 
*obj);
 void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
  struct dma_fence *fence);
 
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+   return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2960aa0c79f4..9a8579b67357 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -120,7 +120,8 @@ struct drm_i915_gem_object {
 
unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+#define I915_BO_ALLOC_VOLATILE   BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 
/*
 * Is the object to be mapped as read-only to the GPU
@@ -171,6 +172,12 @@ struct drm_i915_gem_object {
 * List of memory region blocks allocated for this object.
 */
struct list_head blocks;
+   /**
+* Element within memory_region->objects or region->purgeable
+* if the object is marked as DONTNEED. Access is protected by
+* region->obj_lock.
+*/
+   struct list_head region_link;
 
struct sg_table *pages;
void *mapping;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 2e941f093a20..b0ec0959c13f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -18,6 +18,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object 
*obj,
 
lockdep_assert_held(&obj->mm.lock);
 

[Intel-gfx] [PATCH v2 00/22] LMEM basics

2019-10-03 Thread Matthew Auld
The basic LMEM bits, minus the uAPI, pruning, etc. The goal is to support
basic LMEM object creation within the kernel. From there we can start with the
dumb buffer support, and then the other display related bits.

Abdiel Janulgue (4):
  drm/i915: Add memory region information to device_info
  drm/i915: setup io-mapping for LMEM
  drm/i915/lmem: support kernel mapping
  drm/i915: enumerate and init each supported region

CQ Tang (1):
  drm/i915/stolen: make the object creation interface consistent

Daniele Ceraolo Spurio (4):
  drm/i915: define HAS_MAPPABLE_APERTURE
  drm/i915: do not map aperture if it is not available.
  drm/i915: set num_fence_regs to 0 if there is no aperture
  drm/i915: error capture with no ggtt slot

Matthew Auld (12):
  drm/i915: introduce intel_memory_region
  drm/i915/region: support contiguous allocations
  drm/i915/region: support volatile objects
  drm/i915: support creating LMEM objects
  drm/i915/selftests: add write-dword test for LMEM
  drm/i915/selftests: extend coverage to include LMEM huge-pages
  drm/i915: treat shmem as a region
  drm/i915: treat stolen as a region
  drm/i915: don't allocate the ring in stolen if we lack aperture
  drm/i915/selftests: fallback to using the gpu to trash stolen
  drm/i915/selftests: check for missing aperture
  HAX drm/i915: add the fake lmem region

Michal Wajdeczko (1):
  drm/i915: Don't try to place HWS in non-existing mappable region

 arch/x86/kernel/early-quirks.c|  26 +
 drivers/gpu/drm/i915/Makefile |   4 +
 drivers/gpu/drm/i915/display/intel_display.c  |   2 +-
 drivers/gpu/drm/i915/display/intel_fbdev.c|   4 +-
 drivers/gpu/drm/i915/display/intel_overlay.c  |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_internal.c  |  17 +-
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  |  70 ++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |  31 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h|   6 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  29 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |  28 +-
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |   5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 168 +
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  29 +
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c |  68 +-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c| 100 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h|   3 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 213 +-
 .../i915/gem/selftests/i915_gem_coherency.c   |   5 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c|   6 +
 drivers/gpu/drm/i915/gt/intel_engine_cs.c |   2 +-
 drivers/gpu/drm/i915/gt/intel_gt.c|   2 +-
 drivers/gpu/drm/i915/gt/intel_rc6.c   |   4 +-
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c|   6 +-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  14 +-
 drivers/gpu/drm/i915/i915_drv.c   |   8 +
 drivers/gpu/drm/i915/i915_drv.h   |  15 +
 drivers/gpu/drm/i915/i915_gem.c   |   9 -
 drivers/gpu/drm/i915/i915_gem_fence_reg.c |   6 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 119 +++-
 drivers/gpu/drm/i915/i915_gpu_error.c |  65 +-
 drivers/gpu/drm/i915/i915_pci.c   |  29 +-
 drivers/gpu/drm/i915/intel_device_info.h  |   2 +
 drivers/gpu/drm/i915/intel_memory_region.c| 212 ++
 drivers/gpu/drm/i915/intel_memory_region.h| 125 
 drivers/gpu/drm/i915/intel_region_lmem.c  | 157 +
 drivers/gpu/drm/i915/intel_region_lmem.h  |  16 +
 drivers/gpu/drm/i915/selftests/i915_gem.c |  95 ++-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   8 +-
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 617 ++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   9 +-
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 ++
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 +
 include/drm/i915_drm.h|   3 +
 46 files changed, 2253 insertions(+), 163 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h

-- 
2.20.1


[Intel-gfx] [PATCH v2 06/22] drm/i915: support creating LMEM objects

2019-10-03 Thread Matthew Auld
We currently define LMEM, or local memory, as just another memory
region, like system memory or stolen, which we can expose to userspace
and can be mapped to the CPU via some BAR.
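
As a rough usage sketch (assumes the interface added below; the wrapper
function and its flag choice are illustrative only):

/* Sketch: allocate a contiguous LMEM object, if the platform has LMEM. */
static struct drm_i915_gem_object *
example_alloc_lmem(struct drm_i915_private *i915, resource_size_t size)
{
	if (!HAS_LMEM(i915))
		return ERR_PTR(-ENODEV);

	/* flags come from the I915_BO_ALLOC_* set, e.g. contiguous backing */
	return i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
}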

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/Makefile |  2 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  | 31 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  | 23 ++
 drivers/gpu/drm/i915/i915_drv.h   |  5 +++
 drivers/gpu/drm/i915/intel_memory_region.c| 10 +
 drivers/gpu/drm/i915/intel_memory_region.h| 30 +
 drivers/gpu/drm/i915/intel_region_lmem.c  | 43 ++
 drivers/gpu/drm/i915/intel_region_lmem.h  | 11 +
 .../drm/i915/selftests/i915_live_selftests.h  |  1 +
 .../drm/i915/selftests/intel_memory_region.c  | 45 +++
 10 files changed, 201 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index da61b0dfe105..07b1d3a6955c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -116,6 +116,7 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+   gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
@@ -144,6 +145,7 @@ i915-y += \
  i915_scheduler.o \
  i915_trace_points.o \
  i915_vma.o \
+ intel_region_lmem.o \
  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index ..26a23304df32
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .get_pages = i915_gem_object_get_pages_buddy,
+   .put_pages = i915_gem_object_put_pages_buddy,
+   .release = i915_gem_object_release_memory_region,
+};
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+   struct intel_memory_region *region = obj->mm.region;
+
+   return region && region->type == INTEL_LMEM;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags)
+{
+   return i915_gem_object_create_region(i915->mm.regions[INTEL_MEMORY_LMEM],
+size, flags);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index ..ebc15fe24f58
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include 
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f7b56a07a035..a2673f41d93b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -100,6 +100,8 @@
 #include "i915_vma.h"
 #include "i915_irq.h"
 
+#include "intel_region_lmem.h"
+
 #include "intel_gvt.h"
 
 /* General customization:
@@ -679,6 +681,8 @@ struct i915_gem_mm {
 */
struct vfsmount *gemfs;
 
+   struct intel_memory_region *regions[INTEL_MEMORY_UKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -2164,6 +2168,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
 
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c 
b/drivers/gpu/drm/i915/intel_memory_region.c

[Intel-gfx] [PATCH v2 01/22] drm/i915/stolen: make the object creation interface consistent

2019-10-03 Thread Matthew Auld
From: CQ Tang 

Our other backends return an actual error value upon failure. Do the
same for stolen objects, which currently just return NULL on failure.
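
The net effect for callers, sketched below (the wrapper function is
hypothetical; the fallback mirrors the fbdev hunk further down):

/* Sketch: callers now check IS_ERR() rather than NULL. */
static struct drm_i915_gem_object *
example_alloc_fb(struct drm_i915_private *dev_priv, resource_size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_stolen(dev_priv, size);
	if (IS_ERR(obj))	/* e.g. -ENODEV, -EINVAL or -ENOMEM */
		obj = i915_gem_object_create_shmem(dev_priv, size);

	return obj;	/* still an ERR_PTR() on failure, never NULL */
}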

Signed-off-by: CQ Tang 
Signed-off-by: Matthew Auld 
Cc: Chris Wilson 
---
 drivers/gpu/drm/i915/display/intel_display.c |  2 +-
 drivers/gpu/drm/i915/display/intel_fbdev.c   |  4 +--
 drivers/gpu/drm/i915/display/intel_overlay.c |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c   | 36 +++-
 drivers/gpu/drm/i915/gt/intel_gt.c   |  2 +-
 drivers/gpu/drm/i915/gt/intel_rc6.c  |  4 +--
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c   |  2 +-
 7 files changed, 28 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c 
b/drivers/gpu/drm/i915/display/intel_display.c
index c3ac5a5c5185..3a7e324fab26 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3071,7 +3071,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 base_aligned,
 size_aligned);
mutex_unlock(>struct_mutex);
-   if (!obj)
+   if (IS_ERR(obj))
return false;
 
switch (plane_config->tiling) {
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c 
b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 68338669f054..8e31f254b0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
 * important and we should probably use that space with FBC or other
 * features. */
-   obj = NULL;
+   obj = ERR_PTR(-ENODEV);
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
-   if (obj == NULL)
+   if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c 
b/drivers/gpu/drm/i915/display/intel_overlay.c
index 5efef9babadb..fb4e2047efdb 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1306,7 +1306,7 @@ static int get_registers(struct intel_overlay *overlay, 
bool use_phys)
mutex_lock(&i915->drm.struct_mutex);
 
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
-   if (obj == NULL)
+   if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index bfbc3e3daf92..3dd295bb61f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -585,28 +585,32 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
int ret;
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
-   return NULL;
+   return ERR_PTR(-ENODEV);
 
if (size == 0)
-   return NULL;
+   return ERR_PTR(-EINVAL);
 
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
-   return NULL;
+   return ERR_PTR(-ENOMEM);
 
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
-   if (ret) {
-   kfree(stolen);
-   return NULL;
-   }
+   if (ret)
+   goto err_free;
 
obj = _i915_gem_object_create_stolen(dev_priv, stolen);
-   if (obj)
-   return obj;
+   if (obj == NULL) {
+   ret = -ENOMEM;
+   goto err_remove;
+   }
 
+   return obj;
+
+err_remove:
i915_gem_stolen_remove_node(dev_priv, stolen);
+err_free:
kfree(stolen);
-   return NULL;
+   return ERR_PTR(ret);
 }
 
 struct drm_i915_gem_object *
@@ -622,7 +626,7 @@ i915_gem_object_create_stolen_for_preallocated(struct 
drm_i915_private *dev_priv
int ret;
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
-   return NULL;
+   return ERR_PTR(-ENODEV);
 
lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
@@ -633,11 +637,11 @@ i915_gem_object_create_stolen_for_preallocated(struct 
drm_i915_private *dev_priv
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
-   return NULL;
+   return ERR_PTR(-EINVAL);
 
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
-   return NULL;
+   return ERR_PTR(-ENOMEM);
 
   

Re: [Intel-gfx] [PATCH] drm/i915/gem: Refactor tests on obj->ops->flags

2019-10-02 Thread Matthew Auld

On 02/10/2019 13:30, Chris Wilson wrote:

We repeat obj->ops->flags in our object checks, so pull that into its
own little helper for clarity.

Signed-off-by: Chris Wilson 
Cc: Matthew Auld 

Reviewed-by: Matthew Auld 


Re: [Intel-gfx] [PATCH] drm/i915/selftests: Extract random_offset() for use with a prng

2019-10-02 Thread Matthew Auld

On 02/10/2019 13:24, Chris Wilson wrote:

For selftests, we desire repeatability and so prefer using a prng with
known seed over true randomness. Extract random_offset() as a selftest
utility that can take the prng state.

Suggested-by: Matthew Auld 
Signed-off-by: Chris Wilson 
Cc: Matthew Auld 

Reviewed-by: Matthew Auld 
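
For context, a minimal sketch of the seeded-prng pattern the quoted
patch builds on (kernel prandom API; random_offset() itself is not
reproduced here, and the helper below is purely illustrative):

#include <linux/random.h>	/* prandom_*_state() */

/* Sketch: a fixed seed gives a repeatable "random" sequence across runs. */
static u32 example_repeatable_value(void)
{
	struct rnd_state prng;

	prandom_seed_state(&prng, i915_selftest.random_seed);
	return prandom_u32_state(&prng);	/* same seed => same sequence */
}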


Re: [Intel-gfx] [PATCH 10/22] drm/i915/selftests: add write-dword test for LMEM

2019-09-30 Thread Matthew Auld

On 27/09/2019 21:42, Chris Wilson wrote:

Quoting Matthew Auld (2019-09-27 18:33:57)

+   i = 0;
+   engines = i915_gem_context_lock_engines(ctx);
+   do {
+   u32 rng = prandom_u32_state();
+   u32 dword = offset_in_page(rng) / 4;
+
+   ce = engines->engines[order[i] % engines->num_engines];
+   i = (i + 1) % (count * count);
+   if (!ce || !intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   err = igt_gpu_write_dw(ce, vma, dword, rng);
+   if (err)
+   break;


Do you have a test that does
dword,
64B or cacheline,
page
random width of the above
before doing the read back of a random dword from those?


Are you thinking write_dw + increment(dword, qword, cl, ..), or actually 
doing the fill: write_dw, write_qw, write_block?


Or maybe both? I have been playing around with the write_dw + increment 
for hugepages.c.




Think nasty cache artifacts, PCI transfers, and timing.
-Chris



[Intel-gfx] [PATCH 07/22] drm/i915: support creating LMEM objects

2019-09-27 Thread Matthew Auld
We currently define LMEM, or local memory, as just another memory
region, like system memory or stolen, which we can expose to userspace
and can be mapped to the CPU via some BAR.

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/Makefile |  2 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  | 31 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  | 23 ++
 drivers/gpu/drm/i915/i915_drv.h   |  5 +++
 drivers/gpu/drm/i915/intel_memory_region.c|  6 +++
 drivers/gpu/drm/i915/intel_memory_region.h| 30 +
 drivers/gpu/drm/i915/intel_region_lmem.c  | 43 ++
 drivers/gpu/drm/i915/intel_region_lmem.h  | 11 +
 .../drm/i915/selftests/i915_live_selftests.h  |  1 +
 .../drm/i915/selftests/intel_memory_region.c  | 45 +++
 10 files changed, 197 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d849dff31f76..ccf4223ed3f9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -115,6 +115,7 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+   gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
@@ -143,6 +144,7 @@ i915-y += \
  i915_scheduler.o \
  i915_trace_points.o \
  i915_vma.o \
+ intel_region_lmem.o \
  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index ..26a23304df32
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .get_pages = i915_gem_object_get_pages_buddy,
+   .put_pages = i915_gem_object_put_pages_buddy,
+   .release = i915_gem_object_release_memory_region,
+};
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+   struct intel_memory_region *region = obj->mm.region;
+
+   return region && region->type == INTEL_LMEM;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags)
+{
+   return i915_gem_object_create_region(i915->mm.regions[INTEL_MEMORY_LMEM],
+size, flags);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index ..ebc15fe24f58
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include 
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+   resource_size_t size,
+   unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 93116cc8b149..05a6491690f7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -100,6 +100,8 @@
 #include "i915_vma.h"
 #include "i915_irq.h"
 
+#include "intel_region_lmem.h"
+
 #include "intel_gvt.h"
 
 /* General customization:
@@ -686,6 +688,8 @@ struct i915_gem_mm {
 */
struct vfsmount *gemfs;
 
+   struct intel_memory_region *regions[INTEL_MEMORY_UKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -2171,6 +2175,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
 
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c 
b/drivers/gpu/drm/i915/intel_memory_region.c

[Intel-gfx] [PATCH 21/22] drm/i915: check for missing aperture in GTT pread/pwrite paths

2019-09-27 Thread Matthew Auld
From: CQ Tang 

drm_mm_insert_node_in_range() treats range_start > range_end as a
programmer error, such that we explode in insert_mappable_node. For now
simply check for missing aperture on such paths.

Signed-off-by: CQ Tang 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd329b6b475c..82daaab022d8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -337,6 +337,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
u64 remain, offset;
int ret;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return -ENOSPC;
+
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
@@ -530,6 +533,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
void __user *user_data;
int ret;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return -ENOSPC;
+
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
-- 
2.20.1


[Intel-gfx] [PATCH 12/22] drm/i915: enumerate and init each supported region

2019-09-27 Thread Matthew Auld
From: Abdiel Janulgue 

Nothing to enumerate yet...

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/i915_drv.h   |  3 +
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 70 +--
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 ++
 3 files changed, 72 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 05a6491690f7..cd1414f2bcb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2394,6 +2394,9 @@ int __must_check i915_gem_evict_for_node(struct 
i915_address_space *vm,
 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915);
+int i915_gem_init_memory_regions(struct drm_i915_private *i915);
+
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e62e9d1a1307..a2963677861d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2744,6 +2744,66 @@ int i915_init_ggtt(struct drm_i915_private *i915)
return 0;
 }
 
+void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915)
+{
+   int i;
+
+   i915_gem_cleanup_stolen(i915);
+
+   for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+   struct intel_memory_region *region = i915->mm.regions[i];
+
+   if (region)
+   intel_memory_region_destroy(region);
+   }
+}
+
+int i915_gem_init_memory_regions(struct drm_i915_private *i915)
+{
+   int err, i;
+
+   /*
+* Initialise stolen early so that we may reserve preallocated
+* objects for the BIOS to KMS transition.
+*/
+   /* XXX: stolen will become a region at some point */
+   err = i915_gem_init_stolen(i915);
+   if (err)
+   return err;
+
+   for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+   struct intel_memory_region *mem = NULL;
+   u32 type;
+
+   if (!HAS_REGION(i915, BIT(i)))
+   continue;
+
+   type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+   switch (type) {
+   default:
+   break;
+   }
+
+   if (IS_ERR(mem)) {
+   err = PTR_ERR(mem);
+   DRM_ERROR("Failed to setup region(%d) type=%d\n", err, 
type);
+   goto out_cleanup;
+   }
+
+   mem->id = intel_region_map[i];
+   mem->type = type;
+   mem->instance = 
MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+
+   i915->mm.regions[i] = mem;
+   }
+
+   return 0;
+
+out_cleanup:
+   i915_gem_cleanup_memory_regions(i915);
+   return err;
+}
+
 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -2785,6 +2845,8 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
 {
struct pagevec *pvec;
 
+   i915_gem_cleanup_memory_regions(i915);
+
fini_aliasing_ppgtt(&i915->ggtt);
 
ggtt_cleanup_hw(&i915->ggtt);
@@ -2794,8 +2856,6 @@ void i915_ggtt_driver_release(struct drm_i915_private 
*i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
-   i915_gem_cleanup_stolen(i915);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3251,11 +3311,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
 
-   /*
-* Initialise stolen early so that we may reserve preallocated
-* objects for the BIOS to KMS transition.
-*/
-   ret = i915_gem_init_stolen(dev_priv);
+   ret = i915_gem_init_memory_regions(dev_priv);
if (ret)
goto out_gtt_cleanup;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c 
b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 32e32b1cd566..f210b5043112 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -82,6 +82,8 @@ static void mock_device_release(struct drm_device *dev)
 
i915_gemfs_fini(i915);
 
+   i915_gem_cleanup_memory_regions(i915);
+
drm_mode_config_cleanup(&i915->drm);
 
drm_dev_fini(&i915->drm);
@@ -219,6 +221,10 @@ struct drm_i915_private *mock_gem_device(void)
 
WARN_ON(i915_gemfs_init(i915));
 
+   err = i915_gem_init_memory_regions(i915);
+   if (err)
+   goto err_context;
+
return i915;
 
 err_context:
-- 
2.20.1


[Intel-gfx] [PATCH 22/22] HAX drm/i915: add the fake lmem region

2019-09-27 Thread Matthew Auld
Intended for upstream testing so that we can still exercise the LMEM
plumbing and !HAS_MAPPABLE_APERTURE paths. Smoke tested on a Skull
Canyon device. This works by allocating an intel_memory_region for a
reserved portion of system memory, which we treat like LMEM. For the
LMEMBAR we steal the aperture and map it 1:1 to the stolen region.

To enable, simply set i915_fake_lmem_start= on the kernel cmdline to the
start of the reserved region (see memmap=). The size of the region we
can use is determined by the size of the mappable aperture, so the
reserved region should be >= mappable_end.

e.g. memmap=2G$16G i915_fake_lmem_start=0x4

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 arch/x86/kernel/early-quirks.c | 26 +++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c   |  3 +
 drivers/gpu/drm/i915/i915_drv.c|  8 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c|  3 +
 drivers/gpu/drm/i915/intel_memory_region.h |  6 ++
 drivers/gpu/drm/i915/intel_region_lmem.c   | 90 ++
 drivers/gpu/drm/i915/intel_region_lmem.h   |  5 ++
 include/drm/i915_drm.h |  3 +
 8 files changed, 144 insertions(+)

diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6f6b1d04dadf..9b04655e3926 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -603,6 +603,32 @@ static void __init intel_graphics_quirks(int num, int 
slot, int func)
}
 }
 
+struct resource intel_graphics_fake_lmem_res __ro_after_init = 
DEFINE_RES_MEM(0, 0);
+EXPORT_SYMBOL(intel_graphics_fake_lmem_res);
+
+static int __init early_i915_fake_lmem_init(char *s)
+{
+   u64 start;
+   int ret;
+
+   if (*s == '=')
+   s++;
+
+   ret = kstrtoull(s, 16, &start);
+   if (ret)
+   return ret;
+
+   intel_graphics_fake_lmem_res.start = start;
+   intel_graphics_fake_lmem_res.end = SZ_2G; /* Placeholder; depends on 
aperture size */
+
+   printk(KERN_INFO "Intel graphics fake LMEM starts at %pa\n",
+  &intel_graphics_fake_lmem_res.start);
+
+   return 0;
+}
+
+early_param("i915_fake_lmem_start", early_i915_fake_lmem_init);
+
 static void __init force_disable_hpet(int num, int slot, int func)
 {
 #ifdef CONFIG_HPET_TIMER
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index d7ec74ed5b88..c5e75c2f2511 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -23,6 +23,7 @@ void __iomem *i915_gem_object_lmem_io_map_page(struct 
drm_i915_gem_object *obj,
resource_size_t offset;
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
 }
@@ -33,6 +34,7 @@ void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct 
drm_i915_gem_object
resource_size_t offset;
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
 }
@@ -46,6 +48,7 @@ void __iomem *i915_gem_object_lmem_io_map(struct 
drm_i915_gem_object *obj,
GEM_BUG_ON(!(obj->flags & I915_BO_ALLOC_CONTIGUOUS));
 
offset = i915_gem_object_get_dma_address(obj, n);
+   offset -= obj->mm.region->region.start;
 
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 91aae56b4280..98fa1932c4aa 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1546,6 +1546,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
 
+   /* Check if we support fake LMEM -- enable for live selftests */
+   if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live &&
+   intel_graphics_fake_lmem_res.start) {
+   mkwrite_device_info(dev_priv)->memory_regions =
+   REGION_SMEM | REGION_LMEM;
+   GEM_BUG_ON(!HAS_LMEM(dev_priv));
+   }
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 29f9c43b2c68..02d2a6266b8c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2778,6 +2778,9 @@ int i915_gem_init_memory_regions(struct drm_i915_private 
*i915)
case INTEL_STOLEN:
mem = i915_gem_stolen_setup(i915);
break;
+   case INTEL_LMEM:
+   mem = intel_setup_fake

[Intel-gfx] [PATCH 16/22] drm/i915: do not map aperture if it is not available.

2019-09-27 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

Skip both setup and cleanup of the aperture mapping if the HW doesn't
have an aperture bar.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 34 ++---
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 51b2087b214f..1be7b236f234 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2827,7 +2827,9 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
mutex_unlock(&i915->drm.struct_mutex);
 
arch_phys_wc_del(ggtt->mtrr);
-   io_mapping_fini(&ggtt->iomap);
+
+   if (ggtt->iomap.size)
+   io_mapping_fini(&ggtt->iomap);
 }
 
 /**
@@ -3038,10 +3040,13 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
 
/* TODO: We're not aware of mappable constraints on gen8 yet */
-   ggtt->gmadr =
-   (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
-pci_resource_len(pdev, 2));
-   ggtt->mappable_end = resource_size(&ggtt->gmadr);
+   /* FIXME: We probably need to add do device_info or runtime_info */
+   if (!HAS_LMEM(dev_priv)) {
+   ggtt->gmadr =
+   (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+pci_resource_len(pdev, 2));
+   ggtt->mappable_end = resource_size(&ggtt->gmadr);
+   }
 
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3267,15 +3272,18 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
 
-   if (!io_mapping_init_wc(&ggtt->iomap,
-   ggtt->gmadr.start,
-   ggtt->mappable_end)) {
-   ggtt->vm.cleanup(&ggtt->vm);
-   ret = -EIO;
-   goto out;
-   }
+   if (ggtt->mappable_end) {
+   if (!io_mapping_init_wc(&ggtt->iomap,
+   ggtt->gmadr.start,
+   ggtt->mappable_end)) {
+   ggtt->vm.cleanup(&ggtt->vm);
+   ret = -EIO;
+   goto out;
+   }
 
-   ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+   ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+   }
 
i915_ggtt_init_fences(ggtt);
 
-- 
2.20.1


[Intel-gfx] [PATCH 13/22] drm/i915: treat shmem as a region

2019-09-27 Thread Matthew Auld
Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |  5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 14 +++-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 68 ++-
 drivers/gpu/drm/i915/i915_drv.h   |  2 +
 drivers/gpu/drm/i915/i915_gem.c   |  9 ---
 drivers/gpu/drm/i915/i915_gem_gtt.c   |  3 +-
 drivers/gpu/drm/i915/i915_pci.c   | 29 +---
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  6 +-
 8 files changed, 95 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c 
b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object 
*obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
 
-   if (!IS_ERR_OR_NULL(pages))
+   if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+   i915_gem_object_release_memory_region(obj);
+   }
mutex_unlock(&obj->mm.lock);
return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index e9550e0364cc..0aeaebb41050 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -6,6 +6,7 @@
 #include "intel_memory_region.h"
 #include "i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 void
 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
@@ -144,11 +145,22 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
 
+   /*
+* There is a prevalence of the assumption that we fit the object's
+* page count inside a 32bit _signed_ variable. Let's document this and
+* catch if we ever need to fix it. In the meantime, if you do spot
+* such a local variable, please consider fixing!
+*/
+
if (size >> PAGE_SHIFT > INT_MAX)
return ERR_PTR(-E2BIG);
 
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
 
-   return mem->ops->create_object(mem, size, flags);
+   obj = mem->ops->create_object(mem, size, flags);
+   if (!IS_ERR(obj))
+   trace_i915_gem_object_create(obj);
+
+   return obj;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9f5d903f7793..696e15e8c410 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
+#include "i915_gemfs.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
 static int shmem_get_pages(struct drm_i915_gem_object *obj)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+   struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 * If there's no chance of allocating enough pages for the whole
 * object, bail early.
 */
-   if (page_count > totalram_pages())
+   if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
 
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 
 static void shmem_release(struct drm_i915_gem_object *obj)
 {
+   i915_gem_object_release_memory_region(obj);
+
fput(obj->base.filp);
 }
 
@@ -435,7 +440,7 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.release = shmem_release,
 };
 
-static int create_shmem(struct drm_i915_private *i915,
+static int __create_shmem(struct drm_i915_private *i915,
struct drm_gem_object *obj,
size_t size)
 {
@@ -456,31 +461,23 @@ static int create_shmem(struct drm_i915_private *i915,
return 0;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i91

[Intel-gfx] [PATCH 15/22] drm/i915: define HAS_MAPPABLE_APERTURE

2019-09-27 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

The following patches in the series will use it to avoid certain
operations when aperture is not available in HW.
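
A sketch of the intended usage pattern in those later patches (the
selftest shown here is hypothetical):

/* Sketch: skip aperture-dependent work when there is no mappable BAR. */
static int example_selftest(void *arg)
{
	struct drm_i915_private *i915 = arg;

	if (!HAS_MAPPABLE_APERTURE(i915))
		return 0;	/* no GTT mmap or fencing to exercise, skip */

	/* ... exercise GGTT mmap, fence registers, error capture, ... */
	return 0;
}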

Signed-off-by: Daniele Ceraolo Spurio 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6cf13e98794a..d6303045f546 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2126,6 +2126,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
(INTEL_INFO(dev_priv)->display.overlay_needs_physical)
 
+#define HAS_MAPPABLE_APERTURE(dev_priv) (dev_priv->ggtt.mappable_end > 0)
+
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)(IS_I830(dev_priv) || 
IS_I845G(dev_priv))
 
-- 
2.20.1


[Intel-gfx] [PATCH 18/22] drm/i915/selftests: check for missing aperture

2019-09-27 Thread Matthew Auld
We may be missing support for the mappable aperture on some platforms.

Signed-off-by: Matthew Auld 
Cc: Daniele Ceraolo Spurio 
---
 .../drm/i915/gem/selftests/i915_gem_coherency.c|  5 -
 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c |  6 ++
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c   | 14 ++
 drivers/gpu/drm/i915/selftests/i915_gem.c  |  3 +++
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c  |  3 +++
 5 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 0ff7a89aadca..07faeada86eb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -246,7 +246,10 @@ static bool always_valid(struct drm_i915_private *i915)
 
 static bool needs_fence_registers(struct drm_i915_private *i915)
 {
-   return !intel_gt_is_wedged(&i915->gt);
+   if (intel_gt_is_wedged(&i915->gt))
+   return false;
+
+   return i915->ggtt.num_fences;
 }
 
 static bool needs_mi_store_dword(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index aefe557527f8..cb880d73ef73 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -301,6 +301,9 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return 0;
+
/* We want to check the page mapping and fencing of a large object
 * mmapped through the GTT. The object we create is larger than can
 * possibly be mmaped as a whole, and so we must use partial GGTT vma.
@@ -433,6 +436,9 @@ static int igt_smoke_tiling(void *arg)
IGT_TIMEOUT(end);
int err;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return 0;
+
/*
 * igt_partial_tiling() does an exhastive check of partial tiling
 * chunking, but will undoubtably run out of time. Here, we do a
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c 
b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index a0098fc35921..35cc2c68b32f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1189,8 +1189,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
+   unsigned int pin_flags;
int err;
 
+   if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+   return 0;
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
 
@@ -1227,10 +1231,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
goto out_obj;
}
 
-   err = i915_vma_pin(arg.vma, 0, 0,
-  i915_vma_is_ggtt(arg.vma) ?
-  PIN_GLOBAL | PIN_MAPPABLE :
-  PIN_USER);
+   pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
+   if (flags & EXEC_OBJECT_NEEDS_FENCE)
+   pin_flags |= PIN_MAPPABLE;
+
+   err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c 
b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 37593831b539..4951957a4d8d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -42,6 +42,9 @@ static void trash_stolen(struct drm_i915_private *i915)
unsigned long page;
u32 prng = 0x12345678;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return;
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.start + page;
u32 __iomem *s;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f4d7b254c9a7..57dd237cd220 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1152,6 +1152,9 @@ static int igt_ggtt_page(void *arg)
unsigned int *order, n;
int err;
 
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   return 0;
+
mutex_lock(&i915->drm.struct_mutex);
 
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-- 
2.20.1


[Intel-gfx] [PATCH 09/22] drm/i915/lmem: support kernel mapping

2019-09-27 Thread Matthew Auld
From: Abdiel Janulgue 

We can create LMEM objects, but we also need to support mapping them
into kernel space for internal use.
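
A rough sketch of how a caller might use the io-mapping helpers added
below (assumes a pinned LMEM object; error handling elided and the
wrapper is illustrative only):

/* Sketch: poke a dword into LMEM page n via the kernel's wc io-mapping. */
static void example_poke_lmem_page(struct drm_i915_gem_object *obj,
				   unsigned long n, u32 value)
{
	void __iomem *vaddr;

	GEM_BUG_ON(!i915_gem_object_is_lmem(obj));

	vaddr = i915_gem_object_lmem_io_map_page(obj, n);	/* wc mapping */
	iowrite32(value, vaddr);
	io_mapping_unmap(vaddr);	/* pairs with io_mapping_map_wc() */
}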

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
Signed-off-by: Steve Hampson 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/gem/i915_gem_internal.c  |  4 +-
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  | 36 +
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |  8 ++
 drivers/gpu/drm/i915/gem/i915_gem_object.h|  6 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  3 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c | 20 -
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c |  3 +-
 .../drm/i915/gem/selftests/huge_gem_object.c  |  4 +-
 .../drm/i915/selftests/intel_memory_region.c  | 76 +++
 9 files changed, 152 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c 
b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 5e72cb1cc2d3..c2e237702e8c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -140,7 +140,9 @@ static void i915_gem_object_put_pages_internal(struct 
drm_i915_gem_object *obj,
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-I915_GEM_OBJECT_IS_SHRINKABLE,
+I915_GEM_OBJECT_IS_SHRINKABLE |
+I915_GEM_OBJECT_IS_MAPPABLE,
+
.get_pages = i915_gem_object_get_pages_internal,
.put_pages = i915_gem_object_put_pages_internal,
 };
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 26a23304df32..d7ec74ed5b88 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,11 +9,47 @@
 #include "i915_drv.h"
 
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+   .flags = I915_GEM_OBJECT_IS_MAPPABLE,
+
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,
 };
 
+/* XXX: Time to vfunc your life up? */
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct 
drm_i915_gem_object *obj,
+ unsigned long n)
+{
+   resource_size_t offset;
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+   resource_size_t offset;
+
+   GEM_BUG_ON(!(obj->flags & I915_BO_ALLOC_CONTIGUOUS));
+
+   offset = i915_gem_object_get_dma_address(obj, n);
+
+   return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
struct intel_memory_region *region = obj->mm.region;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h 
b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index ebc15fe24f58..31a6462bdbb6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -13,6 +13,14 @@ struct drm_i915_gem_object;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+  unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+   unsigned long n);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index d5839cbd82c0..e8cc776581d0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -158,6 +158,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object 
*obj)
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
 }
 
+static inline bool
+i915_gem_object_is_mappable(const struct drm_i915_gem_object *obj)
+{
+   return obj->ops->flags & I915_GEM_OBJECT_IS_MAPPABLE;
+}
+
 static inline bool
 i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/g

[Intel-gfx] [PATCH 19/22] drm/i915: error capture with no ggtt slot

2019-09-27 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

If the aperture is not available in HW we can't use a ggtt slot and wc
copy, so fall back to regular kmap.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 19 
 drivers/gpu/drm/i915/i915_gpu_error.c | 65 ++-
 2 files changed, 64 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1be7b236f234..29f9c43b2c68 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2661,7 +2661,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
 static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
 {
ggtt_release_guc_top(ggtt);
-   drm_mm_remove_node(&ggtt->error_capture);
+   if (drm_mm_node_allocated(&ggtt->error_capture))
+   drm_mm_remove_node(&ggtt->error_capture);
 }
 
 static int init_ggtt(struct i915_ggtt *ggtt)
@@ -2692,13 +2693,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
if (ret)
return ret;
 
-   /* Reserve a mappable slot for our lockless error capture */
-   ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
-   if (ret)
-   return ret;
+   if (HAS_MAPPABLE_APERTURE(ggtt->vm.i915)) {
+   /* Reserve a mappable slot for our lockless error capture */
+   ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+   if (ret)
+   return ret;
+   }
 
/*
 * The upper portion of the GuC address space has a sizeable hole
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c 
b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6384a06aa5bf..c6c96f0c6b28 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -40,6 +40,7 @@
 #include "display/intel_overlay.h"
 
 #include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
 
 #include "i915_drv.h"
 #include "i915_gpu_error.h"
@@ -235,6 +236,7 @@ struct compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
+   bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -292,7 +294,7 @@ static int compress_page(struct compress *c,
struct z_stream_s *zstream = &c->zstream;
 
zstream->next_in = src;
-   if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+   if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
 
@@ -367,6 +369,7 @@ static void err_compression_marker(struct 
drm_i915_error_state_buf *m)
 
 struct compress {
struct pagevec pool;
+   bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -389,7 +392,7 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
 
-   if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+   if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
 
@@ -970,7 +973,6 @@ i915_error_object_create(struct drm_i915_private *i915,
struct drm_i915_error_object *dst;
unsigned long num_pages;
struct sgt_iter iter;
-   dma_addr_t dma;
int ret;
 
might_sleep();
@@ -996,17 +998,54 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
 
+   compress->wc = i915_gem_object_is_lmem(vma->obj) ||
+  drm_mm_node_allocated(&ggtt->error_capture);
+
ret = -EINVAL;
-   for_each_sgt_daddr(dma, iter, vma->pages) {
+   if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+   dma_addr_t dma;
 
-   ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
+   for_each_sgt_daddr(dma, iter, vma->pages) {
+   ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+I915_CACHE_NONE, 0);
 
-   s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
-   io_mapping_unmap(s);
-   if (ret)
-   break;
+   

[Intel-gfx] [PATCH 17/22] drm/i915: set num_fence_regs to 0 if there is no aperture

2019-09-27 Thread Matthew Auld
From: Daniele Ceraolo Spurio 

We can't fence anything without aperture.

Signed-off-by: Daniele Ceraolo Spurio 
Signed-off-by: Stuart Summers 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_gem_fence_reg.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c 
b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 615a9f4ef30c..e15e4e247576 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -828,8 +828,10 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 
detect_bit_6_swizzle(i915);
 
-   if (INTEL_GEN(i915) >= 7 &&
-   !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+   if (!HAS_MAPPABLE_APERTURE(i915))
+   num_fences = 0;
+   else if (INTEL_GEN(i915) >= 7 &&
+!(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (INTEL_GEN(i915) >= 4 ||
 IS_I945G(i915) || IS_I945GM(i915) ||
-- 
2.20.1


[Intel-gfx] [PATCH 20/22] drm/i915: Don't try to place HWS in non-existing mappable region

2019-09-27 Thread Matthew Auld
From: Michal Wajdeczko 

HWS placement restrictions can't just rely on HAS_LLC flag.

Signed-off-by: Michal Wajdeczko 
Cc: Daniele Ceraolo Spurio 
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c 
b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f97686bdc28b..2e3f7a7507ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -513,7 +513,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs 
*engine,
unsigned int flags;
 
flags = PIN_GLOBAL;
-   if (!HAS_LLC(engine->i915))
+   if (!HAS_LLC(engine->i915) && HAS_MAPPABLE_APERTURE(engine->i915))
/*
 * On g33, we cannot place HWS above 256MiB, so
 * restrict its pinning to the low mappable arena.
-- 
2.20.1


[Intel-gfx] [PATCH 14/22] drm/i915: treat stolen as a region

2019-09-27 Thread Matthew Auld
Convert stolen memory over to a region object. This still leaves open
the question of what to do with pre-allocated objects...

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_region.c |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 71 +++---
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c| 14 +
 drivers/gpu/drm/i915/i915_pci.c|  2 +-
 5 files changed, 68 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 0aeaebb41050..77e89fabbddf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -159,7 +159,7 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
return ERR_PTR(-E2BIG);
 
obj = mem->ops->create_object(mem, size, flags);
-   if (!IS_ERR(obj))
+   if (!IS_ERR_OR_NULL(obj))
trace_i915_gem_object_create(obj);
 
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index bfbc3e3daf92..1ee8f1790144 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 
+#include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
 
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private 
*dev_priv,
return 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
 {
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private 
*i915,
}
 }
 
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -539,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object 
*obj)
 
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+   if (obj->mm.region)
+   i915_gem_object_release_memory_region(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -548,8 +552,9 @@ static const struct drm_i915_gem_object_ops 
i915_gem_object_stolen_ops = {
 };
 
 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-  struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+   struct drm_mm_node *stolen,
+   struct intel_memory_region *mem)
 {
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -566,6 +571,9 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
 
+   if (mem)
+   i915_gem_object_init_memory_region(obj, mem, 0);
+
if (i915_gem_object_pin_pages(obj))
goto cleanup;
 
@@ -576,10 +584,12 @@ _i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return NULL;
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+  resource_size_t size,
+  unsigned int flags)
 {
+   struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -600,7 +610,7 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return NULL;
}
 
-   obj = _i915_gem_object_create_stolen(dev_priv, stolen);
+   obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
if (obj)
return obj;
 
@@ -609,6 +619,49 @@ i915_gem_object_create_stolen(struct drm_i915_private 
*dev_priv,
return NULL;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+   struct drm_i915_gem_object *obj;
+
+   obj = i915_gem_object_create_region(dev_priv->mm.regions[INTEL_MEMORY_STOLEN],
+   size, I915_BO_ALLOC_CONTIGUOUS);
+   if (IS_ERR(obj))
+   return NULL;
+
+   return obj;
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+   /*
+

[Intel-gfx] [PATCH 10/22] drm/i915/selftests: add write-dword test for LMEM

2019-09-27 Thread Matthew Auld
Simple test that writes dwords across an object, using various engines in
a randomized order, and then checks from the CPU that our writes landed.
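
Condensed, the loop added below does the following on each iteration (a
sketch, not a verbatim copy): a single random draw supplies both the value
to write and, within a page, the dword offset to write it to.

        u32 rng = prandom_u32_state(&prng);
        u32 dword = offset_in_page(rng) / sizeof(u32);

        err = igt_gpu_write_dw(ce, vma, dword, rng); /* GPU writes the value */
        if (!err)
                err = igt_cpu_check(obj, dword, rng); /* CPU reads it back */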

Signed-off-by: Matthew Auld 
---
 .../drm/i915/selftests/intel_memory_region.c  | 179 ++
 1 file changed, 179 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c 
b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ba98e8254b80..8d7d8b9e00da 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -7,13 +7,16 @@
 
 #include "../i915_selftest.h"
 
+
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 #include "mock_region.h"
 
+#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
 #include "selftests/igt_flush_test.h"
@@ -255,6 +258,133 @@ static int igt_mock_continuous(void *arg)
return err;
 }
 
+static int igt_gpu_write_dw(struct intel_context *ce,
+   struct i915_vma *vma,
+   u32 dword,
+   u32 value)
+{
+   int err;
+
+   i915_gem_object_lock(vma->obj);
+   err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+   i915_gem_object_unlock(vma->obj);
+   if (err)
+   return err;
+
+   return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+  vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+struct drm_i915_gem_object *obj)
+{
+   struct drm_i915_private *i915 = ctx->i915;
+   struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+   struct i915_gem_engines *engines;
+   struct i915_gem_engines_iter it;
+   struct intel_context *ce;
+   I915_RND_STATE(prng);
+   IGT_TIMEOUT(end_time);
+   unsigned int count;
+   struct i915_vma *vma;
+   int *order;
+   int i, n;
+   int err = 0;
+
+   GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+   n = 0;
+   count = 0;
+   for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+   count++;
+   if (!intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   n++;
+   }
+   i915_gem_context_unlock_engines(ctx);
+   if (!n)
+   return 0;
+
+   order = i915_random_order(count * count, &prng);
+   if (!order)
+   return -ENOMEM;
+
+   vma = i915_vma_instance(obj, vm, NULL);
+   if (IS_ERR(vma)) {
+   err = PTR_ERR(vma);
+   goto out_free;
+   }
+
+   err = i915_vma_pin(vma, 0, 0, PIN_USER);
+   if (err)
+   goto out_free;
+
+   i = 0;
+   engines = i915_gem_context_lock_engines(ctx);
+   do {
+   u32 rng = prandom_u32_state(&prng);
+   u32 dword = offset_in_page(rng) / 4;
+
+   ce = engines->engines[order[i] % engines->num_engines];
+   i = (i + 1) % (count * count);
+   if (!ce || !intel_engine_can_store_dword(ce->engine))
+   continue;
+
+   err = igt_gpu_write_dw(ce, vma, dword, rng);
+   if (err)
+   break;
+
+   err = igt_cpu_check(obj, dword, rng);
+   if (err)
+   break;
+   } while (!__igt_timeout(end_time, NULL));
+   i915_gem_context_unlock_engines(ctx);
+
+out_free:
+   kfree(order);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   return err;
+}
+
 static int igt_lmem_create(vo

[Intel-gfx] [PATCH 11/22] drm/i915/selftest: extend coverage to include LMEM huge-pages

2019-09-27 Thread Matthew Auld
Signed-off-by: Matthew Auld 
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 121 +-
 1 file changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b6dc90030156..434c1fc57adf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -9,6 +9,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
@@ -970,7 +971,7 @@ static int gpu_write(struct intel_context *ce,
   vma->size >> PAGE_SHIFT, val);
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 
val)
 {
unsigned int needs_flush;
unsigned long n;
@@ -1002,6 +1003,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, 
u32 dword, u32 val)
return err;
 }
 
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 
val)
+{
+   unsigned long n;
+   int err;
+
+   i915_gem_object_lock(obj);
+   err = i915_gem_object_set_to_wc_domain(obj, false);
+   i915_gem_object_unlock(obj);
+   if (err)
+   return err;
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   return err;
+
+   for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+   u32 __iomem *base;
+   u32 read_val;
+
+   base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+   read_val = ioread32(base + dword);
+   io_mapping_unmap_atomic(base);
+   if (read_val != val) {
+   pr_err("n=%lu base[%u]=%u, val=%u\n",
+  n, dword, read_val, val);
+   err = -EINVAL;
+   break;
+   }
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+   if (i915_gem_object_has_struct_page(obj))
+   return __cpu_check_shmem(obj, dword, val);
+   else if (i915_gem_object_is_lmem(obj))
+   return __cpu_check_lmem(obj, dword, val);
+
+   return -ENODEV;
+}
+
 static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
@@ -1386,6 +1432,78 @@ static int igt_ppgtt_gemfs_huge(void *arg)
return err;
 }
 
+static int igt_ppgtt_lmem_huge(void *arg)
+{
+   struct i915_gem_context *ctx = arg;
+   struct drm_i915_private *i915 = ctx->i915;
+   struct drm_i915_gem_object *obj;
+   static const unsigned int sizes[] = {
+   SZ_64K,
+   SZ_512K,
+   SZ_1M,
+   SZ_2M,
+   };
+   int i;
+   int err;
+
+   if (!HAS_LMEM(i915)) {
+   pr_info("device lacks LMEM support, skipping\n");
+   return 0;
+   }
+
+   /*
+* Sanity check that the HW uses huge pages correctly through LMEM
+* -- ensure that our writes land in the right place.
+*/
+
+   for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+   unsigned int size = sizes[i];
+
+   obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+   if (IS_ERR(obj)) {
+   err = PTR_ERR(obj);
+   if (err == -E2BIG) {
+   pr_info("object too big for region!\n");
+   return 0;
+   }
+
+   return err;
+   }
+
+   err = i915_gem_object_pin_pages(obj);
+   if (err)
+   goto out_put;
+
+   if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+   pr_info("LMEM unable to allocate huge-page(s) with 
size=%u\n",
+   size);
+   goto out_unpin;
+   }
+
+   err = igt_write_huge(ctx, obj);
+   if (err) {
+   pr_err("LMEM write-huge failed with size=%u\n", size);
+   goto out_unpin;
+   }
+
+   i915_gem_object_unpin_pages(obj);
+   __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+   i915_gem_object_put(obj);
+   }
+
+   return 0;
+
+out_unpin:
+   i915_gem_object_unpin_pages(obj);
+out_put:
+   i915_gem_object_put(obj);
+
+   if (err == -ENOMEM)
+   err = 0;
+
+   return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
 

[Intel-gfx] [PATCH 02/22] drm/i915: simplify i915_gem_init_early

2019-09-27 Thread Matthew Auld
i915_gem_init_early doesn't need to return anything.

Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.c | 5 +
 drivers/gpu/drm/i915/i915_drv.h | 2 +-
 drivers/gpu/drm/i915/i915_gem.c | 4 +---
 3 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a9ee73b61f4d..91aae56b4280 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -589,9 +589,7 @@ static int i915_driver_early_probe(struct drm_i915_private 
*dev_priv)
 
intel_gt_init_early(&dev_priv->gt, dev_priv);
 
-   ret = i915_gem_init_early(dev_priv);
-   if (ret < 0)
-   goto err_gt;
+   i915_gem_init_early(dev_priv);
 
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
@@ -613,7 +611,6 @@ static int i915_driver_early_probe(struct drm_i915_private 
*dev_priv)
 
 err_gem:
i915_gem_cleanup_early(dev_priv);
-err_gt:
intel_gt_driver_late_release(&dev_priv->gt);
vlv_free_s0ix_state(dev_priv);
 err_workqueues:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b3c7dbc1832a..0dc504fc6ffc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2250,7 +2250,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void 
*data,
 int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_init_early(struct drm_i915_private *dev_priv);
+void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e2897a666225..3d3fda4cae99 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1533,7 +1533,7 @@ static void i915_gem_init__mm(struct drm_i915_private 
*i915)
i915_gem_init__objects(i915);
 }
 
-int i915_gem_init_early(struct drm_i915_private *dev_priv)
+void i915_gem_init_early(struct drm_i915_private *dev_priv)
 {
int err;
 
@@ -1545,8 +1545,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
err = i915_gemfs_init(dev_priv);
if (err)
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
-
-   return 0;
 }
 
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
-- 
2.20.1


[Intel-gfx] [PATCH 04/22] drm/i915/region: support continuous allocations

2019-09-27 Thread Matthew Auld
Some kernel-internal objects may need to be allocated as one contiguous
block; thinking ahead, the various kernel io_mapping interfaces also seem
to expect it, although this is purely a limitation of the kernel
API...so perhaps something to be improved.
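
As a usage sketch (illustrative, based on the flag added below), a caller
that needs physically contiguous backing simply passes the new flag at
creation time:

        struct drm_i915_gem_object *obj;

        /* Ask the region's buddy allocator for one contiguous block. */
        obj = i915_gem_object_create_region(mem, SZ_16M,
                                            I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);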

Signed-off-by: Matthew Auld 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c|  15 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h|   3 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |   3 +-
 drivers/gpu/drm/i915/intel_memory_region.c|  13 +-
 drivers/gpu/drm/i915/intel_memory_region.h|   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 163 ++
 drivers/gpu/drm/i915/selftests/mock_region.c  |   2 +-
 8 files changed, 197 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index d36c860c9c6f..7acd383f174f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -117,6 +117,10 @@ struct drm_i915_gem_object {
 
I915_SELFTEST_DECLARE(struct list_head st_link);
 
+   unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
/*
 * Is the object to be mapped as read-only to the GPU
 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 5c3bfc121921..b317a5c84144 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -23,10 +23,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 {
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
-   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
resource_size_t size = obj->base.size;
resource_size_t prev_end;
struct i915_buddy_block *block;
+   unsigned int flags;
struct sg_table *st;
struct scatterlist *sg;
unsigned int sg_page_sizes;
@@ -42,6 +42,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
return -ENOMEM;
}
 
+   flags = I915_ALLOC_MIN_PAGE_SIZE;
+   if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+   flags |= I915_ALLOC_CONTIGUOUS;
+
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
if (ret)
goto err_free_sg;
@@ -56,7 +60,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
list_for_each_entry(block, blocks, link) {
u64 block_size, offset;
 
-   block_size = i915_buddy_block_size(&mem->mm, block);
+   block_size = min_t(u64, size,
+  i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
 
GEM_BUG_ON(overflows_type(block_size, sg->length));
@@ -98,10 +103,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object 
*obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem)
+   struct intel_memory_region *mem,
+   unsigned long flags)
 {
INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = mem;
+   obj->flags = flags;
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
@@ -115,6 +122,8 @@ i915_gem_object_create_region(struct intel_memory_region 
*mem,
 {
struct drm_i915_gem_object *obj;
 
+   GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
if (!mem)
return ERR_PTR(-ENODEV);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h 
b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct 
drm_i915_gem_object *obj,
 struct sg_table *pages);
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-   struct intel_memory_region *mem);
+   struct intel_memory_region *mem,
+   unsigned long flags);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 4e1805aaeb99..f9fbf2865782 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_

[Intel-gfx] [PATCH 03/22] drm/i915: introduce intel_memory_region

2019-09-27 Thread Matthew Auld
Support memory regions, as defined by a given (start, end), and allow
creating GEM objects which are backed by said region. The immediate goal
here is to have something to represent our device memory, but later on
we also want to represent every memory domain with a region: stolen,
shmem, and of course device memory. At some point we are probably going
to want to use a common struct here, such that we are better aligned
with, say, TTM.
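
For context, a minimal sketch of the consumer side of the new interface
(illustrative only; error handling trimmed, and the SZ_2M size is
arbitrary):

static int example_region_alloc(struct intel_memory_region *mem)
{
        struct drm_i915_gem_object *obj;

        /* Objects are created against a region; the region's create_object()
         * hook supplies the actual backing store. */
        obj = i915_gem_object_create_region(mem, SZ_2M, 0);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        i915_gem_object_put(obj);
        return 0;
}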

Signed-off-by: Matthew Auld 
Signed-off-by: Abdiel Janulgue 
Signed-off-by: Niranjana Vishwanathapura 
Cc: Joonas Lahtinen 
---
 drivers/gpu/drm/i915/Makefile |   2 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 133 ++
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  28 +++
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  78 
 drivers/gpu/drm/i915/i915_drv.h   |   1 +
 drivers/gpu/drm/i915/intel_memory_region.c| 173 ++
 drivers/gpu/drm/i915/intel_memory_region.h|  77 
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 124 +
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   1 +
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 ++
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 ++
 13 files changed, 702 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6313e7b4bd78..d849dff31f76 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
  i915_utils.o \
  intel_csr.o \
  intel_device_info.o \
+ intel_memory_region.o \
  intel_pch.o \
  intel_pm.o \
  intel_runtime_pm.o \
@@ -118,6 +119,7 @@ gem-y += \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+   gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index d695f187b790..d36c860c9c6f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -158,6 +158,15 @@ struct drm_i915_gem_object {
atomic_t pages_pin_count;
atomic_t shrink_pin;
 
+   /**
+* Memory region for this object.
+*/
+   struct intel_memory_region *region;
+   /**
+* List of memory region blocks allocated for this object.
+*/
+   struct list_head blocks;
+
struct sg_table *pages;
void *mapping;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c 
b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index ..5c3bfc121921
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+   struct sg_table *pages)
+{
+   __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+   obj->mm.dirty = false;
+   sg_free_table(pages);
+   kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+   struct intel_memory_region *mem = obj->mm.region;
+   struct list_head *blocks = &obj->mm.blocks;
+   unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
+   resource_size_t size = obj->base.size;
+   resource_size_t prev_end;
+   struct i915_buddy_block *block;
+   struct sg_table *st;
+   struct scatterlist *sg;
+   unsigned int sg_page_sizes;
+   unsigned long i;
+   int ret;
+
+   st = kmalloc(sizeof(*st), GFP_KERNEL);
+   if (!st)
+   return -ENOMEM;
+
+   if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+   kfree(st);
+   return -ENOMEM;
+   }
+
+   ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+   if (ret)
+   goto err_free_sg;
+
+   GEM_BUG_ON(list_empty(blocks));
+
+   sg = st->sgl;
+  

[Intel-gfx] [PATCH 08/22] drm/i915: setup io-mapping for LMEM

2019-09-27 Thread Matthew Auld
From: Abdiel Janulgue 

Signed-off-by: Abdiel Janulgue 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/intel_region_lmem.c | 28 ++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c 
b/drivers/gpu/drm/i915/intel_region_lmem.c
index 7a3f96e1f766..051069664074 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -36,8 +36,32 @@ lmem_create_object(struct intel_memory_region *mem,
return obj;
 }
 
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+   io_mapping_fini(&mem->iomap);
+   intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+   int ret;
+
+   if (!io_mapping_init_wc(&mem->iomap,
+   mem->io_start,
+   resource_size(&mem->region)))
+   return -EIO;
+
+   ret = intel_memory_region_init_buddy(mem);
+   if (ret)
+   io_mapping_fini(&mem->iomap);
+
+   return ret;
+}
+
 const struct intel_memory_region_ops intel_region_lmem_ops = {
-   .init = intel_memory_region_init_buddy,
-   .release = intel_memory_region_release_buddy,
+   .init = region_lmem_init,
+   .release = region_lmem_release,
.create_object = lmem_create_object,
 };
-- 
2.20.1


[Intel-gfx] [PATCH 05/22] drm/i915/region: support volatile objects

2019-09-27 Thread Matthew Auld
Volatile objects are marked as DONTNEED while pinned, so once unpinned
the backing store can be discarded. This is limited to kernel-internal
objects.
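
A small sketch of the intended behaviour (an assumption drawn from the
internal-object hunk below, not code from the patch): internal objects are
tagged volatile at creation, and i915_gem_object_is_volatile() reports it
to the paths that decide whether unpinned pages may be reaped.

        obj = i915_gem_object_create_internal(i915, SZ_64K);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        /* Tagged at creation now, rather than in get_pages(); the contents
         * are only valid while the pages are pinned. */
        GEM_BUG_ON(!i915_gem_object_is_volatile(obj));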

Signed-off-by: Matthew Auld 
Signed-off-by: CQ Tang 
Cc: Joonas Lahtinen 
Cc: Abdiel Janulgue 
---
 drivers/gpu/drm/i915/gem/i915_gem_internal.c| 17 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h  |  6 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h|  9 -
 drivers/gpu/drm/i915/gem/i915_gem_pages.c   |  6 ++
 drivers/gpu/drm/i915/gem/i915_gem_region.c  | 12 
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 
 drivers/gpu/drm/i915/intel_memory_region.c  |  4 
 drivers/gpu/drm/i915/intel_memory_region.h  |  5 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c   |  5 ++---
 9 files changed, 56 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c 
b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..5e72cb1cc2d3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ static int i915_gem_object_get_pages_internal(struct 
drm_i915_gem_object *obj)
goto err;
}
 
-   /* Mark the pages as dontneed whilst they are still pinned. As soon
-* as they are unpinned they are allowed to be reaped by the shrinker,
-* and the caller is expected to repopulate - the contents of this
-* object are only valid whilst active and pinned.
-*/
-   obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct 
drm_i915_gem_object *obj,
internal_free_pages(pages);
 
obj->mm.dirty = false;
-   obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -188,6 +180,15 @@ i915_gem_object_create_internal(struct drm_i915_private 
*i915,
drm_gem_private_object_init(>drm, >base, size);
i915_gem_object_init(obj, _gem_object_internal_ops);
 
+   /*
+* Mark the object as volatile, such that the pages are marked as
+* dontneed whilst they are still pinned. As soon as they are unpinned
+* they are allowed to be reaped by the shrinker, and the caller is
+* expected to repopulate - the contents of this object are only valid
+* whilst active and pinned.
+*/
+   obj->flags = I915_BO_ALLOC_VOLATILE;
+
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 29b9eddc4c7f..d5839cbd82c0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -122,6 +122,12 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object 
*obj);
 void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
  struct dma_fence *fence);
 
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+   return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 7acd383f174f..0d934b67e547 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -119,7 +119,8 @@ struct drm_i915_gem_object {
 
unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+#define I915_BO_ALLOC_VOLATILE   BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 
/*
 * Is the object to be mapped as read-only to the GPU
@@ -170,6 +171,12 @@ struct drm_i915_gem_object {
 * List of memory region blocks allocated for this object.
 */
struct list_head blocks;
+   /**
+* Element within memory_region->objects or region->purgeable
+* if the object is marked as DONTNEED. Access is protected by
+* region->obj_lock.
+*/
+   struct list_head region_link;
 
struct sg_table *pages;
void *mapping;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 2e941f093a20..b0ec0959c13f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -18,6 +18,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object 
*obj,
 
lockdep_assert_held(&obj->mm.lock);
 

[Intel-gfx] [PATCH 06/22] drm/i915: Add memory region information to device_info

2019-09-27 Thread Matthew Auld
From: Abdiel Janulgue 

Exposes available regions for the platform. Shared memory will
always be available.
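
As an illustrative guard (only the HAS_REGION() macro below comes from the
patch; the region bit name here is a placeholder), code that depends on
device-local memory could check the mask first:

        /* INTEL_MEMORY_LMEM is a placeholder region id for this sketch. */
        if (!HAS_REGION(i915, BIT(INTEL_MEMORY_LMEM)))
                return 0; /* platform has no device-local memory, skip */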

Signed-off-by: Abdiel Janulgue 
Signed-off-by: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.h  | 2 ++
 drivers/gpu/drm/i915/intel_device_info.h | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ed4b8c2484f..93116cc8b149 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2170,6 +2170,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_IPC(dev_priv)   (INTEL_INFO(dev_priv)->display.has_ipc)
 
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+
 #define HAS_GT_UC(dev_priv)(INTEL_INFO(dev_priv)->has_gt_uc)
 
 /* Having GuC is not the same as using GuC */
diff --git a/drivers/gpu/drm/i915/intel_device_info.h 
b/drivers/gpu/drm/i915/intel_device_info.h
index 0cdc2465534b..e9940f932d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -160,6 +160,8 @@ struct intel_device_info {
 
unsigned int page_sizes; /* page sizes supported by the HW */
 
+   u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
 
u8 pipe_mask;
-- 
2.20.1


[Intel-gfx] [PATCH 00/22] LMEM basics

2019-09-27 Thread Matthew Auld
The basic LMEM bits, minus the uAPI, pruning, etc. The goal is to support
basic LMEM object creation within the kernel. From there we can start with the
dumb buffer support, and then the other display related bits.

We still have a few patches that deal with the lack of MAPPABLE_APERTURE
support, though we can probably split those into their own series.

Patches 1-2 are just noise.
Patches 3-6 are the basic intel_memory_region bits + basic mock_selftests.
Patches 7-11 are the basic LMEM region bits + basic live_selftests.
Patches 12-14 try to turn stolen + shmem into intel_memory_regions.
Patches 15-21 implement the !MAPPABLE_APERTURE bits.

The fake LMEM patch allows us to run the live_selftests with LMEM enabled and
!HAS_MAPPABLE_APERTURE in CI.

Abdiel Janulgue (4):
  drm/i915: Add memory region information to device_info
  drm/i915: setup io-mapping for LMEM
  drm/i915/lmem: support kernel mapping
  drm/i915: enumerate and init each supported region

CQ Tang (1):
  drm/i915: check for missing aperture in GTT pread/pwrite paths

Daniele Ceraolo Spurio (4):
  drm/i915: define HAS_MAPPABLE_APERTURE
  drm/i915: do not map aperture if it is not available.
  drm/i915: set num_fence_regs to 0 if there is no aperture
  drm/i915: error capture with no ggtt slot

Matthew Auld (12):
  drm/i915: check for kernel_context
  drm/i915: simplify i915_gem_init_early
  drm/i915: introduce intel_memory_region
  drm/i915/region: support continuous allocations
  drm/i915/region: support volatile objects
  drm/i915: support creating LMEM objects
  drm/i915/selftests: add write-dword test for LMEM
  drm/i915/selftest: extend coverage to include LMEM huge-pages
  drm/i915: treat shmem as a region
  drm/i915: treat stolen as a region
  drm/i915/selftests: check for missing aperture
  HAX drm/i915: add the fake lmem region

Michal Wajdeczko (1):
  drm/i915: Don't try to place HWS in non-existing mappable region

 arch/x86/kernel/early-quirks.c|  26 +
 drivers/gpu/drm/i915/Makefile |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_internal.c  |  21 +-
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  |  70 +++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |  31 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h|  12 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  23 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c |  26 +-
 drivers/gpu/drm/i915/gem/i915_gem_phys.c  |   5 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.c| 166 +
 drivers/gpu/drm/i915/gem/i915_gem_region.h|  29 +
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c |  71 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c|  71 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h|   3 +-
 .../drm/i915/gem/selftests/huge_gem_object.c  |   4 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 212 ++-
 .../i915/gem/selftests/i915_gem_coherency.c   |   5 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c|   6 +
 drivers/gpu/drm/i915/gt/intel_engine_cs.c |   9 +-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  14 +-
 drivers/gpu/drm/i915/i915_drv.c   |  13 +-
 drivers/gpu/drm/i915/i915_drv.h   |  17 +-
 drivers/gpu/drm/i915/i915_gem.c   |  19 +-
 drivers/gpu/drm/i915/i915_gem_fence_reg.c |   6 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 119 +++-
 drivers/gpu/drm/i915/i915_gpu_error.c |  65 +-
 drivers/gpu/drm/i915/i915_pci.c   |  29 +-
 drivers/gpu/drm/i915/intel_device_info.h  |   2 +
 drivers/gpu/drm/i915/intel_memory_region.c| 192 ++
 drivers/gpu/drm/i915/intel_memory_region.h| 119 
 drivers/gpu/drm/i915/intel_region_lmem.c  | 157 +
 drivers/gpu/drm/i915/intel_region_lmem.h  |  16 +
 drivers/gpu/drm/i915/selftests/i915_gem.c |   3 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   8 +-
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../drm/i915/selftests/intel_memory_region.c  | 587 ++
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   9 +-
 drivers/gpu/drm/i915/selftests/mock_region.c  |  59 ++
 drivers/gpu/drm/i915/selftests/mock_region.h  |  16 +
 include/drm/i915_drm.h|   3 +
 41 files changed, 2115 insertions(+), 134 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_lmem.h
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/intel_memory_region.h
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.c
 create mode 100644 drivers/gpu/drm/i915/intel_region_lmem.h
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_memory_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.c
 create mode 100644 drivers/gpu/drm/i915/selftests/mock_region.h
