[Intel-gfx] [PATCH i-g-t 6/9] tests/i915/query: sanity check the unallocated tracking

2022-06-29 Thread Matthew Auld
Sanity check both the unallocated_size & unallocated_cpu_visible_size tracking.

v2(Petri): always use from_user_pointer()

Signed-off-by: Matthew Auld 
Cc: Thomas Hellström 
Reviewed-by: Nirmoy Das 
---
 tests/i915/i915_query.c | 274 +++-
 1 file changed, 273 insertions(+), 1 deletion(-)
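
For context, the tracking under test is reported through the
DRM_I915_QUERY_MEMORY_REGIONS uAPI. A minimal sketch of the two-call
read-back pattern the test relies on (the igt_info() dump is purely
illustrative and not part of the patch):

static void dump_regions(int fd)
{
	struct drm_i915_query_memory_regions *regions;
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
	};
	int i;

	/* First call with data_ptr unset only reports the blob size... */
	i915_query_items(fd, &item, 1);
	igt_assert(item.length > 0);

	regions = calloc(1, item.length);
	item.data_ptr = to_user_pointer(regions);
	/* ...the second call fills in the region info. */
	i915_query_items(fd, &item, 1);

	for (i = 0; i < regions->num_regions; i++) {
		struct drm_i915_memory_region_info info = regions->regions[i];

		igt_info("%u:%u probed=%llu unallocated=%llu\n",
			 info.region.memory_class, info.region.memory_instance,
			 (unsigned long long)info.probed_size,
			 (unsigned long long)info.unallocated_size);
	}

	free(regions);
}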

diff --git a/tests/i915/i915_query.c b/tests/i915/i915_query.c
index ea99dc8d..2bbcfa97 100644
--- a/tests/i915/i915_query.c
+++ b/tests/i915/i915_query.c
@@ -23,6 +23,8 @@
 
 #include "igt.h"
 #include "intel_hwconfig_types.h"
+#include "i915/gem.h"
+#include "i915/gem_create.h"
 
 #include <limits.h>
 
@@ -519,6 +521,36 @@ static bool query_regions_supported(int fd)
  * Should be source compatible either way though.
  */
 #define probed_cpu_visible_size rsvd1[0]
+#define unallocated_cpu_visible_size rsvd1[1]
+static bool query_regions_unallocated_supported(int fd)
+{
+   struct drm_i915_query_memory_regions *regions;
+   struct drm_i915_query_item item;
+   int i, ret = false;
+
+   memset(&item, 0, sizeof(item));
+   item.query_id = DRM_I915_QUERY_MEMORY_REGIONS;
+   i915_query_items(fd, &item, 1);
+   igt_assert(item.length > 0);
+
+   regions = calloc(1, item.length);
+
+   item.data_ptr = to_user_pointer(regions);
+   i915_query_items(fd, &item, 1);
+
+   for (i = 0; i < regions->num_regions; i++) {
+   struct drm_i915_memory_region_info info = regions->regions[i];
+
+   if (info.unallocated_cpu_visible_size) {
+   ret = true;
+   break;
+   }
+   }
+
+   free(regions);
+   return ret;
+}
+
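
Since older kernels leave the rsvd1[] fields zeroed, a nonzero
unallocated_cpu_visible_size in any region doubles as a feature probe.
Subtests that depend on the new tracking can then skip cleanly on old
kernels; a sketch of the gating, where the subtest name and body are
placeholders rather than something taken from this patch:

	igt_subtest("regions-unallocated") {
		igt_require(query_regions_unallocated_supported(fd));
		test_query_regions_unallocated(fd); /* hypothetical body */
	}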
 static void test_query_regions_garbage_items(int fd)
 {
struct drm_i915_query_memory_regions *regions;
@@ -559,8 +591,9 @@ static void test_query_regions_garbage_items(int fd)
 
/*
 * rsvd1[0] : probed_cpu_visible_size
+* rsvd1[1] : unallocated_cpu_visible_size
 */
-   for (j = 1; j < ARRAY_SIZE(info.rsvd1); j++)
+   for (j = 2; j < ARRAY_SIZE(info.rsvd1); j++)
igt_assert_eq_u32(info.rsvd1[j], 0);
}
 
@@ -573,6 +606,46 @@ static void test_query_regions_garbage_items(int fd)
free(regions);
 }
 
+struct object_handle {
+   uint32_t handle;
+   struct igt_list_head link;
+};
+
+static uint32_t batch_create_size(int fd, uint64_t size)
+{
+   const uint32_t bbe = MI_BATCH_BUFFER_END;
+   uint32_t handle;
+
+   handle = gem_create(fd, size);
+   gem_write(fd, handle, 0, &bbe, sizeof(bbe));
+
+   return handle;
+}
+
+static void upload(int fd, struct igt_list_head *handles, uint32_t num_handles)
+{
+   struct drm_i915_gem_exec_object2 *exec;
+   struct drm_i915_gem_execbuffer2 execbuf = {};
+   struct object_handle *iter;
+   uint32_t i;
+
+   exec = calloc(num_handles + 1,
+ sizeof(struct drm_i915_gem_exec_object2));
+
+   i = 0;
+   igt_list_for_each_entry(iter, handles, link)
+   exec[i++].handle = iter->handle;
+
+   exec[i].handle = batch_create_size(fd, 4096);
+
+   execbuf.buffers_ptr = to_user_pointer(exec);
+   execbuf.buffer_count = num_handles + 1;
+
+   gem_execbuf(fd, &execbuf);
+   gem_close(fd, exec[i].handle);
+   free(exec);
+}
+
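
upload() makes every handle on the list resident by referencing it, plus a
throwaway batch, from a single execbuf; that is what lets the test pin
memory in a region and then watch its unallocated_size drop. A sketch of
the calling pattern, where 'region' stands for whichever class:instance is
under test (a placeholder, not taken from the patch):

	struct igt_list_head handles;
	struct object_handle oh = {};

	IGT_INIT_LIST_HEAD(&handles);

	oh.handle = gem_create_with_cpu_access_in_memory_regions(fd, 4096,
								 region);
	igt_list_add(&oh.link, &handles);

	upload(fd, &handles, 1);
	gem_close(fd, oh.handle);
	/* Re-query here: unallocated_size should now be below probed_size. */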
 static void test_query_regions_sanity_check(int fd)
 {
struct drm_i915_query_memory_regions *regions;
@@ -605,8 +678,20 @@ static void test_query_regions_sanity_check(int fd)
 
igt_assert(info.probed_cpu_visible_size == 0 ||
   info.probed_cpu_visible_size ==
   info.probed_size);
+   igt_assert(info.unallocated_size == info.probed_size);
+   igt_assert(info.unallocated_cpu_visible_size == 0 ||
+  info.unallocated_cpu_visible_size ==
+  info.unallocated_size);
} else {
igt_assert(info.probed_cpu_visible_size <=
   info.probed_size);
+   igt_assert(info.unallocated_size <= info.probed_size);
+   if (info.probed_cpu_visible_size < info.probed_size) {
+   igt_assert(info.unallocated_cpu_visible_size <
+  info.unallocated_size);
+   } else {
+   igt_assert(info.unallocated_cpu_visible_size ==
+  info.unallocated_size);
+   }
}
 
igt_assert(r1.memory_class == I915_MEMORY_CLASS_SYSTEM ||
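
The asymmetry in the assertions above comes straight from the BAR size. A
worked example with hypothetical numbers:

	/*
	 * Small-BAR device: probed_size = 8 GiB of lmem behind a 256 MiB
	 * CPU-visible window, so probed_cpu_visible_size < probed_size.
	 * Most of the pool can then never be CPU-mapped, hence
	 * unallocated_cpu_visible_size must stay strictly below
	 * unallocated_size. With full BAR the window covers the whole
	 * region and the two unallocated counters must match exactly.
	 */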
@@ -623,6 +708,58 @@ static void test_query_regions_sanity_check(int fd)
igt_assert(!(r1.memory_class == r2.memory_class &&
 r1.memory_instance == r2.memory_instance));
}
+
+   {
+   struct 

[Intel-gfx] [PATCH i-g-t 6/9] tests/i915/query: sanity check the unallocated tracking

2022-05-25 Thread Matthew Auld
Signed-off-by: Matthew Auld 
Cc: Thomas Hellström 
---
 tests/i915/i915_query.c | 273 +++-
 1 file changed, 272 insertions(+), 1 deletion(-)

diff --git a/tests/i915/i915_query.c b/tests/i915/i915_query.c
index 6b036241..77cbd93e 100644
--- a/tests/i915/i915_query.c
+++ b/tests/i915/i915_query.c
@@ -22,6 +22,8 @@
  */
 
 #include "igt.h"
+#include "i915/gem.h"
+#include "i915/gem_create.h"
 
 #include <limits.h>
 
@@ -518,6 +520,36 @@ static bool query_regions_supported(int fd)
  * Should be source compatible either way though.
  */
 #define probed_cpu_visible_size rsvd1[0]
+#define unallocated_cpu_visible_size rsvd1[1]
+static bool query_regions_unallocated_supported(int fd)
+{
+   struct drm_i915_query_memory_regions *regions;
+   struct drm_i915_query_item item;
+   int i, ret = false;
+
+   memset(&item, 0, sizeof(item));
+   item.query_id = DRM_I915_QUERY_MEMORY_REGIONS;
+   i915_query_items(fd, &item, 1);
+   igt_assert(item.length > 0);
+
+   regions = calloc(1, item.length);
+
+   item.data_ptr = to_user_pointer(regions);
+   i915_query_items(fd, &item, 1);
+
+   for (i = 0; i < regions->num_regions; i++) {
+   struct drm_i915_memory_region_info info = regions->regions[i];
+
+   if (info.unallocated_cpu_visible_size) {
+   ret = true;
+   break;
+   }
+   }
+
+   free(regions);
+   return ret;
+}
+
 static void test_query_regions_garbage_items(int fd)
 {
struct drm_i915_query_memory_regions *regions;
@@ -558,8 +590,9 @@ static void test_query_regions_garbage_items(int fd)
 
/*
 * rsvd1[0] : probed_cpu_visible_size
+* rsvd1[1] : unallocated_cpu_visible_size
 */
-   for (j = 1; j < ARRAY_SIZE(info.rsvd1); j++)
+   for (j = 2; j < ARRAY_SIZE(info.rsvd1); j++)
igt_assert_eq_u32(info.rsvd1[j], 0);
}
 
@@ -572,6 +605,46 @@ static void test_query_regions_garbage_items(int fd)
free(regions);
 }
 
+struct object_handle {
+   uint32_t handle;
+   struct igt_list_head link;
+};
+
+static uint32_t batch_create_size(int fd, uint64_t size)
+{
+   const uint32_t bbe = MI_BATCH_BUFFER_END;
+   uint32_t handle;
+
+   handle = gem_create(fd, size);
+   gem_write(fd, handle, 0, &bbe, sizeof(bbe));
+
+   return handle;
+}
+
+static void upload(int fd, struct igt_list_head *handles, uint32_t num_handles)
+{
+   struct drm_i915_gem_exec_object2 *exec;
+   struct drm_i915_gem_execbuffer2 execbuf = {};
+   struct object_handle *iter;
+   uint32_t i;
+
+   exec = calloc(num_handles + 1,
+ sizeof(struct drm_i915_gem_exec_object2));
+
+   i = 0;
+   igt_list_for_each_entry(iter, handles, link)
+   exec[i++].handle = iter->handle;
+
+   exec[i].handle = batch_create_size(fd, 4096);
+
+   execbuf.buffers_ptr = to_user_pointer(exec);
+   execbuf.buffer_count = num_handles + 1;
+
+   gem_execbuf(fd, &execbuf);
+   gem_close(fd, exec[i].handle);
+   free(exec);
+}
+
 static void test_query_regions_sanity_check(int fd)
 {
struct drm_i915_query_memory_regions *regions;
@@ -604,8 +677,19 @@ static void test_query_regions_sanity_check(int fd)
 
igt_assert(info.probed_cpu_visible_size ==
   info.probed_size);
+   igt_assert(info.unallocated_size == info.probed_size);
+   igt_assert(info.unallocated_cpu_visible_size ==
+  info.unallocated_size);
} else {
igt_assert(info.probed_cpu_visible_size <=
   info.probed_size);
+   igt_assert(info.unallocated_size <= info.probed_size);
+   if (info.probed_cpu_visible_size < info.probed_size) {
+   igt_assert(info.unallocated_cpu_visible_size <
+  info.unallocated_size);
+   } else {
+   igt_assert(info.unallocated_cpu_visible_size ==
+  info.unallocated_size);
+   }
}
 
igt_assert(r1.memory_class == I915_MEMORY_CLASS_SYSTEM ||
@@ -622,6 +706,58 @@ static void test_query_regions_sanity_check(int fd)
igt_assert(!(r1.memory_class == r2.memory_class &&
 r1.memory_instance == r2.memory_instance));
}
+
+   {
+   struct igt_list_head handles;
+   struct object_handle oh = {};
+
+   IGT_INIT_LIST_HEAD(&handles);
+
+   oh.handle =
+   gem_create_with_cpu_access_in_memory_regions
+   (fd, 4096,
+