Hi Daniel,
On 2025-09-12 at 15:19:30 -0300, Daniel Almeida wrote:
> Add an initial test suite covering query device properties, allocating
> memory, binding and unbinding VA ranges through VM_BIND and submitting a
> simple piece of work through GROUP_SUBMIT.
> 
> Signed-off-by: Daniel Almeida <daniel.alme...@collabora.com>

Same remark as before: imho the SPDX copyright line is unnecessary —
a simple Copyright (c) notice would be enough — but it is not a blocker.

Acked-by: Kamil Konieczny <kamil.koniec...@linux.intel.com>

Regards,
Kamil

> ---
>  lib/igt_panthor.c             | 188 +++++++++++++++++++++++
>  lib/igt_panthor.h             |  18 +++
>  tests/panthor/meson.build     |   5 +-
>  tests/panthor/panthor_gem.c   |  66 ++++++++
>  tests/panthor/panthor_group.c | 276 ++++++++++++++++++++++++++++++++++
>  tests/panthor/panthor_vm.c    |  80 ++++++++++
>  6 files changed, 632 insertions(+), 1 deletion(-)
>  create mode 100644 tests/panthor/panthor_gem.c
>  create mode 100644 tests/panthor/panthor_group.c
>  create mode 100644 tests/panthor/panthor_vm.c
> 
> diff --git a/lib/igt_panthor.c b/lib/igt_panthor.c
> index 0b690f796..5d3b572b4 100644
> --- a/lib/igt_panthor.c
> +++ b/lib/igt_panthor.c
> @@ -39,3 +39,191 @@ void igt_panthor_query(int fd, int32_t type, void *data, 
> size_t size, int err)
>       else
>               do_ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
>  }
> +
> +/**
> + * igt_panthor_vm_create:
> + * @fd: device file descriptor
> + * @vm_id: pointer to store the created VM ID
> + * @err: expected error code, or 0 for success
> + *
> + * Creates a VM.
> + */
> +void igt_panthor_vm_create(int fd, uint32_t *vm_id, int err)
> +{
> +     struct drm_panthor_vm_create vm_create = {};
> +
> +     if (err) {
> +             do_ioctl_err(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_create, err);
> +     } else {
> +             do_ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_create);
> +             *vm_id = vm_create.id;
> +     }
> +}
> +
> +/**
> + * igt_panthor_vm_destroy:
> + * @fd: device file descriptor
> + * @vm_id: VM ID to destroy
> + * @err: expected error code, or 0 for success
> + *
> + * Destroys a VM.
> + */
> +void igt_panthor_vm_destroy(int fd, uint32_t vm_id, int err)
> +{
> +     struct drm_panthor_vm_destroy vm_destroy = {
> +             .id = vm_id,
> +     };
> +
> +     if (err)
> +             do_ioctl_err(fd, DRM_IOCTL_PANTHOR_VM_DESTROY, &vm_destroy, 
> err);
> +     else
> +             do_ioctl(fd, DRM_IOCTL_PANTHOR_VM_DESTROY, &vm_destroy);
> +}
> +
> +/**
> + * igt_panthor_vm_bind:
> + * @fd: device file descriptor
> + * @vm_id: VM ID to bind the buffer to
> + * @bo_handle: buffer object handle to bind
> + * @va: virtual address to bind at
> + * @size: size of the binding
> + * @flags: binding flags
> + * @err: expected error code, or 0 for success
> + *
> + * Bind a buffer object to a virtual address in the specified VM.
> + */
> +void igt_panthor_vm_bind(int fd, uint32_t vm_id, uint32_t bo_handle,
> +                      uint64_t va, uint64_t size, uint32_t flags, int err)
> +{
> +     struct drm_panthor_vm_bind_op bind_op = {
> +             .flags = flags,
> +             .bo_handle = bo_handle,
> +             .va = va,
> +             .size = size,
> +     };
> +
> +     struct drm_panthor_vm_bind vm_bind = {
> +             .vm_id = vm_id,
> +             .flags = 0,
> +             .ops = DRM_PANTHOR_OBJ_ARRAY(1, &bind_op),
> +     };
> +
> +     if (err)
> +             do_ioctl_err(fd, DRM_IOCTL_PANTHOR_VM_BIND, &vm_bind, err);
> +     else
> +             do_ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &vm_bind);
> +}
> +
> +/**
> + * igt_panthor_bo_create:
> + * @fd: device file descriptor
> + * @bo: pointer to panthor_bo structure to initialize
> + * @size: requested buffer size in bytes
> + * @flags: buffer object creation flags
> + * @err: expected error code, or 0 for success
> + *
> + * Creates a new buffer object
> + */
> +void igt_panthor_bo_create(int fd, struct panthor_bo *bo,
> +                        uint64_t size, uint32_t flags, int err)
> +{
> +     struct drm_panthor_bo_create bo_create = {
> +             .size = size,
> +             .flags = flags,
> +     };
> +
> +     if (err)
> +             do_ioctl_err(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &bo_create, err);
> +     else
> +             do_ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &bo_create);
> +
> +     bo->handle = bo_create.handle;
> +     bo->size = bo_create.size;
> +     bo->offset = 0;
> +     bo->map = NULL;
> +}
> +
> +/**
> + * igt_panthor_bo_mmap_offset:
> + * @fd: device file descriptor
> + * @handle: buffer object handle
> + * @err: expected error code, or 0 for success
> + *
> + * Get the mmap offset for a buffer object.
> + *
> + * Returns: the mmap offset for the buffer object
> + */
> +uint64_t igt_panthor_bo_mmap_offset(int fd, uint32_t handle, int err)
> +{
> +     struct drm_panthor_bo_mmap_offset bo_mmap_offset = {
> +             .handle = handle,
> +     };
> +
> +     if (err)
> +             do_ioctl_err(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, 
> &bo_mmap_offset, err);
> +     else
> +             do_ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &bo_mmap_offset);
> +
> +     return bo_mmap_offset.offset;
> +}
> +
> +/**
> + * igt_panthor_mmap_bo:
> + * @fd: device file descriptor
> + * @handle: buffer object handle
> + * @size: size of the buffer to map
> + * @prot: memory protection flags (e.g., PROT_READ | PROT_WRITE)
> + * @offset: mmap offset for the buffer object
> + *
> + * Map a buffer object into the process address space.
> + *
> + * Returns: pointer to the mapped memory, or NULL on failure
> + */
> +void *igt_panthor_mmap_bo(int fd, uint32_t handle, uint64_t size,
> +                       unsigned int prot, uint64_t offset)
> +{
> +     void *ptr;
> +
> +     ptr = mmap(0, size, prot, MAP_SHARED, fd, offset);
> +     if (ptr == MAP_FAILED)
> +             return NULL;
> +     return ptr;
> +}
> +
> +/**
> + * igt_panthor_bo_create_mapped:
> + * @fd: device file descriptor
> + * @bo: pointer to panthor_bo structure to initialize
> + * @size: requested buffer size in bytes
> + * @flags: buffer object creation flags
> + * @err: expected error code, or 0 for success
> + *
> + * Create a new buffer object on the panthor device and map it into
> + * the process address space.
> + */
> +void igt_panthor_bo_create_mapped(int fd, struct panthor_bo *bo, uint64_t 
> size,
> +                               uint32_t flags, int err)
> +{
> +     igt_panthor_bo_create(fd, bo, size, flags, err);
> +     bo->offset = igt_panthor_bo_mmap_offset(fd, bo->handle, err);
> +     bo->map = igt_panthor_mmap_bo(fd, bo->handle, bo->size,
> +                                   PROT_READ | PROT_WRITE, bo->offset);
> +}
> +
> +/**
> + * igt_panthor_free_bo:
> + * @fd: panthor device file descriptor
> + * @bo: pointer to panthor_bo structure to free
> + *
> + * Free a buffer object and unmap it if it was mapped.
> + */
> +void igt_panthor_free_bo(int fd, struct panthor_bo *bo)
> +{
> +     if (!bo)
> +             return;
> +
> +     if (bo->map)
> +             munmap(bo->map, bo->size);
> +
> +     gem_close(fd, bo->handle);
> +}
> diff --git a/lib/igt_panthor.h b/lib/igt_panthor.h
> index a99b7102d..275ad54d6 100644
> --- a/lib/igt_panthor.h
> +++ b/lib/igt_panthor.h
> @@ -7,6 +7,24 @@
>  #include <stddef.h>
>  #include <stdint.h>
>  
> +struct panthor_bo {
> +     int handle;
> +     uint64_t offset;
> +     uint64_t size;
> +     void *map;
> +};
> +
>  void igt_panthor_query(int fd, int32_t type, void *data, size_t size, int 
> err);
> +void igt_panthor_vm_create(int fd, uint32_t *vm_id, int err);
> +void igt_panthor_vm_destroy(int fd, uint32_t vm_id, int err);
> +void igt_panthor_vm_bind(int fd, uint32_t vm_id, uint32_t bo_handle,
> +                       uint64_t va, uint64_t size, uint32_t flags, int err);
> +void igt_panthor_bo_create(int fd, struct panthor_bo *bo, uint64_t size, 
> uint32_t flags, int err);
> +uint64_t igt_panthor_bo_mmap_offset(int fd, uint32_t handle, int err);
> +void igt_panthor_free_bo(int fd, struct panthor_bo *bo);
> +void igt_panthor_bo_create_mapped(int fd, struct panthor_bo *bo, uint64_t 
> size,
> +                               uint32_t flags, int err);
> +void *igt_panthor_mmap_bo(int fd, uint32_t handle, uint64_t size,
> +                       unsigned int prot, uint64_t offset);
>  
>  #endif /* IGT_PANTHOR_H */
> diff --git a/tests/panthor/meson.build b/tests/panthor/meson.build
> index ce13aebaa..42a46e993 100644
> --- a/tests/panthor/meson.build
> +++ b/tests/panthor/meson.build
> @@ -1,5 +1,8 @@
>  panthor_progs = [
> -     'panthor_query'
> +     'panthor_gem',
> +     'panthor_group',
> +     'panthor_query',
> +     'panthor_vm',
>  ]
>  
>  foreach prog : panthor_progs
> diff --git a/tests/panthor/panthor_gem.c b/tests/panthor/panthor_gem.c
> new file mode 100644
> index 000000000..7e8e2084d
> --- /dev/null
> +++ b/tests/panthor/panthor_gem.c
> @@ -0,0 +1,66 @@
> +// SPDX-License-Identifier: MIT
> +// SPDX-FileCopyrightText: Copyright (C) 2025 Collabora Ltd.
> +
> +#include "igt.h"
> +#include "igt_core.h"
> +#include "igt_panthor.h"
> +
> +igt_main {
> +     int fd;
> +
> +     igt_fixture { fd = drm_open_driver(DRIVER_PANTHOR); }
> +
> +     igt_describe("Create a buffer object");
> +     igt_subtest("bo_create") {
> +             struct panthor_bo bo;
> +
> +             igt_panthor_bo_create(fd, &bo, 4096, 0, 0);
> +             igt_assert(bo.handle != 0);
> +
> +             igt_panthor_free_bo(fd, &bo);
> +     }
> +
> +     igt_describe("Create a fake mmap offset for a buffer object");
> +     igt_subtest("bo_mmap_offset") {
> +             struct panthor_bo bo;
> +             uint64_t mmap_offset;
> +
> +             igt_panthor_bo_create(fd, &bo, 4096, 0, 0);
> +             igt_assert(bo.handle != 0);
> +
> +             mmap_offset = igt_panthor_bo_mmap_offset(fd, bo.handle, 0);
> +             igt_assert(mmap_offset != 0);
> +
> +             igt_panthor_free_bo(fd, &bo);
> +     }
> +
> +     igt_describe("Same as bo_mmap_offset but with an invalid handle");
> +     igt_subtest("bo_mmap_offset_invalid_handle") {
> +             struct panthor_bo bo;
> +             uint64_t mmap_offset;
> +
> +             igt_panthor_bo_create(fd, &bo, 4096, 0, 0);
> +             igt_assert(bo.handle != 0);
> +
> +             mmap_offset = igt_panthor_bo_mmap_offset(fd, 0xdeadbeef, 
> ENOENT);
> +             igt_assert(mmap_offset == 0);
> +
> +             igt_panthor_free_bo(fd, &bo);
> +     }
> +
> +     igt_describe_f("Create a buffer object whose size is not page-aligned, 
> and check "
> +             "that the allocated size is rounded up to the next page size 
> %lu.",
> +             8192UL);
> +     igt_subtest("bo_create_round_size") {
> +             struct panthor_bo bo;
> +             uint64_t expected_size = 8192;
> +
> +             igt_panthor_bo_create(fd, &bo, 5000, 0, 0);
> +             igt_assert(bo.handle != 0);
> +             igt_assert(bo.size == expected_size);
> +
> +             igt_panthor_free_bo(fd, &bo);
> +     }
> +
> +     igt_fixture { drm_close_driver(fd); }
> +}
> diff --git a/tests/panthor/panthor_group.c b/tests/panthor/panthor_group.c
> new file mode 100644
> index 000000000..0a3b746d8
> --- /dev/null
> +++ b/tests/panthor/panthor_group.c
> @@ -0,0 +1,276 @@
> +// SPDX-License-Identifier: MIT
> +// SPDX-FileCopyrightText: Copyright (C) 2025 Collabora Ltd.
> +
> +#include <stdint.h>
> +#include <sys/mman.h>
> +#include <endian.h> // For htole64
> +#include <unistd.h>
> +
> +#include "drm.h"
> +#include "igt.h"
> +#include "igt_core.h"
> +#include "igt_panthor.h"
> +#include "panthor_drm.h"
> +
> +static void
> +issue_store_multiple(u8 *command_stream, uint64_t kernel_va, uint32_t 
> constant)
> +{
> +             uint64_t opcode, reg_num, mov48, store_multiple, flush;
> +             uint64_t sr, src0, register_bitmap, offset;
> +
> +             // MOV48: Load the source register ([r68; r69]) with the kernel 
> address
> +             opcode = 0x1;
> +             reg_num = 68;
> +             mov48 = (opcode << 56) | (reg_num << 48) | kernel_va;
> +             mov48 = htole64(mov48);
> +             memcpy(&command_stream[0], &mov48, sizeof(mov48));
> +
> +             // MOV48: Load a known constant into r70
> +             opcode = 0x1;
> +             reg_num = 70;
> +             mov48 = (opcode << 56) | (reg_num << 48) | constant;
> +             mov48 = htole64(mov48);
> +             memcpy(&command_stream[8], &mov48, sizeof(mov48));
> +
> +             // STORE_MULTIPLE: Store the first register to the address 
> pointed to by [r68; r69]
> +             opcode = 0x15; // STORE_MULTIPLE
> +             sr = 70; // Starting from register r70
> +             src0 = 68; // Address pointed to by [r68; r69]
> +             register_bitmap = 1; // Store the first register
> +             offset = 0; // Offset
> +             store_multiple = (opcode << 56) | (sr << 48) | (src0 << 40) |
> +                                                                             
>  (register_bitmap << 16) | offset;
> +             store_multiple = htole64(store_multiple);
> +             memcpy(&command_stream[16], &store_multiple, 
> sizeof(store_multiple));
> +
> +             opcode = 0x1;
> +             reg_num = 68;
> +             mov48 = (opcode << 56) | (reg_num << 48) | 0;
> +             mov48 = htole64(mov48);
> +             memcpy(&command_stream[24], &mov48, sizeof(mov48));
> +
> +             opcode = 36;
> +             flush = opcode << 56 | 0ull << 48 | reg_num << 40 | 0ull << 16 
> | 0x233;
> +             flush = htole64(flush);
> +             memcpy(&command_stream[32], &flush, sizeof(flush));
> +}
> +
> +igt_main {
> +     int fd;
> +
> +     igt_fixture { fd = drm_open_driver(DRIVER_PANTHOR); }
> +
> +     igt_describe("Create and destroy a CSF group.");
> +     igt_subtest("group_create") {
> +             struct drm_panthor_gpu_info gpu_info = {};
> +             struct drm_panthor_vm_create vm_create = {};
> +             struct drm_panthor_group_create group_create = {};
> +             struct drm_panthor_queue_create queue = {};
> +             struct drm_panthor_obj_array queues = {};
> +             struct drm_panthor_group_destroy group_destroy = {};
> +             struct drm_panthor_vm_destroy vm_destroy = {};
> +
> +             igt_panthor_query(fd, DRM_PANTHOR_DEV_QUERY_GPU_INFO,
> +                               &gpu_info, sizeof(gpu_info), 0);
> +             igt_assert(gpu_info.gpu_id != 0);
> +
> +             vm_create.flags = 0;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, 
> &vm_create), 0);
> +             igt_assert(vm_create.id != 0);
> +
> +             queue.priority = 0; // Low priority
> +             queue.ringbuf_size = 4096; // Example size
> +             queues = (struct drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(1, 
> &queue);
> +
> +             group_create.queues = queues;
> +             group_create.max_compute_cores = 1;
> +             group_create.max_fragment_cores = 1;
> +             group_create.max_tiler_cores = 1;
> +             group_create.priority = PANTHOR_GROUP_PRIORITY_MEDIUM;
> +             group_create.compute_core_mask = gpu_info.shader_present & 0x1; 
> // Use first core
> +             group_create.fragment_core_mask = gpu_info.shader_present & 
> 0x1; // Use first core
> +             group_create.tiler_core_mask = gpu_info.tiler_present & 0x1; // 
> Use first tiler
> +             group_create.vm_id = vm_create.id;
> +
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, 
> &group_create), 0);
> +             igt_assert(group_create.group_handle != 0);
> +
> +             // Cleanup: Destroy the group and VM
> +             group_destroy = (struct drm_panthor_group_destroy){
> +                     .group_handle = group_create.group_handle
> +             };
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_DESTROY, 
> &group_destroy), 0);
> +
> +             vm_destroy = (struct drm_panthor_vm_destroy) { .id = 
> vm_create.id };
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_VM_DESTROY, 
> &vm_destroy), 0);
> +     }
> +
> +     igt_describe("Submit a job to a group and wait for completion. "
> +                                                      "The job writes a 
> known value to a buffer object that is then "
> +                                                      "mmaped and checked.");
> +     igt_subtest("group_submit") {
> +             struct drm_panthor_gpu_info gpu_info = {};
> +             struct drm_panthor_vm_create vm_create = {};
> +             struct drm_panthor_group_create group_create = {};
> +             struct drm_panthor_queue_create queue = {};
> +             struct drm_panthor_obj_array queues = {};
> +             struct drm_panthor_group_submit group_submit = {};
> +             struct drm_panthor_queue_submit queue_submit = {};
> +             struct drm_panthor_group_destroy group_destroy = {};
> +             struct drm_panthor_obj_array queue_submits = {};
> +             struct drm_panthor_vm_destroy vm_destroy = {};
> +             struct drm_panthor_bo_create bo_create = {};
> +             struct drm_panthor_vm_bind vm_bind = {};
> +             struct drm_panthor_vm_bind_op vm_bind_op = {};
> +             struct drm_syncobj_wait wait = {};
> +             struct drm_syncobj_create syncobj_create = {};
> +             struct drm_panthor_sync_op sync_op = {};
> +             struct drm_gem_close gem_close = {};
> +             struct drm_syncobj_destroy syncobj_destroy = {};
> +             uint64_t command_stream_gpu_addr;
> +             uint32_t command_stream_size;
> +             uint64_t result_gpu_addr;
> +             uint32_t cmd_buf_bo_handle;
> +             uint32_t result_bo_handle;
> +             uint32_t syncobj_handle;
> +             uint8_t command_stream[64] = {0};
> +             uint8_t *bo_cpu_addr;
> +             uint8_t *result_cpu_addr;
> +             const int INITIAL_VA = 0x1000000;
> +             uint64_t bo_mmap_offset;
> +
> +             igt_panthor_query(fd, DRM_PANTHOR_DEV_QUERY_GPU_INFO,
> +                               &gpu_info, sizeof(gpu_info), 0);
> +             igt_assert(gpu_info.gpu_id != 0);
> +
> +             vm_create.flags = 0;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, 
> &vm_create), 0);
> +             igt_assert(vm_create.id != 0);
> +
> +             bo_create.size = 4096;
> +             bo_create.flags = 0;
> +             bo_create.exclusive_vm_id = vm_create.id;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, 
> &bo_create), 0);
> +             igt_assert(bo_create.handle != 0);
> +             cmd_buf_bo_handle = bo_create.handle;
> +
> +             vm_bind_op.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP;
> +             vm_bind_op.bo_handle = cmd_buf_bo_handle;
> +             vm_bind_op.bo_offset = 0;
> +             vm_bind_op.va = INITIAL_VA;
> +             vm_bind_op.size = bo_create.size;
> +             vm_bind.ops = (struct 
> drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(1, &vm_bind_op);
> +             vm_bind.vm_id = vm_create.id;
> +             vm_bind.flags = 0;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, 
> &vm_bind), 0);
> +
> +             command_stream_gpu_addr = vm_bind_op.va;
> +             command_stream_size = sizeof(command_stream);
> +
> +             bo_mmap_offset = igt_panthor_bo_mmap_offset(fd, 
> cmd_buf_bo_handle, 0);
> +             bo_cpu_addr = igt_panthor_mmap_bo(fd, cmd_buf_bo_handle,
> +                                               bo_create.size, PROT_READ | 
> PROT_WRITE,
> +                                                     bo_mmap_offset);
> +             igt_assert(bo_cpu_addr);
> +
> +             // Create the BO to receive the result of the store.
> +             memset(&bo_create, 0, sizeof(bo_create));
> +             bo_create.size = 4096;
> +             bo_create.flags = 0;
> +             bo_create.exclusive_vm_id = vm_create.id;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, 
> &bo_create), 0);
> +             igt_assert(bo_create.handle != 0);
> +             result_bo_handle = bo_create.handle;
> +
> +             // Also bind the result BO.
> +             vm_bind_op.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP;
> +             vm_bind_op.bo_handle = result_bo_handle;
> +             vm_bind_op.bo_offset = 0;
> +             vm_bind_op.va = INITIAL_VA + 4096;
> +             vm_bind_op.size = bo_create.size;
> +             vm_bind.ops = (struct 
> drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(1, &vm_bind_op);
> +             vm_bind.vm_id = vm_create.id;
> +             vm_bind.flags = 0;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, 
> &vm_bind), 0);
> +             result_gpu_addr = vm_bind_op.va;
> +
> +             issue_store_multiple(command_stream, result_gpu_addr, 
> 0xdeadbeef);
> +             memcpy(bo_cpu_addr, command_stream, command_stream_size);
> +             munmap(bo_cpu_addr, bo_create.size);
> +
> +             queue.priority = 0;
> +             queue.ringbuf_size = 4096;
> +             queues = (struct drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(1, 
> &queue);
> +
> +             group_create.queues = queues;
> +             group_create.max_compute_cores = 1;
> +             group_create.max_fragment_cores = 1;
> +             group_create.max_tiler_cores = 1;
> +             group_create.priority = PANTHOR_GROUP_PRIORITY_MEDIUM;
> +             group_create.compute_core_mask = gpu_info.shader_present & 0x1;
> +             group_create.fragment_core_mask = gpu_info.shader_present & 0x1;
> +             group_create.tiler_core_mask = gpu_info.tiler_present & 0x1;
> +             group_create.vm_id = vm_create.id;
> +
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, 
> &group_create), 0);
> +             igt_assert(group_create.group_handle != 0);
> +
> +             syncobj_create = (struct drm_syncobj_create){
> +                             .flags = 0,
> +             };
> +
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, 
> &syncobj_create), 0);
> +             syncobj_handle = syncobj_create.handle;
> +
> +             sync_op = (struct drm_panthor_sync_op) {
> +                             .handle = syncobj_handle,
> +                             .flags = DRM_PANTHOR_SYNC_OP_SIGNAL,
> +             };
> +
> +             queue_submit.syncs = (struct 
> drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(1, &sync_op);
> +
> +             queue_submit.queue_index = 0;
> +             queue_submit.stream_size = command_stream_size;
> +             queue_submit.stream_addr = command_stream_gpu_addr;
> +             queue_submit.latest_flush = 0;
> +             queue_submits = (struct 
> drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(1, &queue_submit);
> +
> +             group_submit.group_handle = group_create.group_handle;
> +             group_submit.queue_submits = queue_submits;
> +
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, 
> &group_submit), 0);
> +
> +             wait = (struct drm_syncobj_wait) {
> +                     .handles = (uint64_t)&syncobj_handle,
> +                     .count_handles = 1,
> +                     .timeout_nsec = INT64_MAX,
> +                     .flags = 0,
> +             };
> +
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait), 0);
> +
> +             bo_mmap_offset = igt_panthor_bo_mmap_offset(fd, 
> result_bo_handle, 0);
> +             result_cpu_addr = igt_panthor_mmap_bo(fd, result_bo_handle,
> +                                                   bo_create.size, PROT_READ 
> | PROT_WRITE, bo_mmap_offset);
> +
> +             igt_assert(*(uint32_t *)result_cpu_addr == 0xdeadbeef);
> +             munmap(result_cpu_addr, bo_create.size);
> +
> +             syncobj_destroy.handle = syncobj_handle;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, 
> &syncobj_destroy), 0);
> +
> +             group_destroy.group_handle = group_create.group_handle;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_DESTROY, 
> &group_destroy), 0);
> +
> +             vm_destroy.id = vm_create.id;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_PANTHOR_VM_DESTROY, 
> &vm_destroy), 0);
> +
> +             gem_close.handle = cmd_buf_bo_handle;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close), 
> 0);
> +
> +             gem_close.handle = result_bo_handle;
> +             igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close), 
> 0);
> +}
> +
> +     igt_fixture { drm_close_driver(fd); }
> +}
> diff --git a/tests/panthor/panthor_vm.c b/tests/panthor/panthor_vm.c
> new file mode 100644
> index 000000000..1b641b601
> --- /dev/null
> +++ b/tests/panthor/panthor_vm.c
> @@ -0,0 +1,80 @@
> +// SPDX-License-Identifier: MIT
> +// SPDX-FileCopyrightText: Copyright (C) 2025 Collabora Ltd.
> +
> +#include "igt.h"
> +#include "igt_core.h"
> +#include "igt_panthor.h"
> +#include "panthor_drm.h"
> +
> +igt_main {
> +     int fd;
> +
> +     igt_fixture { fd = drm_open_driver(DRIVER_PANTHOR); }
> +
> +     igt_describe("Create and destroy a VM");
> +     igt_subtest("vm_create_destroy") {
> +             uint32_t vm_id;
> +
> +             igt_panthor_vm_create(fd, &vm_id, 0);
> +             igt_assert(vm_id != 0);
> +
> +             igt_panthor_vm_destroy(fd, vm_id, 0);
> +     }
> +
> +     igt_subtest("vm_destroy_invalid") {
> +             igt_panthor_vm_destroy(fd, 0xdeadbeef, EINVAL);
> +     }
> +
> +     igt_describe("Test the VM_BIND API synchronously");
> +     igt_subtest("vm_bind") {
> +             uint32_t vm_id;
> +             struct panthor_bo bo;
> +             uint64_t bo_size = 0x1000;
> +
> +             igt_panthor_vm_create(fd, &vm_id, 0);
> +             igt_assert(vm_id != 0);
> +
> +             igt_panthor_bo_create(fd, &bo, bo_size, 0, 0);
> +             igt_panthor_vm_bind(fd, vm_id, bo.handle,
> +                                 0x1000, 0x1000, 
> DRM_PANTHOR_VM_BIND_OP_TYPE_MAP, 0);
> +
> +             igt_panthor_vm_destroy(fd, vm_id, 0);
> +     }
> +
> +     igt_describe("Test unbinding a previously bound range");
> +     igt_subtest("vm_unbind") {
> +             uint32_t vm_id;
> +             struct panthor_bo bo;
> +             uint64_t bo_size = 0x1000;
> +
> +             igt_panthor_vm_create(fd, &vm_id, 0);
> +             igt_assert(vm_id != 0);
> +
> +             igt_panthor_bo_create(fd, &bo, bo_size, 0, 0);
> +             igt_panthor_vm_bind(fd, vm_id, bo.handle,
> +                                 0x1000, 0x1000, 
> DRM_PANTHOR_VM_BIND_OP_TYPE_MAP, 0);
> +             igt_panthor_vm_bind(fd, vm_id, 0,
> +                                 0x1000, 0x1000, 
> DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP, 0);
> +
> +             igt_panthor_vm_destroy(fd, vm_id, 0);
> +     }
> +
> +     igt_describe("Test unbinding an address range that was not previously 
> bound");
> +     igt_subtest("vm_unbind_invalid_address") {
> +             uint32_t vm_id;
> +             struct panthor_bo bo;
> +             uint64_t bo_size = 0x1000;
> +
> +             igt_panthor_vm_create(fd, &vm_id, 0);
> +             igt_assert(vm_id != 0);
> +
> +             igt_panthor_bo_create(fd, &bo, bo_size, 0, 0);
> +
> +             /* This was not bound previously*/
> +             igt_panthor_vm_bind(fd, vm_id, bo.handle,
> +                                 0x1000, 0x1000, 
> DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP, EINVAL);
> +             igt_panthor_vm_destroy(fd, vm_id, 0);
> +     }
> +
> +     igt_fixture { drm_close_driver(fd); }
> +}
> -- 
> 2.51.0
> 

Reply via email to