From: Iouri Tarassov <[email protected]>

Implement ioctls to manage compute device virtual addresses (VA):
  - LX_DXRESERVEGPUVIRTUALADDRESS,
  - LX_DXFREEGPUVIRTUALADDRESS,
  - LX_DXMAPGPUVIRTUALADDRESS,
  - LX_DXUPDATEGPUVIRTUALADDRESS.

Compute devices access memory by using virtual addresses.
Each process has a dedicated VA space. The video memory manager
on the host is responsible for updating device page tables
before submitting a DMA buffer for execution.

The LX_DXRESERVEGPUVIRTUALADDRESS ioctl reserves a portion of the
process compute device VA space.

The LX_DXMAPGPUVIRTUALADDRESS ioctl reserves a portion of the process
compute device VA space and maps it to the given compute device
allocation.

The LX_DXFREEGPUVIRTUALADDRESS ioctl frees the previously reserved portion
of the compute device VA space.

The LX_DXUPDATEGPUVIRTUALADDRESS ioctl adds operations to modify the
compute device VA space to a compute device execution context. It
allows the operations to be queued and synchronized with execution
of other compute device DMA buffers.

Signed-off-by: Iouri Tarassov <[email protected]>
[kms: forward port to 6.6 from 6.1. No code changes made.]
Signed-off-by: Kelsey Steele <[email protected]>
---
 drivers/hv/dxgkrnl/dxgkrnl.h  |  10 ++
 drivers/hv/dxgkrnl/dxgvmbus.c | 150 ++++++++++++++++++++++
 drivers/hv/dxgkrnl/dxgvmbus.h |  38 ++++++
 drivers/hv/dxgkrnl/ioctl.c    | 228 +++++++++++++++++++++++++++++++++-
 include/uapi/misc/d3dkmthk.h  | 126 +++++++++++++++++++
 5 files changed, 548 insertions(+), 4 deletions(-)

diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h
index 93c3ceb23865..93bc9b41aa41 100644
--- a/drivers/hv/dxgkrnl/dxgkrnl.h
+++ b/drivers/hv/dxgkrnl/dxgkrnl.h
@@ -817,6 +817,16 @@ int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgadapter *adapter,
 int dxgvmb_send_submit_command(struct dxgprocess *pr,
                               struct dxgadapter *adapter,
                               struct d3dkmt_submitcommand *args);
+int dxgvmb_send_map_gpu_va(struct dxgprocess *pr, struct d3dkmthandle h,
+                          struct dxgadapter *adapter,
+                          struct d3dddi_mapgpuvirtualaddress *args);
+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *pr,
+                              struct dxgadapter *adapter,
+                              struct d3dddi_reservegpuvirtualaddress *args);
+int dxgvmb_send_free_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter,
+                           struct d3dkmt_freegpuvirtualaddress *args);
+int dxgvmb_send_update_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter,
+                             struct d3dkmt_updategpuvirtualaddress *args);
 int dxgvmb_send_create_sync_object(struct dxgprocess *pr,
                                   struct dxgadapter *adapter,
                                   struct d3dkmt_createsynchronizationobject2
diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c
index f4c4a7e7ad8b..425a1ab87bd6 100644
--- a/drivers/hv/dxgkrnl/dxgvmbus.c
+++ b/drivers/hv/dxgkrnl/dxgvmbus.c
@@ -2432,6 +2432,156 @@ int dxgvmb_send_submit_command(struct dxgprocess *process,
        return ret;
 }
 
+int dxgvmb_send_map_gpu_va(struct dxgprocess *process,
+                          struct d3dkmthandle device,
+                          struct dxgadapter *adapter,
+                          struct d3dddi_mapgpuvirtualaddress *args)
+{
+       struct dxgkvmb_command_mapgpuvirtualaddress *command;
+       struct dxgkvmb_command_mapgpuvirtualaddress_return result;
+       int ret;
+       struct dxgvmbusmsg msg = {.hdr = NULL};
+
+       ret = init_message(&msg, adapter, process, sizeof(*command));
+       if (ret)
+               goto cleanup;
+       command = (void *)msg.msg;
+
+       command_vgpu_to_host_init2(&command->hdr,
+                                  DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS,
+                                  process->host_handle);
+       command->args = *args;
+       command->device = device;
+
+       ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result,
+                                  sizeof(result));
+       if (ret < 0)
+               goto cleanup;
+       args->virtual_address = result.virtual_address;
+       args->paging_fence_value = result.paging_fence_value;
+       ret = ntstatus2int(result.status);
+
+cleanup:
+
+       free_message(&msg, process);
+       if (ret)
+               DXG_TRACE("err: %d", ret);
+       return ret;
+}
+
+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *process,
+                              struct dxgadapter *adapter,
+                              struct d3dddi_reservegpuvirtualaddress *args)
+{
+       struct dxgkvmb_command_reservegpuvirtualaddress *command;
+       struct dxgkvmb_command_reservegpuvirtualaddress_return result;
+       int ret;
+       struct dxgvmbusmsg msg = {.hdr = NULL};
+
+       ret = init_message(&msg, adapter, process, sizeof(*command));
+       if (ret)
+               goto cleanup;
+       command = (void *)msg.msg;
+
+       command_vgpu_to_host_init2(&command->hdr,
+                                  DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS,
+                                  process->host_handle);
+       command->args = *args;
+
+       ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result,
+                                  sizeof(result));
+       args->virtual_address = result.virtual_address;
+
+cleanup:
+       free_message(&msg, process);
+       if (ret)
+               DXG_TRACE("err: %d", ret);
+       return ret;
+}
+
+int dxgvmb_send_free_gpu_va(struct dxgprocess *process,
+                           struct dxgadapter *adapter,
+                           struct d3dkmt_freegpuvirtualaddress *args)
+{
+       struct dxgkvmb_command_freegpuvirtualaddress *command;
+       int ret;
+       struct dxgvmbusmsg msg = {.hdr = NULL};
+
+       ret = init_message(&msg, adapter, process, sizeof(*command));
+       if (ret)
+               goto cleanup;
+       command = (void *)msg.msg;
+
+       command_vgpu_to_host_init2(&command->hdr,
+                                  DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS,
+                                  process->host_handle);
+       command->args = *args;
+
+       ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+       free_message(&msg, process);
+       if (ret)
+               DXG_TRACE("err: %d", ret);
+       return ret;
+}
+
+int dxgvmb_send_update_gpu_va(struct dxgprocess *process,
+                             struct dxgadapter *adapter,
+                             struct d3dkmt_updategpuvirtualaddress *args)
+{
+       struct dxgkvmb_command_updategpuvirtualaddress *command;
+       u32 cmd_size;
+       u32 op_size;
+       int ret;
+       struct dxgvmbusmsg msg = {.hdr = NULL};
+
+       if (args->num_operations == 0 ||
+           (DXG_MAX_VM_BUS_PACKET_SIZE /
+            sizeof(struct d3dddi_updategpuvirtualaddress_operation)) <
+           args->num_operations) {
+               ret = -EINVAL;
+               DXG_ERR("Invalid number of operations: %d",
+                       args->num_operations);
+               goto cleanup;
+       }
+
+       op_size = args->num_operations *
+           sizeof(struct d3dddi_updategpuvirtualaddress_operation);
+       cmd_size = sizeof(struct dxgkvmb_command_updategpuvirtualaddress) +
+           op_size - sizeof(args->operations[0]);
+
+       ret = init_message(&msg, adapter, process, cmd_size);
+       if (ret)
+               goto cleanup;
+       command = (void *)msg.msg;
+
+       command_vgpu_to_host_init2(&command->hdr,
+                                  DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS,
+                                  process->host_handle);
+       command->fence_value = args->fence_value;
+       command->device = args->device;
+       command->context = args->context;
+       command->fence_object = args->fence_object;
+       command->num_operations = args->num_operations;
+       command->flags = args->flags.value;
+       ret = copy_from_user(command->operations, args->operations,
+                            op_size);
+       if (ret) {
+               DXG_ERR("failed to copy operations");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+       free_message(&msg, process);
+       if (ret)
+               DXG_TRACE("err: %d", ret);
+       return ret;
+}
+
 static void set_result(struct d3dkmt_createsynchronizationobject2 *args,
                       u64 fence_gpu_va, u8 *va)
 {
diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h
index 23f92ab9f8ad..88967ff6a505 100644
--- a/drivers/hv/dxgkrnl/dxgvmbus.h
+++ b/drivers/hv/dxgkrnl/dxgvmbus.h
@@ -418,6 +418,44 @@ struct dxgkvmb_command_flushheaptransitions {
        struct dxgkvmb_command_vgpu_to_host hdr;
 };
 
+struct dxgkvmb_command_freegpuvirtualaddress {
+       struct dxgkvmb_command_vgpu_to_host hdr;
+       struct d3dkmt_freegpuvirtualaddress args;
+};
+
+struct dxgkvmb_command_mapgpuvirtualaddress {
+       struct dxgkvmb_command_vgpu_to_host hdr;
+       struct d3dddi_mapgpuvirtualaddress args;
+       struct d3dkmthandle             device;
+};
+
+struct dxgkvmb_command_mapgpuvirtualaddress_return {
+       u64             virtual_address;
+       u64             paging_fence_value;
+       struct ntstatus status;
+};
+
+struct dxgkvmb_command_reservegpuvirtualaddress {
+       struct dxgkvmb_command_vgpu_to_host hdr;
+       struct d3dddi_reservegpuvirtualaddress args;
+};
+
+struct dxgkvmb_command_reservegpuvirtualaddress_return {
+       u64     virtual_address;
+       u64     paging_fence_value;
+};
+
+struct dxgkvmb_command_updategpuvirtualaddress {
+       struct dxgkvmb_command_vgpu_to_host hdr;
+       u64                             fence_value;
+       struct d3dkmthandle             device;
+       struct d3dkmthandle             context;
+       struct d3dkmthandle             fence_object;
+       u32                             num_operations;
+       u32                             flags;
+       struct d3dddi_updategpuvirtualaddress_operation operations[1];
+};
+
 struct dxgkvmb_command_queryclockcalibration {
        struct dxgkvmb_command_vgpu_to_host hdr;
        struct d3dkmt_queryclockcalibration args;
diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c
index 2700da51bc01..f6700e974f25 100644
--- a/drivers/hv/dxgkrnl/ioctl.c
+++ b/drivers/hv/dxgkrnl/ioctl.c
@@ -2492,6 +2492,226 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs)
        return ret;
 }
 
+static int
+dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+       int ret, ret2;
+       struct d3dddi_mapgpuvirtualaddress args;
+       struct d3dddi_mapgpuvirtualaddress *input = inargs;
+       struct dxgdevice *device = NULL;
+       struct dxgadapter *adapter = NULL;
+
+       ret = copy_from_user(&args, inargs, sizeof(args));
+       if (ret) {
+               DXG_ERR("failed to copy input args");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       device = dxgprocess_device_by_object_handle(process,
+                                       HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+                                       args.paging_queue);
+       if (device == NULL) {
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       adapter = device->adapter;
+       ret = dxgadapter_acquire_lock_shared(adapter);
+       if (ret < 0) {
+               adapter = NULL;
+               goto cleanup;
+       }
+
+       ret = dxgvmb_send_map_gpu_va(process, zerohandle, adapter, &args);
+       if (ret < 0)
+               goto cleanup;
+       /* STATUS_PENDING is a success code > 0. It is returned to user mode */
+       if (!(ret == STATUS_PENDING || ret == 0)) {
+               DXG_ERR("Unexpected error %x", ret);
+               goto cleanup;
+       }
+
+       ret2 = copy_to_user(&input->paging_fence_value,
+                           &args.paging_fence_value, sizeof(u64));
+       if (ret2) {
+               DXG_ERR("failed to copy paging fence to user");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       ret2 = copy_to_user(&input->virtual_address, &args.virtual_address,
+                               sizeof(args.virtual_address));
+       if (ret2) {
+               DXG_ERR("failed to copy va to user");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+cleanup:
+
+       if (adapter)
+               dxgadapter_release_lock_shared(adapter);
+       if (device)
+               kref_put(&device->device_kref, dxgdevice_release);
+
+       DXG_TRACE("ioctl:%s %d", errorstr(ret), ret);
+       return ret;
+}
+
+static int
+dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+       int ret;
+       struct d3dddi_reservegpuvirtualaddress args;
+       struct d3dddi_reservegpuvirtualaddress *input = inargs;
+       struct dxgadapter *adapter = NULL;
+       struct dxgdevice *device = NULL;
+
+       ret = copy_from_user(&args, inargs, sizeof(args));
+       if (ret) {
+               DXG_ERR("failed to copy input args");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+       if (adapter == NULL) {
+               device = dxgprocess_device_by_object_handle(process,
+                                               HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+                                               args.adapter);
+               if (device == NULL) {
+                       DXG_ERR("invalid adapter or paging queue: 0x%x",
+                               args.adapter.v);
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
+               adapter = device->adapter;
+               kref_get(&adapter->adapter_kref);
+               kref_put(&device->device_kref, dxgdevice_release);
+       } else {
+               args.adapter = adapter->host_handle;
+       }
+
+       ret = dxgadapter_acquire_lock_shared(adapter);
+       if (ret < 0) {
+               kref_put(&adapter->adapter_kref, dxgadapter_release);
+               adapter = NULL;
+               goto cleanup;
+       }
+
+       ret = dxgvmb_send_reserve_gpu_va(process, adapter, &args);
+       if (ret < 0)
+               goto cleanup;
+
+       ret = copy_to_user(&input->virtual_address, &args.virtual_address,
+                          sizeof(args.virtual_address));
+       if (ret) {
+               DXG_ERR("failed to copy VA to user");
+               ret = -EINVAL;
+       }
+
+cleanup:
+
+       if (adapter) {
+               dxgadapter_release_lock_shared(adapter);
+               kref_put(&adapter->adapter_kref, dxgadapter_release);
+       }
+
+       DXG_TRACE("ioctl:%s %d", errorstr(ret), ret);
+       return ret;
+}
+
+static int
+dxgkio_free_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+       int ret;
+       struct d3dkmt_freegpuvirtualaddress args;
+       struct dxgadapter *adapter = NULL;
+
+       ret = copy_from_user(&args, inargs, sizeof(args));
+       if (ret) {
+               DXG_ERR("failed to copy input args");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       adapter = dxgprocess_adapter_by_handle(process, args.adapter);
+       if (adapter == NULL) {
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       ret = dxgadapter_acquire_lock_shared(adapter);
+       if (ret < 0) {
+               kref_put(&adapter->adapter_kref, dxgadapter_release);
+               adapter = NULL;
+               goto cleanup;
+       }
+
+       args.adapter = adapter->host_handle;
+       ret = dxgvmb_send_free_gpu_va(process, adapter, &args);
+
+cleanup:
+
+       if (adapter) {
+               dxgadapter_release_lock_shared(adapter);
+               kref_put(&adapter->adapter_kref, dxgadapter_release);
+       }
+
+       return ret;
+}
+
+static int
+dxgkio_update_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+       int ret;
+       struct d3dkmt_updategpuvirtualaddress args;
+       struct d3dkmt_updategpuvirtualaddress *input = inargs;
+       struct dxgadapter *adapter = NULL;
+       struct dxgdevice *device = NULL;
+
+       ret = copy_from_user(&args, inargs, sizeof(args));
+       if (ret) {
+               DXG_ERR("failed to copy input args");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       device = dxgprocess_device_by_handle(process, args.device);
+       if (device == NULL) {
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       adapter = device->adapter;
+       ret = dxgadapter_acquire_lock_shared(adapter);
+       if (ret < 0) {
+               adapter = NULL;
+               goto cleanup;
+       }
+
+       ret = dxgvmb_send_update_gpu_va(process, adapter, &args);
+       if (ret < 0)
+               goto cleanup;
+
+       ret = copy_to_user(&input->fence_value, &args.fence_value,
+                          sizeof(args.fence_value));
+       if (ret) {
+               DXG_ERR("failed to copy fence value to user");
+               ret = -EINVAL;
+       }
+
+cleanup:
+
+       if (adapter)
+               dxgadapter_release_lock_shared(adapter);
+       if (device)
+               kref_put(&device->device_kref, dxgdevice_release);
+
+       return ret;
+}
+
 static int
 dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs)
 {
@@ -4931,11 +5151,11 @@ static struct ioctl_desc ioctls[] = {
 /* 0x05 */     {dxgkio_destroy_context, LX_DXDESTROYCONTEXT},
 /* 0x06 */     {dxgkio_create_allocation, LX_DXCREATEALLOCATION},
 /* 0x07 */     {dxgkio_create_paging_queue, LX_DXCREATEPAGINGQUEUE},
-/* 0x08 */     {},
+/* 0x08 */     {dxgkio_reserve_gpu_va, LX_DXRESERVEGPUVIRTUALADDRESS},
 /* 0x09 */     {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO},
 /* 0x0a */     {dxgkio_query_vidmem_info, LX_DXQUERYVIDEOMEMORYINFO},
 /* 0x0b */     {dxgkio_make_resident, LX_DXMAKERESIDENT},
-/* 0x0c */     {},
+/* 0x0c */     {dxgkio_map_gpu_va, LX_DXMAPGPUVIRTUALADDRESS},
 /* 0x0d */     {dxgkio_escape, LX_DXESCAPE},
 /* 0x0e */     {dxgkio_get_device_state, LX_DXGETDEVICESTATE},
 /* 0x0f */     {dxgkio_submit_command, LX_DXSUBMITCOMMAND},
@@ -4956,7 +5176,7 @@ static struct ioctl_desc ioctls[] = {
 /* 0x1d */     {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT},
 /* 0x1e */     {dxgkio_evict, LX_DXEVICT},
 /* 0x1f */     {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS},
-/* 0x20 */     {},
+/* 0x20 */     {dxgkio_free_gpu_va, LX_DXFREEGPUVIRTUALADDRESS},
 /* 0x21 */     {dxgkio_get_context_process_scheduling_priority,
                 LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY},
 /* 0x22 */     {dxgkio_get_context_scheduling_priority,
@@ -4990,7 +5210,7 @@ static struct ioctl_desc ioctls[] = {
                 LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE},
 /* 0x37 */     {dxgkio_unlock2, LX_DXUNLOCK2},
 /* 0x38 */     {dxgkio_update_alloc_property, LX_DXUPDATEALLOCPROPERTY},
-/* 0x39 */     {},
+/* 0x39 */     {dxgkio_update_gpu_va, LX_DXUPDATEGPUVIRTUALADDRESS},
 /* 0x3a */     {dxgkio_wait_sync_object_cpu,
                 LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU},
 /* 0x3b */     {dxgkio_wait_sync_object_gpu,
diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h
index 944f9d1e73d6..1f60f5120e1d 100644
--- a/include/uapi/misc/d3dkmthk.h
+++ b/include/uapi/misc/d3dkmthk.h
@@ -1012,6 +1012,124 @@ struct d3dkmt_evict {
        __u64                           num_bytes_to_trim;
 };
 
+struct d3dddigpuva_protection_type {
+       union {
+               struct {
+                       __u64   write:1;
+                       __u64   execute:1;
+                       __u64   zero:1;
+                       __u64   no_access:1;
+                       __u64   system_use_only:1;
+                       __u64   reserved:59;
+               };
+               __u64           value;
+       };
+};
+
+enum d3dddi_updategpuvirtualaddress_operation_type {
+       _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP             = 0,
+       _D3DDDI_UPDATEGPUVIRTUALADDRESS_UNMAP           = 1,
+       _D3DDDI_UPDATEGPUVIRTUALADDRESS_COPY            = 2,
+       _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP_PROTECT     = 3,
+};
+
+struct d3dddi_updategpuvirtualaddress_operation {
+       enum d3dddi_updategpuvirtualaddress_operation_type operation;
+       union {
+               struct {
+                       __u64           base_address;
+                       __u64           size;
+                       struct d3dkmthandle allocation;
+                       __u64           allocation_offset;
+                       __u64           allocation_size;
+               } map;
+               struct {
+                       __u64           base_address;
+                       __u64           size;
+                       struct d3dkmthandle allocation;
+                       __u64           allocation_offset;
+                       __u64           allocation_size;
+                       struct d3dddigpuva_protection_type protection;
+                       __u64           driver_protection;
+               } map_protect;
+               struct {
+                       __u64   base_address;
+                       __u64   size;
+                       struct d3dddigpuva_protection_type protection;
+               } unmap;
+               struct {
+                       __u64   source_address;
+                       __u64   size;
+                       __u64   dest_address;
+               } copy;
+       };
+};
+
+enum d3dddigpuva_reservation_type {
+       _D3DDDIGPUVA_RESERVE_NO_ACCESS          = 0,
+       _D3DDDIGPUVA_RESERVE_ZERO               = 1,
+       _D3DDDIGPUVA_RESERVE_NO_COMMIT          = 2
+};
+
+struct d3dkmt_updategpuvirtualaddress {
+       struct d3dkmthandle                     device;
+       struct d3dkmthandle                     context;
+       struct d3dkmthandle                     fence_object;
+       __u32                                   num_operations;
+#ifdef __KERNEL__
+       struct d3dddi_updategpuvirtualaddress_operation *operations;
+#else
+       __u64                                   operations;
+#endif
+       __u32                                   reserved0;
+       __u32                                   reserved1;
+       __u64                                   reserved2;
+       __u64                                   fence_value;
+       union {
+               struct {
+                       __u32                   do_not_wait:1;
+                       __u32                   reserved:31;
+               };
+               __u32                           value;
+       } flags;
+       __u32                                   reserved3;
+};
+
+struct d3dddi_mapgpuvirtualaddress {
+       struct d3dkmthandle                     paging_queue;
+       __u64                                   base_address;
+       __u64                                   minimum_address;
+       __u64                                   maximum_address;
+       struct d3dkmthandle                     allocation;
+       __u64                                   offset_in_pages;
+       __u64                                   size_in_pages;
+       struct d3dddigpuva_protection_type      protection;
+       __u64                                   driver_protection;
+       __u32                                   reserved0;
+       __u64                                   reserved1;
+       __u64                                   virtual_address;
+       __u64                                   paging_fence_value;
+};
+
+struct d3dddi_reservegpuvirtualaddress {
+       struct d3dkmthandle                     adapter;
+       __u64                                   base_address;
+       __u64                                   minimum_address;
+       __u64                                   maximum_address;
+       __u64                                   size;
+       enum d3dddigpuva_reservation_type       reservation_type;
+       __u64                                   driver_protection;
+       __u64                                   virtual_address;
+       __u64                                   paging_fence_value;
+};
+
+struct d3dkmt_freegpuvirtualaddress {
+       struct d3dkmthandle     adapter;
+       __u32                   reserved;
+       __u64                   base_address;
+       __u64                   size;
+};
+
 enum d3dkmt_memory_segment_group {
        _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL      = 0,
        _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL  = 1
@@ -1453,12 +1571,16 @@ struct d3dkmt_shareobjectwithhost {
        _IOWR(0x47, 0x06, struct d3dkmt_createallocation)
 #define LX_DXCREATEPAGINGQUEUE         \
        _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue)
+#define LX_DXRESERVEGPUVIRTUALADDRESS  \
+       _IOWR(0x47, 0x08, struct d3dddi_reservegpuvirtualaddress)
 #define LX_DXQUERYADAPTERINFO          \
        _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo)
 #define LX_DXQUERYVIDEOMEMORYINFO      \
        _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo)
 #define LX_DXMAKERESIDENT              \
        _IOWR(0x47, 0x0b, struct d3dddi_makeresident)
+#define LX_DXMAPGPUVIRTUALADDRESS      \
+       _IOWR(0x47, 0x0c, struct d3dddi_mapgpuvirtualaddress)
 #define LX_DXESCAPE                    \
        _IOWR(0x47, 0x0d, struct d3dkmt_escape)
 #define LX_DXGETDEVICESTATE            \
@@ -1493,6 +1615,8 @@ struct d3dkmt_shareobjectwithhost {
        _IOWR(0x47, 0x1e, struct d3dkmt_evict)
 #define LX_DXFLUSHHEAPTRANSITIONS      \
        _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions)
+#define LX_DXFREEGPUVIRTUALADDRESS     \
+       _IOWR(0x47, 0x20, struct d3dkmt_freegpuvirtualaddress)
 #define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \
        _IOWR(0x47, 0x21, struct d3dkmt_getcontextinprocessschedulingpriority)
 #define LX_DXGETCONTEXTSCHEDULINGPRIORITY \
@@ -1529,6 +1653,8 @@ struct d3dkmt_shareobjectwithhost {
        _IOWR(0x47, 0x37, struct d3dkmt_unlock2)
 #define LX_DXUPDATEALLOCPROPERTY       \
        _IOWR(0x47, 0x38, struct d3dddi_updateallocproperty)
+#define LX_DXUPDATEGPUVIRTUALADDRESS   \
+       _IOWR(0x47, 0x39, struct d3dkmt_updategpuvirtualaddress)
 #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \
        _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu)
 #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \

Reply via email to