Module: Mesa
Branch: main
Commit: 1f0a9f853c4530bdb20e84ee6ebc9d11c2f416e8
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=1f0a9f853c4530bdb20e84ee6ebc9d11c2f416e8

Author: José Roberto de Souza <jose.so...@intel.com>
Date:   Fri Nov 24 10:51:39 2023 -0800

intel: Sync xe_drm.h take 2 part 3

Sync xe_drm.h with commit ac7b89571d80 ("drm/xe/uapi: Kill 
exec_queue_set_property").

Signed-off-by: José Roberto de Souza <jose.so...@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwer...@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26360>

---

 include/drm-uapi/xe_drm.h                      | 267 ++++++++++++++-----------
 src/gallium/drivers/iris/xe/iris_kmd_backend.c |   7 +-
 src/intel/common/xe/intel_engine.c             |  14 +-
 src/intel/dev/xe/intel_device_info.c           |   6 +-
 src/intel/vulkan/xe/anv_batch_chain.c          |  12 +-
 src/intel/vulkan/xe/anv_kmd_backend.c          |   3 +-
 src/intel/vulkan/xe/anv_queue.c                |  53 ++---
 7 files changed, 195 insertions(+), 167 deletions(-)

diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 9fedaaa63fe..590f7b7af4b 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -103,12 +103,11 @@ struct xe_user_extension {
 #define DRM_XE_VM_CREATE               0x03
 #define DRM_XE_VM_DESTROY              0x04
 #define DRM_XE_VM_BIND                 0x05
-#define DRM_XE_EXEC                    0x06
-#define DRM_XE_EXEC_QUEUE_CREATE       0x07
-#define DRM_XE_EXEC_QUEUE_DESTROY      0x08
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x09
-#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x0a
-#define DRM_XE_WAIT_USER_FENCE         0x0b
+#define DRM_XE_EXEC_QUEUE_CREATE       0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY      0x07
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
+#define DRM_XE_EXEC                    0x09
+#define DRM_XE_WAIT_USER_FENCE         0x0a
 /* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_XE_DEVICE_QUERY              DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
@@ -117,14 +116,21 @@ struct xe_user_extension {
 #define DRM_IOCTL_XE_VM_CREATE                 DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
 #define DRM_IOCTL_XE_VM_DESTROY                        DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
 #define DRM_IOCTL_XE_VM_BIND                   DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
-#define DRM_IOCTL_XE_EXEC                      DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
 #define DRM_IOCTL_XE_EXEC_QUEUE_CREATE         DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
 #define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY                DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
 #define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY   DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
 #define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY   DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC                      DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
 #define DRM_IOCTL_XE_WAIT_USER_FENCE           DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
 
-/** struct drm_xe_engine_class_instance - instance of an engine class */
+/**
+ * struct drm_xe_engine_class_instance - instance of an engine class
+ *
+ * It is returned as part of the @drm_xe_engine, but it also is used as
+ * the input of engine selection for both @drm_xe_exec_queue_create and
+ * @drm_xe_query_engine_cycles
+ *
+ */
 struct drm_xe_engine_class_instance {
 #define DRM_XE_ENGINE_CLASS_RENDER             0
 #define DRM_XE_ENGINE_CLASS_COPY               1
@@ -145,6 +151,33 @@ struct drm_xe_engine_class_instance {
        __u16 pad;
 };
 
+/**
+ * struct drm_xe_engine - describe hardware engine
+ */
+struct drm_xe_engine {
+       /** @instance: The @drm_xe_engine_class_instance */
+       struct drm_xe_engine_class_instance instance;
+
+       /** @reserved: Reserved */
+       __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_query_engines - describe engines
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
+ * struct @drm_xe_query_engines in .data.
+ */
+struct drm_xe_query_engines {
+       /** @num_engines: number of engines returned in @engines */
+       __u32 num_engines;
+       /** @pad: MBZ */
+       __u32 pad;
+       /** @engines: The returned engines for this device */
+       struct drm_xe_engine engines[];
+};
+
 /**
  * enum drm_xe_memory_class - Supported memory classes.
  */
@@ -160,10 +193,10 @@ enum drm_xe_memory_class {
 };
 
 /**
- * struct drm_xe_query_mem_region - Describes some region as known to
+ * struct drm_xe_mem_region - Describes some region as known to
  * the driver.
  */
-struct drm_xe_query_mem_region {
+struct drm_xe_mem_region {
        /**
         * @mem_class: The memory class describing this region.
         *
@@ -177,18 +210,18 @@ struct drm_xe_query_mem_region {
         * a unique pair.
         */
        __u16 instance;
-       /** @pad: MBZ */
-       __u32 pad;
        /**
         * @min_page_size: Min page-size in bytes for this region.
         *
         * When the kernel allocates memory for this region, the
         * underlying pages will be at least @min_page_size in size.
-        *
-        * Important note: When userspace allocates a GTT address which
-        * can point to memory allocated from this region, it must also
-        * respect this minimum alignment. This is enforced by the
-        * kernel.
+        * Buffer objects with an allowable placement in this region must be
+        * created with a size aligned to this value.
+        * GPU virtual address mappings of (parts of) buffer objects that
+        * may be placed in this region must also have their GPU virtual
+        * address and range aligned to this value.
+        * Affected IOCTLS will return %-EINVAL if alignment restrictions are
+        * not met.
         */
        __u32 min_page_size;
        /**
@@ -244,8 +277,8 @@ struct drm_xe_query_mem_region {
  * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
  * .data points to this allocated structure.
  *
- * The query returns the engine cycles and the frequency that can
- * be used to calculate the engine timestamp. In addition the
+ * The query returns the engine cycles, which along with GT's @reference_clock,
+ * can be used to calculate the engine timestamp. In addition the
  * query returns a set of cpu timestamps that indicate when the command
  * streamer cycle count was captured.
  */
@@ -273,9 +306,6 @@ struct drm_xe_query_engine_cycles {
         */
        __u64 engine_cycles;
 
-       /** @engine_frequency: Frequency of the engine cycles in Hz. */
-       __u64 engine_frequency;
-
        /**
         * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
         * reading the engine_cycles register using the reference clockid set by the
@@ -298,12 +328,12 @@ struct drm_xe_query_engine_cycles {
  * struct drm_xe_query_mem_regions in .data.
  */
 struct drm_xe_query_mem_regions {
-       /** @num_regions: number of memory regions returned in @regions */
-       __u32 num_regions;
+       /** @num_mem_regions: number of memory regions returned in @mem_regions */
+       __u32 num_mem_regions;
        /** @pad: MBZ */
        __u32 pad;
-       /** @regions: The returned regions for this device */
-       struct drm_xe_query_mem_region regions[];
+       /** @mem_regions: The returned memory regions for this device */
+       struct drm_xe_mem_region mem_regions[];
 };
 
 /**
@@ -332,22 +362,26 @@ struct drm_xe_query_config {
 };
 
 /**
- * struct drm_xe_query_gt - describe an individual GT.
+ * struct drm_xe_gt - describe an individual GT.
  *
  * To be used with drm_xe_query_gt_list, which will return a list with all the
  * existing GT individual descriptions.
  * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
  * implementing graphics and/or media operations.
  */
-struct drm_xe_query_gt {
+struct drm_xe_gt {
 #define DRM_XE_QUERY_GT_TYPE_MAIN              0
 #define DRM_XE_QUERY_GT_TYPE_MEDIA             1
        /** @type: GT type: Main or Media */
        __u16 type;
+       /** @tile_id: Tile ID where this GT lives (Information only) */
+       __u16 tile_id;
        /** @gt_id: Unique ID of this GT within the PCI Device */
        __u16 gt_id;
-       /** @clock_freq: A clock frequency for timestamp */
-       __u32 clock_freq;
+       /** @pad: MBZ */
+       __u16 pad[3];
+       /** @reference_clock: A clock frequency for timestamp */
+       __u32 reference_clock;
        /**
         * @near_mem_regions: Bit mask of instances from
         * drm_xe_query_mem_regions that are nearest to the current engines
@@ -379,7 +413,7 @@ struct drm_xe_query_gt_list {
        /** @pad: MBZ */
        __u32 pad;
        /** @gt_list: The GT list returned for this device */
-       struct drm_xe_query_gt gt_list[];
+       struct drm_xe_gt gt_list[];
 };
 
 /**
@@ -442,28 +476,32 @@ struct drm_xe_query_topology_mask {
  *
  * .. code-block:: C
  *
- *     struct drm_xe_engine_class_instance *hwe;
- *     struct drm_xe_device_query query = {
- *             .extensions = 0,
- *             .query = DRM_XE_DEVICE_QUERY_ENGINES,
- *             .size = 0,
- *             .data = 0,
- *     };
- *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
- *     hwe = malloc(query.size);
- *     query.data = (uintptr_t)hwe;
- *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
- *     int num_engines = query.size / sizeof(*hwe);
- *     for (int i = 0; i < num_engines; i++) {
- *             printf("Engine %d: %s\n", i,
- *                     hwe[i].engine_class == DRM_XE_ENGINE_CLASS_RENDER ? 
"RENDER":
- *                     hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COPY ? 
"COPY":
- *                     hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_DECODE 
? "VIDEO_DECODE":
- *                     hwe[i].engine_class == 
DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
- *                     hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COMPUTE ? 
"COMPUTE":
- *                     "UNKNOWN");
- *     }
- *     free(hwe);
+ *     struct drm_xe_query_engines *engines;
+ *     struct drm_xe_device_query query = {
+ *         .extensions = 0,
+ *         .query = DRM_XE_DEVICE_QUERY_ENGINES,
+ *         .size = 0,
+ *         .data = 0,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ *     engines = malloc(query.size);
+ *     query.data = (uintptr_t)engines;
+ *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ *     for (int i = 0; i < engines->num_engines; i++) {
+ *         printf("Engine %d: %s\n", i,
+ *             engines->engines[i].instance.engine_class ==
+ *                 DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
+ *             engines->engines[i].instance.engine_class ==
+ *                 DRM_XE_ENGINE_CLASS_COPY ? "COPY":
+ *             engines->engines[i].instance.engine_class ==
+ *                 DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
+ *             engines->engines[i].instance.engine_class ==
+ *                 DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
+ *             engines->engines[i].instance.engine_class ==
+ *                 DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
+ *             "UNKNOWN");
+ *     }
+ *     free(engines);
  */
 struct drm_xe_device_query {
        /** @extensions: Pointer to the first extension struct, if any */
@@ -494,14 +532,16 @@ struct drm_xe_gem_create {
        __u64 extensions;
 
        /**
-        * @size: Requested size for the object
-        *
-        * The (page-aligned) allocated size for the object will be returned.
+        * @size: Size of the object to be created, must match region
+        * (system or vram) minimum alignment (&min_page_size).
         */
        __u64 size;
 
-#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING           (0x1 << 24)
-#define DRM_XE_GEM_CREATE_FLAG_SCANOUT                 (0x1 << 25)
+       /** @placement: A mask of memory instances of where BO can be placed. */
+       __u32 placement;
+
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING           (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT                 (1 << 1)
 /*
  * When using VRAM as a possible placement, ensure that the corresponding VRAM
  * allocation will always use the CPU accessible part of VRAM. This is important
@@ -517,7 +557,7 @@ struct drm_xe_gem_create {
  * display surfaces, therefore the kernel requires setting this flag for such
  * objects, otherwise an error is thrown on small-bar systems.
  */
-#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM      (0x1 << 26)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM      (1 << 2)
        /**
         * @flags: Flags, currently a mask of memory instances of where BO can
         * be placed
@@ -559,7 +599,7 @@ struct drm_xe_gem_create {
 #define DRM_XE_GEM_CPU_CACHING_WC                      2
        __u16 cpu_caching;
        /** @pad: MBZ */
-       __u16 pad;
+       __u16 pad[3];
 
        /** @reserved: Reserved */
        __u64 reserved[2];
@@ -601,13 +641,33 @@ struct drm_xe_ext_set_property {
 };
 
 struct drm_xe_vm_create {
-#define DRM_XE_VM_EXTENSION_SET_PROPERTY       0
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
 #define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE     (1 << 0)
-#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE     (1 << 1)
+       /*
+        * An LR, or Long Running VM accepts exec submissions
+        * to its exec_queues that don't have an upper time limit on
+        * the job execution time. But exec submissions to these
+        * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
+        * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
+        * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+        * LR VMs can be created in recoverable page-fault mode using
+        * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
+        * If that flag is omitted, the UMD can not rely on the slightly
+        * different per-VM overcommit semantics that are enabled by
+        * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
+        * still enable recoverable pagefaults if supported by the device.
+        */
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE          (1 << 1)
 #define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT    (1 << 2)
+       /*
+        * DRM_XE_VM_CREATE_FLAG_FAULT_MODE requires also
+        * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated
+        * on demand when accessed, and also allows per-VM overcommit of memory.
+        * The xe driver internally uses recoverable pagefaults to implement
+        * this.
+        */
 #define DRM_XE_VM_CREATE_FLAG_FAULT_MODE       (1 << 3)
        /** @flags: Flags */
        __u32 flags;
@@ -631,6 +691,9 @@ struct drm_xe_vm_destroy {
 };
 
 struct drm_xe_vm_bind_op {
+       /** @extensions: Pointer to the first extension struct, if any */
+       __u64 extensions;
+
        /**
         * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
         */
@@ -698,12 +761,6 @@ struct drm_xe_vm_bind_op {
        /** @addr: Address to operate on, MBZ for UNMAP_ALL */
        __u64 addr;
 
-       /**
-        * @tile_mask: Mask for which tiles to create binds for, 0 == All tiles,
-        * only applies to creating new VMAs
-        */
-       __u64 tile_mask;
-
 #define DRM_XE_VM_BIND_OP_MAP          0x0
 #define DRM_XE_VM_BIND_OP_UNMAP                0x1
 #define DRM_XE_VM_BIND_OP_MAP_USERPTR  0x2
@@ -737,8 +794,11 @@ struct drm_xe_vm_bind_op {
         */
        __u32 prefetch_mem_region_instance;
 
+       /** @pad: MBZ */
+       __u32 pad2;
+
        /** @reserved: Reserved */
-       __u64 reserved[2];
+       __u64 reserved[3];
 };
 
 struct drm_xe_vm_bind {
@@ -755,12 +815,12 @@ struct drm_xe_vm_bind {
         */
        __u32 exec_queue_id;
 
-       /** @num_binds: number of binds in this IOCTL */
-       __u32 num_binds;
-
        /** @pad: MBZ */
        __u32 pad;
 
+       /** @num_binds: number of binds in this IOCTL */
+       __u32 num_binds;
+
        union {
                /** @bind: used if num_binds == 1 */
                struct drm_xe_vm_bind_op bind;
@@ -772,12 +832,12 @@ struct drm_xe_vm_bind {
                __u64 vector_of_binds;
        };
 
+       /** @pad: MBZ */
+       __u32 pad2;
+
        /** @num_syncs: amount of syncs to wait on */
        __u32 num_syncs;
 
-       /** @pad2: MBZ */
-       __u32 pad2;
-
        /** @syncs: pointer to struct drm_xe_sync array */
        __u64 syncs;
 
@@ -799,38 +859,17 @@ struct drm_xe_vm_bind {
 /* Monitor 64MB contiguous region with 2M sub-granularity */
 #define DRM_XE_ACC_GRANULARITY_64M 3
 
-/**
- * struct drm_xe_exec_queue_set_property - exec queue set property
- *
- * Same namespace for extensions as drm_xe_exec_queue_create
- */
-struct drm_xe_exec_queue_set_property {
-       /** @extensions: Pointer to the first extension struct, if any */
-       __u64 extensions;
-
-       /** @exec_queue_id: Exec queue ID */
-       __u32 exec_queue_id;
-
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY                        0
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE               1
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT      2
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE             3
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT             4
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER             5
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY              6
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY         7
-       /** @property: property to set */
-       __u32 property;
-
-       /** @value: property value */
-       __u64 value;
-
-       /** @reserved: Reserved */
-       __u64 reserved[2];
-};
-
 struct drm_xe_exec_queue_create {
-#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY               0
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY               0
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY              0
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE             1
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT    2
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE           3
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT           4
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER           5
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY            6
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY       7
+
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
@@ -895,15 +934,15 @@ struct drm_xe_sync {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
-#define DRM_XE_SYNC_FLAG_SYNCOBJ               0x0
-#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ      0x1
-#define DRM_XE_SYNC_FLAG_DMA_BUF               0x2
-#define DRM_XE_SYNC_FLAG_USER_FENCE            0x3
-#define DRM_XE_SYNC_FLAG_SIGNAL                0x10
-       __u32 flags;
+#define DRM_XE_SYNC_TYPE_SYNCOBJ               0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ      0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE            0x2
+       /** @type: Type of the this sync object */
+       __u32 type;
 
-       /** @pad: MBZ */
-       __u32 pad;
+#define DRM_XE_SYNC_FLAG_SIGNAL        (1 << 0)
+       /** @flags: Sync Flags */
+       __u32 flags;
 
        union {
                __u32 handle;
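
A note on the sync change above: what used to be a single flags field is now
split, with the object kind in the new @type member (DRM_XE_SYNC_TYPE_*) and
@flags only carrying DRM_XE_SYNC_FLAG_SIGNAL. A minimal sketch of a submission
using the new layout, assuming an already-created exec queue and syncobj
(exec_queue_id, syncobj_handle and batch_addr are placeholder names, error
handling omitted):

   #include <stdint.h>
   #include <sys/ioctl.h>
   #include "drm-uapi/xe_drm.h"   /* Mesa's in-tree copy of the header */

   static void submit_and_signal(int fd, uint32_t exec_queue_id,
                                 uint32_t syncobj_handle, uint64_t batch_addr)
   {
      /* Kind and behaviour of the sync object are now separate fields. */
      struct drm_xe_sync sync = {
         .type = DRM_XE_SYNC_TYPE_SYNCOBJ,   /* formerly encoded in .flags */
         .flags = DRM_XE_SYNC_FLAG_SIGNAL,   /* the only remaining flag */
         .handle = syncobj_handle,
      };
      struct drm_xe_exec exec = {
         .exec_queue_id = exec_queue_id,
         .num_syncs = 1,
         .syncs = (uintptr_t)&sync,
         .address = batch_addr,
         .num_batch_buffer = 1,
      };
      ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);   /* returns 0 on success */
   }

With the previous uAPI the same submission would have set
.flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, which is exactly
the pattern replaced in the iris and anv changes below.
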
diff --git a/src/gallium/drivers/iris/xe/iris_kmd_backend.c b/src/gallium/drivers/iris/xe/iris_kmd_backend.c
index 2a48377e75e..4c4d731ca6a 100644
--- a/src/gallium/drivers/iris/xe/iris_kmd_backend.c
+++ b/src/gallium/drivers/iris/xe/iris_kmd_backend.c
@@ -63,7 +63,7 @@ xe_gem_create(struct iris_bufmgr *bufmgr,
      .flags = flags,
    };
    for (uint16_t i = 0; i < regions_count; i++)
-      gem_create.flags |= BITFIELD_BIT(regions[i]->instance);
+      gem_create.placement |= BITFIELD_BIT(regions[i]->instance);
 
    const struct intel_device_info *devinfo = iris_bufmgr_get_device_info(bufmgr);
    const struct intel_device_info_pat_entry *pat_entry;
@@ -350,13 +350,12 @@ xe_batch_submit(struct iris_batch *batch)
 
       util_dynarray_foreach(&batch->exec_fences, struct iris_batch_fence,
                             fence) {
-         uint32_t flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
 
          if (fence->flags & IRIS_BATCH_FENCE_SIGNAL)
-            flags |= DRM_XE_SYNC_FLAG_SIGNAL;
+            syncs[i].flags = DRM_XE_SYNC_FLAG_SIGNAL;
 
          syncs[i].handle = fence->handle;
-         syncs[i].flags = flags;
+         syncs[i].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
          i++;
       }
    }
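
The gem_create adaptation above is mirrored in the anv backend further down:
the mask of allowed memory instances moves out of @flags into the new
@placement member, and the remaining flags are renumbered to start at bit 0.
A sketch of the new call (same includes as the earlier sketch; fd, size and
the *_instance values are placeholders, with size assumed to already respect
the regions' min_page_size):

   static uint32_t create_bo(int fd, uint64_t size,
                             uint16_t vram_instance, uint16_t sysmem_instance)
   {
      struct drm_xe_gem_create gem_create = {
         .size = size,                          /* aligned to min_page_size */
         .placement = (1u << vram_instance) |   /* new field: allowed instances */
                      (1u << sysmem_instance),
         .flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM, /* flags start at bit 0 now */
         .cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
      };
      if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &gem_create))
         return 0;                              /* 0 is never a valid GEM handle */
      return gem_create.handle;
   }
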
diff --git a/src/intel/common/xe/intel_engine.c b/src/intel/common/xe/intel_engine.c
index 67ba34faab5..bbd19349aab 100644
--- a/src/intel/common/xe/intel_engine.c
+++ b/src/intel/common/xe/intel_engine.c
@@ -70,25 +70,23 @@ intel_engine_class_to_xe(enum intel_engine_class intel)
 struct intel_query_engine_info *
 xe_engine_get_info(int fd)
 {
-   struct drm_xe_engine_class_instance *xe_engines;
-   uint32_t len;
+   struct drm_xe_query_engines *xe_engines;
 
-   xe_engines = xe_device_query_alloc_fetch(fd, DRM_XE_DEVICE_QUERY_ENGINES, &len);
+   xe_engines = xe_device_query_alloc_fetch(fd, DRM_XE_DEVICE_QUERY_ENGINES, NULL);
    if (!xe_engines)
       return NULL;
 
-   const uint32_t engines_count = len / sizeof(*xe_engines);
    struct intel_query_engine_info *intel_engines_info;
    intel_engines_info = calloc(1, sizeof(*intel_engines_info) +
                                sizeof(*intel_engines_info->engines) *
-                               engines_count);
+                               xe_engines->num_engines);
    if (!intel_engines_info) {
       goto error_free_xe_engines;
       return NULL;
    }
 
-   for (uint32_t i = 0; i < engines_count; i++) {
-      struct drm_xe_engine_class_instance *xe_engine = &xe_engines[i];
+   for (uint32_t i = 0; i < xe_engines->num_engines; i++) {
+      struct drm_xe_engine_class_instance *xe_engine = &xe_engines->engines[i].instance;
       struct intel_engine_class_instance *intel_engine = &intel_engines_info->engines[i];
 
       intel_engine->engine_class = xe_engine_class_to_intel(xe_engine->engine_class);
@@ -96,7 +94,7 @@ xe_engine_get_info(int fd)
       intel_engine->gt_id = xe_engine->gt_id;
    }
 
-   intel_engines_info->num_engines = engines_count;
+   intel_engines_info->num_engines = xe_engines->num_engines;
    free(xe_engines);
    return intel_engines_info;
 
diff --git a/src/intel/dev/xe/intel_device_info.c b/src/intel/dev/xe/intel_device_info.c
index d740dccad81..ef1e82d5629 100644
--- a/src/intel/dev/xe/intel_device_info.c
+++ b/src/intel/dev/xe/intel_device_info.c
@@ -85,8 +85,8 @@ intel_device_info_xe_query_regions(int fd, struct intel_device_info *devinfo,
    if (!regions)
       return false;
 
-   for (int i = 0; i < regions->num_regions; i++) {
-      struct drm_xe_query_mem_region *region = &regions->regions[i];
+   for (int i = 0; i < regions->num_mem_regions; i++) {
+      struct drm_xe_mem_region *region = &regions->mem_regions[i];
 
       switch (region->mem_class) {
       case DRM_XE_MEM_REGION_CLASS_SYSMEM: {
@@ -139,7 +139,7 @@ xe_query_gts(int fd, struct intel_device_info *devinfo)
 
    for (uint32_t i = 0; i < gt_list->num_gt; i++) {
       if (gt_list->gt_list[i].type == DRM_XE_QUERY_GT_TYPE_MAIN)
-         devinfo->timestamp_frequency = gt_list->gt_list[i].clock_freq;
+         devinfo->timestamp_frequency = gt_list->gt_list[i].reference_clock;
    }
 
    free(gt_list);
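
The intel_device_info.c change only tracks the rename of @clock_freq to
@reference_clock (plus the new @tile_id/@pad layout); the query itself stays
the usual two-call DRM_IOCTL_XE_DEVICE_QUERY sequence. A sketch of reading the
timestamp frequency with the renamed member, assuming the same includes as
above plus <stdlib.h> and the DRM_XE_DEVICE_QUERY_GT_LIST query id from this
header:

   static uint32_t query_timestamp_frequency(int fd)
   {
      struct drm_xe_device_query query = {
         .query = DRM_XE_DEVICE_QUERY_GT_LIST,
      };
      /* First call with .size = 0 returns the required buffer size. */
      if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
         return 0;

      struct drm_xe_query_gt_list *gt_list = malloc(query.size);
      if (!gt_list)
         return 0;
      query.data = (uintptr_t)gt_list;
      if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
         free(gt_list);
         return 0;
      }

      uint32_t freq = 0;
      for (uint32_t i = 0; i < gt_list->num_gt; i++) {
         if (gt_list->gt_list[i].type == DRM_XE_QUERY_GT_TYPE_MAIN)
            freq = gt_list->gt_list[i].reference_clock;   /* was clock_freq */
      }
      free(gt_list);
      return freq;
   }
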
diff --git a/src/intel/vulkan/xe/anv_batch_chain.c b/src/intel/vulkan/xe/anv_batch_chain.c
index 8d25781b1f5..0a1c4573de4 100644
--- a/src/intel/vulkan/xe/anv_batch_chain.c
+++ b/src/intel/vulkan/xe/anv_batch_chain.c
@@ -45,7 +45,8 @@ xe_execute_simple_batch(struct anv_queue *queue,
       return vk_errorf(device, VK_ERROR_UNKNOWN, "Unable to create sync obj");
 
    struct drm_xe_sync sync = {
-      .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+      .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+      .flags = DRM_XE_SYNC_FLAG_SIGNAL,
       .handle = syncobj_handle,
    };
    struct drm_xe_exec exec = {
@@ -91,14 +92,14 @@ xe_exec_fill_sync(struct drm_xe_sync *xe_sync, struct vk_sync *vk_sync,
    xe_sync->handle = syncobj->syncobj;
 
    if (value) {
-      xe_sync->flags |= DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ;
+      xe_sync->type = DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ;
       xe_sync->timeline_value = value;
    } else {
-      xe_sync->flags |= DRM_XE_SYNC_FLAG_SYNCOBJ;
+      xe_sync->type = DRM_XE_SYNC_TYPE_SYNCOBJ;
    }
 
    if (signal)
-      xe_sync->flags |= DRM_XE_SYNC_FLAG_SIGNAL;
+      xe_sync->flags = DRM_XE_SYNC_FLAG_SIGNAL;
 }
 
 static VkResult
@@ -193,7 +194,8 @@ xe_execute_trtt_batch(struct anv_sparse_submission *submit,
    VkResult result;
 
    struct drm_xe_sync extra_sync = {
-      .flags = DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+      .type = DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ,
+      .flags = DRM_XE_SYNC_FLAG_SIGNAL,
       .handle = trtt->timeline_handle,
       .timeline_value = trtt_bbo->timeline_val,
    };
diff --git a/src/intel/vulkan/xe/anv_kmd_backend.c b/src/intel/vulkan/xe/anv_kmd_backend.c
index 529e9616a2b..6c4a58d4538 100644
--- a/src/intel/vulkan/xe/anv_kmd_backend.c
+++ b/src/intel/vulkan/xe/anv_kmd_backend.c
@@ -63,7 +63,7 @@ xe_gem_create(struct anv_device *device,
      .flags = flags,
    };
    for (uint16_t i = 0; i < regions_count; i++)
-      gem_create.flags |= BITFIELD_BIT(regions[i]->instance);
+      gem_create.placement |= BITFIELD_BIT(regions[i]->instance);
 
    const struct intel_device_info_pat_entry *pat_entry =
          anv_device_get_pat_entry(device, alloc_flags);
@@ -147,7 +147,6 @@ xe_vm_bind_op(struct anv_device *device,
          .obj_offset = bind->bo_offset,
          .range = bind->size,
          .addr = intel_48b_address(bind->address),
-         .tile_mask = 0,
          .op = DRM_XE_VM_BIND_OP_UNMAP,
          .flags = 0,
          .prefetch_mem_region_instance = 0,
diff --git a/src/intel/vulkan/xe/anv_queue.c b/src/intel/vulkan/xe/anv_queue.c
index a1e805299cf..ac043a40758 100644
--- a/src/intel/vulkan/xe/anv_queue.c
+++ b/src/intel/vulkan/xe/anv_queue.c
@@ -63,6 +63,22 @@ create_engine(struct anv_device *device,
       &physical->queue.families[queue_family_index];
    const struct intel_query_engine_info *engines = physical->engine_info;
    struct drm_xe_engine_class_instance *instances;
+   const VkDeviceQueueGlobalPriorityCreateInfoKHR *queue_priority =
+      vk_find_struct_const(pCreateInfo->pNext,
+                           DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR);
+   const VkQueueGlobalPriorityKHR priority = queue_priority ?
+                                             queue_priority->globalPriority :
+                                             VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
+
+   /* As per spec, the driver implementation may deny requests to acquire
+    * a priority above the default priority (MEDIUM) if the caller does not
+    * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_KHR
+    * is returned.
+    */
+   if (physical->max_context_priority >= VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR) {
+      if (priority > physical->max_context_priority)
+         return vk_error(device, VK_ERROR_NOT_PERMITTED_KHR);
+   }
 
    instances = vk_alloc(&device->vk.alloc,
                         sizeof(*instances) * queue_family->queueCount, 8,
@@ -83,12 +99,18 @@ create_engine(struct anv_device *device,
    }
 
    assert(device->vm_id != 0);
+   struct drm_xe_ext_set_property ext = {
+      .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+      .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
+      .value = anv_vk_priority_to_drm_sched_priority(priority),
+   };
    struct drm_xe_exec_queue_create create = {
          /* Allows KMD to pick one of those engines for the submission queue */
          .instances = (uintptr_t)instances,
          .vm_id = device->vm_id,
          .width = 1,
          .num_placements = count,
+         .extensions = (uintptr_t)&ext,
    };
    int ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
    vk_free(&device->vk.alloc, instances);
@@ -100,38 +122,7 @@ create_engine(struct anv_device *device,
    else
       queue->exec_queue_id = create.exec_queue_id;
 
-   const VkDeviceQueueGlobalPriorityCreateInfoKHR *queue_priority =
-      vk_find_struct_const(pCreateInfo->pNext,
-                           DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR);
-   const VkQueueGlobalPriorityKHR priority = queue_priority ?
-                                             queue_priority->globalPriority :
-                                             VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
-
-   /* As per spec, the driver implementation may deny requests to acquire
-    * a priority above the default priority (MEDIUM) if the caller does not
-    * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_KHR
-    * is returned.
-    */
-   if (physical->max_context_priority >= VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR) {
-      if (priority > physical->max_context_priority)
-         goto priority_error;
-
-      struct drm_xe_exec_queue_set_property exec_queue_property = {
-         .exec_queue_id = create.exec_queue_id,
-         .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
-         .value = anv_vk_priority_to_drm_sched_priority(priority),
-      };
-      ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY,
-                        &exec_queue_property);
-      if (ret && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR)
-         goto priority_error;
-   }
-
    return VK_SUCCESS;
-
-priority_error:
-   anv_xe_destroy_engine(device, queue);
-   return vk_error(device, VK_ERROR_NOT_PERMITTED_KHR);
 }
 
 VkResult

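
The anv_queue.c hunks show the userspace side of the removed
DRM_XE_EXEC_QUEUE_SET_PROPERTY ioctl: a queue priority is now requested at
creation time by chaining a drm_xe_ext_set_property extension into
drm_xe_exec_queue_create, rather than by a second ioctl after the queue
exists. A minimal sketch of that pattern (single placement; instance, vm_id
and drm_priority are placeholder values supplied by the caller):

   static int create_queue_with_priority(int fd, uint32_t vm_id,
                                         struct drm_xe_engine_class_instance *instance,
                                         uint64_t drm_priority, uint32_t *queue_id)
   {
      /* Properties are set through the extension chain at creation time. */
      struct drm_xe_ext_set_property priority_ext = {
         .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
         .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
         .value = drm_priority,
      };
      struct drm_xe_exec_queue_create create = {
         .extensions = (uintptr_t)&priority_ext,
         .vm_id = vm_id,
         .width = 1,
         .num_placements = 1,
         .instances = (uintptr_t)instance,
      };
      if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
         return -1;
      *queue_id = create.exec_queue_id;
      return 0;
   }

A consequence visible in the patch: a priority the kernel refuses now fails
queue creation itself, so anv checks the requested priority against
max_context_priority before issuing the ioctl instead of destroying the queue
afterwards.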