list_for_each_entry(tmp_client, &xdna->client_list, node) {
- ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb);
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args, aie2_hwctx_status_cb);
if (ret)
break;
}
- args->buffer_size = (u32)(info_args.buffer - args->buffer);
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
return ret;
}
@@ -877,6 +888,58 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
return ret;
}
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return ret;
+}
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
static int aie2_set_power_mode(struct amdxdna_client *client,
struct amdxdna_drm_set_state *args)
{
@@ -926,15 +989,16 @@ static int aie2_set_state(struct amdxdna_client *client,
}
const struct amdxdna_dev_ops aie2_ops = {
- .init = aie2_init,
- .fini = aie2_fini,
- .resume = aie2_hw_resume,
- .suspend = aie2_hw_suspend,
- .get_aie_info = aie2_get_info,
- .set_aie_state = aie2_set_state,
- .hwctx_init = aie2_hwctx_init,
- .hwctx_fini = aie2_hwctx_fini,
- .hwctx_config = aie2_hwctx_config,
- .cmd_submit = aie2_cmd_submit,
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_resume,
+ .suspend = aie2_hw_suspend,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .cmd_submit = aie2_cmd_submit,
.hmm_invalidate = aie2_hmm_invalidate,
+ .get_array = aie2_get_array,
};
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index 8ef5e4f27f5e..cb574ad7bfc3 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -26,6 +26,13 @@ MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 1
+
/*
 * Bind the driver base on (vendor_id, device_id) pair and later use the
 * (device_id, rev_id) pair as a key to select the devices. The devices with
@@ -164,6 +171,26 @@ static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct
return ret;
}
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element)
+ return -EINVAL;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->get_array(client, args);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
@@ -195,6 +222,7 @@ static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
 DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
 /* AIE hardware */
 DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
 DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
@@ -218,6 +246,8 @@ const struct drm_driver amdxdna_drm_drv = {
.fops = &amdxdna_fops,
.name = "amdxdna_accel_driver",
.desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
.open = amdxdna_drm_open,
.postclose = amdxdna_drm_close,
.ioctls = amdxdna_drm_ioctls,
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index b6b3b424d1d5..72d6696d49da 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -58,6 +58,7 @@ struct amdxdna_dev_ops {
 int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
 int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
 int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+ int (*get_array)(struct amdxdna_client *client, struct amdxdna_drm_get_array *args);
};
/*
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index ce523e9ccc52..2e363880c1eb 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -34,6 +34,7 @@ enum amdxdna_drm_ioctl_id {
DRM_AMDXDNA_EXEC_CMD,
DRM_AMDXDNA_GET_INFO,
DRM_AMDXDNA_SET_STATE,
+ DRM_AMDXDNA_GET_ARRAY = 10,
};
/**
@@ -455,6 +456,110 @@ struct amdxdna_drm_get_info {
__u64 buffer; /* in/out */
};
+#define AMDXDNA_HWCTX_STATE_IDLE 0
+#define AMDXDNA_HWCTX_STATE_ACTIVE 1
+
+/**
+ * struct amdxdna_drm_hwctx_entry - The hardware context array entry
+ */
+struct amdxdna_drm_hwctx_entry {
+ /** @context_id: Context ID. */
+ __u32 context_id;
+ /** @start_col: Start AIE array column assigned to context. */
+ __u32 start_col;
+ /** @num_col: Number of AIE array columns assigned to context. */
+ __u32 num_col;
+ /** @hwctx_id: The real hardware context id. */
+ __u32 hwctx_id;
+ /** @pid: ID of process which created this context. */
+ __s64 pid;
+ /** @command_submissions: Number of commands submitted. */
+ __u64 command_submissions;
+ /** @command_completions: Number of commands completed. */
+ __u64 command_completions;
+ /** @migrations: Number of times been migrated. */
+ __u64 migrations;
+ /** @preemptions: Number of times been preempted. */
+ __u64 preemptions;
+ /** @errors: Number of errors happened. */
+ __u64 errors;
+ /** @priority: Context priority. */
+ __u64 priority;
+ /** @heap_usage: Usage of device heap buffer. */
+ __u64 heap_usage;
+ /** @suspensions: Number of times been suspended. */
+ __u64 suspensions;
+ /**
+ * @state: Context state.
+ * %AMDXDNA_HWCTX_STATE_IDLE
+ * %AMDXDNA_HWCTX_STATE_ACTIVE
+ */
+ __u32 state;
+ /** @pasid: PASID been bound. */
+ __u32 pasid;
+ /** @gops: Giga operations per second. */
+ __u32 gops;
+ /** @fps: Frames per second. */
+ __u32 fps;
+ /** @dma_bandwidth: DMA bandwidth. */
+ __u32 dma_bandwidth;
+ /** @latency: Frame response latency. */
+ __u32 latency;
+ /** @frame_exec_time: Frame execution time. */
+ __u32 frame_exec_time;
+ /** @txn_op_idx: Index of last control code executed. */
+ __u32 txn_op_idx;
+ /** @ctx_pc: Program counter. */
+ __u32 ctx_pc;
+ /** @fatal_error_type: Fatal error type if context crashes. */
+ __u32 fatal_error_type;
+ /** @fatal_error_exception_type: Firmware exception type. */
+ __u32 fatal_error_exception_type;
+ /** @fatal_error_exception_pc: Firmware exception program counter. */
+ __u32 fatal_error_exception_pc;
+ /** @fatal_error_app_module: Exception module name. */
+ __u32 fatal_error_app_module;
+};
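
For context, here is a minimal userspace sketch of how the new array query might be driven. It is illustrative only and not part of the patch: the helper name dump_hwctx_array, the file descriptor fd, and the include path for the uapi header are assumptions; the amdxdna_drm_get_array fields (param, element_size, num_element, pad, buffer), DRM_AMDXDNA_HW_CONTEXT_ALL, and the DRM_IOCTL_AMDXDNA_GET_ARRAY wrapper are taken to be defined in amdxdna_accel.h as referenced by the patch.

/* Illustrative userspace sketch; not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdxdna_accel.h>	/* assumed install path of the uapi header */

static void dump_hwctx_array(int fd)
{
	struct amdxdna_drm_hwctx_entry entries[32];
	struct amdxdna_drm_get_array args;
	const char *buf;
	uint32_t i;

	memset(&args, 0, sizeof(args));
	args.param = DRM_AMDXDNA_HW_CONTEXT_ALL;
	args.element_size = sizeof(entries[0]);
	args.num_element = 32;			/* in: capacity of the buffer */
	args.buffer = (uintptr_t)entries;

	if (ioctl(fd, DRM_IOCTL_AMDXDNA_GET_ARRAY, &args))
		return;

	/*
	 * On return, num_element is the number of entries written and
	 * element_size may have been clamped to the kernel's entry size;
	 * entries are packed back to back at that stride.
	 */
	buf = (const char *)entries;
	for (i = 0; i < args.num_element; i++) {
		struct amdxdna_drm_hwctx_entry e;

		memset(&e, 0, sizeof(e));
		memcpy(&e, buf + (size_t)i * args.element_size,
		       args.element_size);
		printf("pid %lld ctx %u: cols %u+%u, state %u\n",
		       (long long)e.pid, e.context_id, e.start_col,
		       e.num_col, e.state);
	}
}

Copying each entry through memcpy rather than indexing the array directly keeps the sketch correct even when the kernel reports a smaller element_size than the caller passed in.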