if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption
buffer\n");
return -ENOMEM;
}
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
- secondary_size, DRM_IVPU_BO_WC);
+ vdev->fw->secondary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption
buffer\n");
goto err_free_primary;
@@ -66,20 +64,39 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
- return;
-
if (cmdq->primary_preempt_buf)
ivpu_bo_free(cmdq->primary_preempt_buf);
if (cmdq->secondary_preempt_buf)
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
+static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv,
+ struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ int ret;
+
+ /* Use preemption buffer provided by the user space */
+ if (job->primary_preempt_buf)
+ return 0;
+
+ if (!cmdq->primary_preempt_buf) {
+ /* Allocate per command queue preemption buffers */
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+ return ret;
+ }
+
+ /* Use preemption buffers allocated by the kernel */
+ job->primary_preempt_buf = cmdq->primary_preempt_buf;
+ job->secondary_preempt_buf = cmdq->secondary_preempt_buf;
+
+ return 0;
+}
+
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
- int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq->mem)
goto err_free_cmdq;
- ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
- if (ret)
- ivpu_warn(vdev, "Failed to allocate preemption buffers,
preemption limited\n");
-
return cmdq;
err_free_cmdq:
@@ -429,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
- if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- if (cmdq->primary_preempt_buf) {
- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
- }
+ if (job->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf);
+ }
- if (cmdq->secondary_preempt_buf) {
- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
- entry->secondary_preempt_buf_size =
- ivpu_bo_size(cmdq->secondary_preempt_buf);
- }
+ if (job->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf);
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -663,6 +673,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
goto err_unlock;
}
+ ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize preemption buffers for
job %d: %d\n",
+ job->job_id, ret);
+ goto err_unlock;
+ }
+
job->cmdq_id = cmdq->id;
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
@@ -716,7 +733,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
- u32 buf_count, u32 commands_offset)
+ u32 buf_count, u32 commands_offset, u32 preempt_buffer_index)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -752,6 +769,20 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+ if (preempt_buffer_index) {
+ struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];
+
+ if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
+ ivpu_warn(vdev, "Preemption buffer is too small\n");
+ return -EINVAL;
+ }
+ if (ivpu_bo_is_mappable(preempt_bo)) {
+ ivpu_warn(vdev, "Preemption buffer cannot be mappable\n");
+ return -EINVAL;
+ }
+ job->primary_preempt_buf = preempt_bo;
+ }
+
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
if (ret) {
@@ -782,7 +813,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
- u8 priority)
+ u32 preempt_buffer_index, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
@@ -814,7 +845,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
+ preempt_buffer_index);
if (ret) {
ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
goto err_destroy_job;
@@ -868,7 +900,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
priority = ivpu_job_to_jsm_priority(args->priority);
return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
- (void __user *)args->buffers_ptr, args->commands_offset, priority);
+ (void __user *)args->buffers_ptr, args->commands_offset, 0, priority);
}
int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -885,6 +917,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
return -EINVAL;
+ if (args->preempt_buffer_index >= args->buffer_count)
+ return -EINVAL;
+
if (!IS_ALIGNED(args->commands_offset, 8))
return -EINVAL;
@@ -895,7 +930,8 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
return -EBADFD;
return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
- (void __user *)args->buffers_ptr, args->commands_offset, 0);
+ (void __user *)args->buffers_ptr, args->commands_offset,
+ args->preempt_buffer_index, 0);
}
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 2e301c2eea7b..6c8b9c739b51 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_JOB_H__
@@ -55,6 +55,8 @@ struct ivpu_job {
u32 job_id;
u32 engine_idx;
size_t bo_count;
+ struct ivpu_bo *primary_preempt_buf;
+ struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *bos[] __counted_by(bo_count);
};
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 160ee1411d4a..e470b0221e02 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -90,6 +90,7 @@ extern "C" {
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
#define DRM_IVPU_PARAM_CAPABILITIES 13
+#define DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE 14
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
@@ -176,6 +177,9 @@ struct drm_ivpu_param {
*
* %DRM_IVPU_PARAM_CAPABILITIES:
* Supported capabilities (read-only)
+ *
+ * %DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ * Size of the preemption buffer (read-only)
*/
__u32 param;
@@ -371,6 +375,13 @@ struct drm_ivpu_cmdq_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
+ /**
+ * @preempt_buffer_index:
+ *
+ * Index of the preemption buffer in the buffers_ptr array. An index of 0
+ * (the command buffer) means no user preemption buffer is provided.
+ */
+ __u32 preempt_buffer_index;
+ __u32 reserved;
};
/* drm_ivpu_bo_wait job status codes */
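
For reference, a minimal userspace sketch of the new flow follows. It is not part of the patch: the ioctl wrapper macros (DRM_IOCTL_IVPU_GET_PARAM, DRM_IOCTL_IVPU_BO_CREATE, DRM_IOCTL_IVPU_CMDQ_SUBMIT), the drm_ivpu_param and drm_ivpu_bo_create layouts, and the <drm/ivpu_accel.h> include path are assumed from the existing uAPI header; only DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE and preempt_buffer_index are introduced by this series. The driver rejects a user buffer that is smaller than the reported size or that was allocated as mappable, and treats index 0 as "no user preemption buffer".

/*
 * Userspace sketch (assumptions noted above): query the required preemption
 * buffer size, allocate a non-mappable BO of that size, and reference it by
 * index in the cmdq submit ioctl.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

static int submit_with_user_preempt_buf(int fd, uint32_t cmdq_id,
					 uint32_t cmd_bo_handle, uint32_t cmds_offset)
{
	struct drm_ivpu_param param = { .param = DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE };
	struct drm_ivpu_bo_create bo_args = { .flags = DRM_IVPU_BO_WC };
	struct drm_ivpu_cmdq_submit submit = { 0 };
	uint32_t handles[2];

	/* Read-only param added by this series: required preemption buffer size. */
	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &param))
		return -1;

	/* Allocate at least that much; leaving out a mappable flag keeps the BO non-mappable. */
	bo_args.size = param.value;
	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &bo_args))
		return -1;

	handles[0] = cmd_bo_handle;	/* buffer 0: command buffer */
	handles[1] = bo_args.handle;	/* buffer 1: preemption buffer */

	submit.cmdq_id = cmdq_id;
	submit.buffers_ptr = (uintptr_t)handles;
	submit.buffer_count = 2;
	submit.commands_offset = cmds_offset;
	submit.preempt_buffer_index = 1;	/* 0 would mean "no user buffer" */

	return ioctl(fd, DRM_IOCTL_IVPU_CMDQ_SUBMIT, &submit);
}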