On Wed, Dec 3, 2025 at 8:14 AM Lijo Lazar <[email protected]> wrote:
>
> Add cwsr_trap_obj to render file handle. It maps the first level cwsr
> handler to the vm with which the file handle is associated. Use
> cwsr trap object's tba/tma address for the userqueue.
>
> Signed-off-by: Lijo Lazar <[email protected]>
Acked-by: Alex Deucher <[email protected]>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h        | 2 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    | 8 ++++++++
>  drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 6 ++++++
>  3 files changed, 16 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index b9920cab5d31..ec2919a9c636 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -332,6 +332,7 @@ struct amdgpu_hive_info;
>  struct amdgpu_reset_context;
>  struct amdgpu_reset_control;
>  struct amdgpu_cwsr_isa;
> +struct amdgpu_cwsr_trap_obj;
>
>  enum amdgpu_cp_irq {
>          AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
> @@ -505,6 +506,7 @@ struct amdgpu_fpriv {
>          struct idr bo_list_handles;
>          struct amdgpu_ctx_mgr ctx_mgr;
>          struct amdgpu_userq_mgr userq_mgr;
> +        struct amdgpu_cwsr_trap_obj *cwsr_trap;
>
>          /* Eviction fence infra */
>          struct amdgpu_eviction_fence_mgr evf_mgr;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index b3e6b3fcdf2c..398d6c8d343c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -46,6 +46,7 @@
>  #include "amdgpu_reset.h"
>  #include "amd_pcie.h"
>  #include "amdgpu_userq.h"
> +#include "amdgpu_cwsr.h"
>
>  void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
>  {
> @@ -1452,6 +1453,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
>          if (r)
>                  DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
>
> +        if (amdgpu_cwsr_is_enabled(adev)) {
> +                r = amdgpu_cwsr_alloc(adev, &fpriv->vm, &fpriv->cwsr_trap);
> +                if (r)
> +                        dev_dbg(adev->dev, "cwsr trap not enabled");
> +        }
> +
>          r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
>          if (r)
>                  goto error_vm;
> @@ -1524,6 +1531,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
>          }
>
>          amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
> +        amdgpu_cwsr_free(adev, &fpriv->vm, &fpriv->cwsr_trap);
>          amdgpu_vm_fini(adev, &fpriv->vm);
>
>          if (pasid)
> diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> index 8b0aeb89025a..480f4806e951 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
> @@ -26,6 +26,7 @@
>  #include "amdgpu_gfx.h"
>  #include "mes_userqueue.h"
>  #include "amdgpu_userq_fence.h"
> +#include "amdgpu_cwsr.h"
>
>  #define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
>  #define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
> @@ -116,6 +117,7 @@ static int convert_to_mes_priority(int priority)
>  static int mes_userq_map(struct amdgpu_usermode_queue *queue)
>  {
>          struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
> +        struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
>          struct amdgpu_device *adev = uq_mgr->adev;
>          struct amdgpu_userq_obj *ctx = &queue->fw_obj;
>          struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
> @@ -145,6 +147,10 @@ static int mes_userq_map(struct amdgpu_usermode_queue *queue)
>          queue_input.doorbell_offset = userq_props->doorbell_index;
>          queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
>          queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
> +        if (fpriv->cwsr_trap) {
> +                queue_input.tba_addr = fpriv->cwsr_trap->tba_gpu_va_addr;
> +                queue_input.tma_addr = fpriv->cwsr_trap->tma_gpu_va_addr;
> +        }
>
>          amdgpu_mes_lock(&adev->mes);
>          r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
> --
> 2.49.0
>
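
As an aside for anyone reading this patch without the earlier part of the series: the only thing this change requires of the trap object is that it carries the two GPU VAs consumed in mes_userq_map(), roughly along the lines of the sketch below. Only tba_gpu_va_addr and tma_gpu_va_addr are taken from this patch; the other members are assumptions about how the object is probably backed, not the actual definition from the series.

/*
 * Hypothetical sketch only -- not the definition from the series.
 * Illustrates the per-open() CWSR trap object this patch stores in
 * amdgpu_fpriv and hands to MES when mapping a user queue.
 */
struct amdgpu_cwsr_trap_obj {
        struct amdgpu_bo *bo;        /* assumed: BO holding the first-level trap handler and TMA */
        struct amdgpu_bo_va *bo_va;  /* assumed: mapping of that BO into the fpriv's VM */
        u64 tba_gpu_va_addr;         /* trap base address (TBA) programmed per queue (used in this patch) */
        u64 tma_gpu_va_addr;         /* trap memory area (TMA) address programmed per queue (used in this patch) */
};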
