
> -----Original Message-----
> From: Lazar, Lijo <[email protected]>
> Sent: Wednesday, December 3, 2025 8:55 PM
> To: [email protected]
> Cc: Zhang, Hawking <[email protected]>; Deucher, Alexander
> <[email protected]>; Koenig, Christian
> <[email protected]>; Zhang, Jesse(Jie) <[email protected]>
> Subject: [RFC PATCH v3 02/10] drm/amdgpu: Add cwsr functions
>
> Add functions related to cwsr handling inside amdgpu framework.
>
> Signed-off-by: Lijo Lazar <[email protected]>
> ---
>  drivers/gpu/drm/amd/amdgpu/Makefile      |   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h      |   3 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.c | 346 +++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.h |  67 +++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h   |   2 +-
>  5 files changed, 418 insertions(+), 2 deletions(-)
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.c
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.h
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
> index f65021678fc0..a5feb674508a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> @@ -67,7 +67,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
>       amdgpu_fw_attestation.o amdgpu_securedisplay.o \
>       amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
>       amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
> -     amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
> +     amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o amdgpu_cwsr.o
>
>  amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index fa71df36f4b3..b9920cab5d31 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -331,6 +331,7 @@ struct kfd_vm_fault_info;
>  struct amdgpu_hive_info;
>  struct amdgpu_reset_context;
>  struct amdgpu_reset_control;
> +struct amdgpu_cwsr_info;
>
>  enum amdgpu_cp_irq {
>       AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
> @@ -1325,6 +1326,8 @@ struct amdgpu_device {
>        * Must be last --ends in a flexible-array member.
>        */
>       struct amdgpu_kfd_dev           kfd;
> +
> +     struct amdgpu_cwsr_info *cwsr_info;
>  };
>
>  static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.c
> new file mode 100644
> index 000000000000..c0fc5a383071
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.c
> @@ -0,0 +1,346 @@
> +/*
> + * Copyright 2025 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + */
> +
> +#include <drm/drm_exec.h>
> +
> +#include "amdgpu.h"
> +#include "cwsr_trap_handler.h"
> +#include "amdgpu_cwsr.h"
> +
> +extern int cwsr_enable;
> +
> +#define AMDGPU_CWSR_TBA_MAX_SIZE (2 * AMDGPU_GPU_PAGE_SIZE)
> +#define AMDGPU_CWSR_TMA_MAX_SIZE (AMDGPU_GPU_PAGE_SIZE)
> +#define AMDGPU_CWSR_TMA_OFFSET (AMDGPU_CWSR_TBA_MAX_SIZE)
> +
> +enum amdgpu_cwsr_region {
> +     AMDGPU_CWSR_TBA,
> +     AMDGPU_CWSR_TMA,
> +};
> +
> +static inline uint64_t amdgpu_cwsr_tba_vaddr(struct amdgpu_device *adev)
> +{
> +     uint64_t addr = AMDGPU_VA_RESERVED_TRAP_START(adev);
[Zhang, Jesse(Jie)]  A new VA should be reserved for KGD userq to avoid VA 
conflicts with KFD CWSR.
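To make the suggestion concrete, one way to avoid overlapping the KFD CWSR range would be a separate reservation below the existing trap range, roughly like this (a sketch only; the AMDGPU_VA_RESERVED_USERQ_TRAP_* names are hypothetical and not part of this patch):

    #define AMDGPU_VA_RESERVED_USERQ_TRAP_SIZE       (3ULL << 12)
    #define AMDGPU_VA_RESERVED_USERQ_TRAP_START(adev) \
            (AMDGPU_VA_RESERVED_TRAP_START(adev) - AMDGPU_VA_RESERVED_USERQ_TRAP_SIZE)

amdgpu_cwsr_tba_vaddr() would then start from the new range instead of AMDGPU_VA_RESERVED_TRAP_START(adev), leaving the existing reservation to KFD; the total reserved VA size would presumably need to grow by the same amount.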

> +
> +     addr = amdgpu_gmc_sign_extend(addr);
> +
> +     return addr;
> +}
> +
> +static inline bool amdgpu_cwsr_is_supported(struct amdgpu_device *adev)
> +{
> +     uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
> +
> +     if (!cwsr_enable || gc_ver < IP_VERSION(9, 0, 1))
> +             return false;
> +
> +     return true;
> +}
> +
> +static void amdgpu_cwsr_init_isa_details(struct amdgpu_device *adev,
> +                                      struct amdgpu_cwsr_info *cwsr_info)
> +{
> +     uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
> +
> +     if (gc_ver < IP_VERSION(9, 0, 1)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_gfx8_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_gfx8_hex);
> +     } else if (gc_ver == IP_VERSION(9, 4, 1)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_arcturus_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_arcturus_hex);
> +     } else if (gc_ver == IP_VERSION(9, 4, 2)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_aldebaran_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_aldebaran_hex);
> +     } else if (gc_ver == IP_VERSION(9, 4, 3) ||
> +                gc_ver == IP_VERSION(9, 4, 4)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_gfx9_4_3_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_gfx9_4_3_hex);
> +     } else if (gc_ver == IP_VERSION(9, 5, 0)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_gfx9_5_0_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_gfx9_5_0_hex);
> +     } else if (gc_ver < IP_VERSION(10, 1, 1)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_gfx9_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_gfx9_hex);
> +     } else if (gc_ver < IP_VERSION(10, 3, 0)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_nv1x_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_nv1x_hex);
> +     } else if (gc_ver < IP_VERSION(11, 0, 0)) {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_gfx10_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_gfx10_hex);
> +     } else if (gc_ver < IP_VERSION(12, 0, 0)) {
> +             /* The gfx11 cwsr trap handler must fit inside a single page. */
> +             BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_gfx11_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_gfx11_hex);
> +     } else {
> +             BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) >
> +                          AMDGPU_CWSR_TBA_MAX_SIZE);
> +             cwsr_info->isa_buf = cwsr_trap_gfx12_hex;
> +             cwsr_info->isa_sz = sizeof(cwsr_trap_gfx12_hex);
> +     }
> +}
> +
> +int amdgpu_cwsr_init(struct amdgpu_device *adev)
> +{
> +     struct amdgpu_cwsr_info *cwsr_info __free(kfree) = NULL;
> +     void *ptr;
> +     int r;
> +
> +     if (!amdgpu_cwsr_is_supported(adev))
> +             return -EOPNOTSUPP;
> +
> +     cwsr_info = kzalloc(sizeof(*cwsr_info), GFP_KERNEL);
> +     if (!cwsr_info)
> +             return -ENOMEM;
> +     amdgpu_cwsr_init_isa_details(adev, cwsr_info);
> +
> +     if (!cwsr_info->isa_sz)
> +             return -EOPNOTSUPP;
> +
> +     r = amdgpu_bo_create_kernel(adev, AMDGPU_CWSR_TBA_MAX_SIZE, PAGE_SIZE,
> +                                 AMDGPU_GEM_DOMAIN_GTT, &cwsr_info->isa_bo,
> +                                 NULL, &ptr);
> +     if (r)
> +             return r;
> +
> +     memcpy(ptr, cwsr_info->isa_buf, cwsr_info->isa_sz);
> +     adev->cwsr_info = no_free_ptr(cwsr_info);
> +
> +     return 0;
> +}
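Side note for readers of the series: cwsr_info above is declared with __free(kfree) from <linux/cleanup.h>, so it is freed automatically on every early-return path, and no_free_ptr() transfers ownership to adev->cwsr_info on success. A minimal standalone sketch of the pattern (the example_* names are made up):

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    static int example_init(struct example_dev *dev)
    {
            struct example_state *state __free(kfree) = NULL;

            state = kzalloc(sizeof(*state), GFP_KERNEL);
            if (!state)
                    return -ENOMEM;

            if (!example_dev_supported(dev))
                    return -EOPNOTSUPP;       /* state is kfree()d automatically here */

            dev->state = no_free_ptr(state);  /* ownership moved, cleanup disabled */
            return 0;
    }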
> +
> +void amdgpu_cwsr_fini(struct amdgpu_device *adev)
> +{
> +     if (!amdgpu_cwsr_is_enabled(adev))
> +             return;
> +
> +     amdgpu_bo_free_kernel(&adev->cwsr_info->isa_bo, NULL, NULL);
> +     kfree(adev->cwsr_info);
> +     adev->cwsr_info = NULL;
> +}
> +
> +/*
> + * amdgpu_cwsr_map_region() is called as part of amdgpu_cwsr_alloc(), which is
> + * expected to run during amdgpu_vm_init(). It maps the virtual address from
> + * amdgpu_cwsr_tba_vaddr() into this VM, and each compute queue can use this
> + * virtual address for wave save/restore operations to support compute
> + * preemption.
> + */
> +static int amdgpu_cwsr_map_region(struct amdgpu_device *adev,
> +                               struct amdgpu_vm *vm,
> +                               struct amdgpu_cwsr_trap_obj *cwsr,
> +                               enum amdgpu_cwsr_region region)
> +{
> +     uint64_t cwsr_addr, va_flags, va;
> +     struct amdgpu_bo_va **bo_va;
> +     struct amdgpu_bo *bo;
> +     uint32_t size;
> +     int r;
> +
> +     if (!cwsr || !vm)
> +             return -EINVAL;
> +
> +     cwsr_addr = amdgpu_cwsr_tba_vaddr(adev);
> +
> +     if (region == AMDGPU_CWSR_TBA) {
> +             size = AMDGPU_CWSR_TBA_MAX_SIZE;
> +             bo_va = &cwsr->tba_va;
> +             bo = adev->cwsr_info->isa_bo;
> +             va = cwsr_addr;
> +             va_flags = (AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
> +                         AMDGPU_VM_PAGE_EXECUTABLE);
> +     } else {
> +             size = AMDGPU_CWSR_TMA_MAX_SIZE;
> +             bo_va = &cwsr->tma_va;
> +             bo = cwsr->tma_bo;
> +             va = cwsr_addr + AMDGPU_CWSR_TMA_OFFSET;
> +             va_flags = (AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE);
> +     }
> +
> +     *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
> +     if (!*bo_va)
> +             return -ENOMEM;
> +
> +     va &= AMDGPU_GMC_HOLE_MASK;
> +     r = amdgpu_vm_bo_map(adev, *bo_va, va, 0, size, va_flags);
> +
> +     if (r) {
> +             dev_err(adev->dev, "failed to map CWSR region, err=%d\n", r);
> +             amdgpu_vm_bo_del(adev, *bo_va);
> +             *bo_va = NULL;
> +     } else {
> +             if (region == AMDGPU_CWSR_TBA)
> +                     cwsr->tba_gpu_va_addr = va;
> +             else
> +                     cwsr->tma_gpu_va_addr = va;
> +     }
> +
> +     return r;
> +}
> +
> +static int amdgpu_cwsr_unmap_region(struct amdgpu_device *adev,
> +                                 struct amdgpu_cwsr_trap_obj *cwsr,
> +                                 enum amdgpu_cwsr_region region)
> +{
> +     struct amdgpu_bo_va **bo_va;
> +     uint64_t va;
> +     int r;
> +
> +     if (!cwsr)
> +             return -EINVAL;
> +
> +     if (region == AMDGPU_CWSR_TBA) {
> +             bo_va = &cwsr->tba_va;
> +             va = cwsr->tba_gpu_va_addr;
> +     } else {
> +             bo_va = &cwsr->tma_va;
> +             va = cwsr->tma_gpu_va_addr;
> +     }
> +
> +     r = amdgpu_vm_bo_unmap(adev, *bo_va, va);
> +     if (r) {
> +             dev_err(adev->dev,
> +                     "failed to do bo_unmap on CWSR trap handler, err=%d\n",
> +                     r);
> +             return r;
> +     }
> +
> +     amdgpu_vm_bo_del(adev, *bo_va);
> +     *bo_va = NULL;
> +
> +     return r;
> +}
> +
> +/* TBD : Handle APU allocation */
> +int amdgpu_cwsr_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> +                   struct amdgpu_cwsr_trap_obj **trap_obj)
> +{
> +     struct amdgpu_cwsr_trap_obj *cwsr __free(kfree) = NULL;
> +     struct amdgpu_bo *bo;
> +     struct drm_exec exec;
> +     int r;
> +
> +     if (!amdgpu_cwsr_is_enabled(adev))
> +             return -EOPNOTSUPP;
> +
> +     cwsr = kzalloc(sizeof(*cwsr), GFP_KERNEL);
> +     if (!cwsr)
> +             return -ENOMEM;
> +
> +     bo = adev->cwsr_info->isa_bo;
> +     drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
> +     drm_exec_until_all_locked(&exec) {
> +             r = amdgpu_vm_lock_pd(vm, &exec, 0);
> +             if (likely(!r))
> +                     r = drm_exec_lock_obj(&exec, &bo->tbo.base);
> +             drm_exec_retry_on_contention(&exec);
> +             if (unlikely(r)) {
> +                     dev_err(adev->dev,
> +                             "failed to reserve for CWSR allocs: err=%d\n",
> +                             r);
> +                     goto err;
> +             }
> +     }
> +
> +     r = amdgpu_bo_create_kernel(adev, AMDGPU_CWSR_TMA_MAX_SIZE, PAGE_SIZE,
> +                                 AMDGPU_GEM_DOMAIN_GTT, &cwsr->tma_bo, NULL,
> +                                 &cwsr->tma_cpu_addr);
> +     if (r)
> +             goto err;
> +
> +     r = amdgpu_cwsr_map_region(adev, vm, cwsr, AMDGPU_CWSR_TMA);
> +     if (r)
> +             goto err;
> +     r = amdgpu_cwsr_map_region(adev, vm, cwsr, AMDGPU_CWSR_TBA);
> +     if (r) {
> +             amdgpu_cwsr_unmap_region(adev, cwsr, AMDGPU_CWSR_TMA);
> +             goto err;
> +     }
> +
> +     *trap_obj = no_free_ptr(cwsr);
> +
> +err:
> +     drm_exec_fini(&exec);
> +     if (r)
> +             amdgpu_bo_free_kernel(&cwsr->tma_bo, NULL, NULL);
> +
> +     return r;
> +}
> +
> +void amdgpu_cwsr_free(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> +                   struct amdgpu_cwsr_trap_obj **trap_obj)
> +{
> +     struct amdgpu_bo *tba_bo;
> +     struct amdgpu_bo *tma_bo;
> +     struct drm_exec exec;
> +     int r;
> +
> +     if (!trap_obj || !*trap_obj || !(*trap_obj)->tma_bo)
> +             return;
> +     tba_bo = adev->cwsr_info->isa_bo;
> +     tma_bo = (*trap_obj)->tma_bo;
> +
> +     if (!tba_bo || !tma_bo)
> +             return;
> +
> +     drm_exec_init(&exec, 0, 0);
> +     drm_exec_until_all_locked(&exec) {
> +             r = amdgpu_vm_lock_pd(vm, &exec, 0);
> +             if (likely(!r))
> +                     r = drm_exec_lock_obj(&exec, &tba_bo->tbo.base);
> +             drm_exec_retry_on_contention(&exec);
> +             if (likely(!r))
> +                     r = drm_exec_lock_obj(&exec, &tma_bo->tbo.base);
> +             drm_exec_retry_on_contention(&exec);
> +             if (unlikely(r)) {
> +                     dev_err(adev->dev,
> +                             "failed to reserve CWSR BOs: err=%d\n", r);
> +                     goto err;
> +             }
> +     }
> +
> +     amdgpu_cwsr_unmap_region(adev, *trap_obj, AMDGPU_CWSR_TBA);
> +     amdgpu_cwsr_unmap_region(adev, *trap_obj, AMDGPU_CWSR_TMA);
> +err:
> +     drm_exec_fini(&exec);
> +     amdgpu_bo_free_kernel(&(*trap_obj)->tma_bo, NULL, NULL);
> +     kfree(*trap_obj);
> +     *trap_obj = NULL;
> +}
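For context, the expected call flow for these entry points looks roughly like this (a sketch based on this patch alone; the actual call sites are presumably wired up in later patches of the series):

    /* device init / teardown: stage the per-ASIC trap handler ISA in isa_bo */
    amdgpu_cwsr_init(adev);
    ...
    amdgpu_cwsr_fini(adev);

    /* per VM, e.g. around amdgpu_vm_init()/amdgpu_vm_fini() */
    struct amdgpu_cwsr_trap_obj *trap_obj = NULL;

    r = amdgpu_cwsr_alloc(adev, vm, &trap_obj); /* allocates TMA, maps TBA + TMA into the VM */
    ...
    amdgpu_cwsr_free(adev, vm, &trap_obj);      /* unmaps both regions and frees TMA */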
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.h
> new file mode 100644
> index 000000000000..26ed9308f70b
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cwsr.h
> @@ -0,0 +1,67 @@
> +/*
> + * Copyright 2025 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + */
> +
> +#ifndef AMDGPU_CWSR_H
> +#define AMDGPU_CWSR_H
> +
> +#include <linux/types.h>
> +
> +struct amdgpu_bo;
> +struct amdgpu_bo_va;
> +struct amdgpu_device;
> +struct amdgpu_vm;
> +
> +/**
> + * struct amdgpu_cwsr_trap_obj - CWSR (Compute Wave Save Restore) buffer tracking
> + * @tma_gpu_va_addr: GPU virtual address of the TMA region
> + * @tba_gpu_va_addr: GPU virtual address of the TBA region
> + * @tma_bo: Buffer object backing the TMA region
> + * @tba_va: Buffer object to virtual address mapping for the TBA region
> + * @tma_va: Buffer object to virtual address mapping for the TMA region
> + * @tma_cpu_addr: CPU address of the TMA region
> + */
> +struct amdgpu_cwsr_trap_obj {
> +     uint64_t tma_gpu_va_addr;
> +     uint64_t tba_gpu_va_addr;
> +
> +     struct amdgpu_bo *tma_bo;
> +     struct amdgpu_bo_va *tba_va;
> +     struct amdgpu_bo_va *tma_va;
> +     void *tma_cpu_addr;
> +};
> +
> +struct amdgpu_cwsr_info {
> +     /* cwsr isa */
> +     struct amdgpu_bo *isa_bo;
> +     const void *isa_buf;
> +     uint32_t isa_sz;
> +};
> +
> +int amdgpu_cwsr_init(struct amdgpu_device *adev);
> +void amdgpu_cwsr_fini(struct amdgpu_device *adev);
> +
> +int amdgpu_cwsr_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> +                   struct amdgpu_cwsr_trap_obj **cwsr_obj);
> +void amdgpu_cwsr_free(struct amdgpu_device *adev, struct amdgpu_vm *vm,
> +                   struct amdgpu_cwsr_trap_obj **cwsr_obj);
> +
> +static inline bool amdgpu_cwsr_is_enabled(struct amdgpu_device *adev)
> +{
> +     return adev->cwsr_info != NULL;
> +}
> +
> +#endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index 139642eacdd0..783ca2b8dfef 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -173,7 +173,7 @@ struct amdgpu_bo_vm;
>  #define AMDGPU_VA_RESERVED_SEQ64_SIZE                (2ULL << 20)
>  #define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \
>                                                - AMDGPU_VA_RESERVED_SEQ64_SIZE)
> -#define AMDGPU_VA_RESERVED_TRAP_SIZE         (2ULL << 12)
> +#define AMDGPU_VA_RESERVED_TRAP_SIZE         (3ULL << 12)
[Zhang, Jesse(Jie)] A new VA should be reserved for KGD userq to avoid VA conflicts with KFD CWSR.
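For context on the size bump: the region mapped by this patch is TBA (AMDGPU_CWSR_TBA_MAX_SIZE = 2 * AMDGPU_GPU_PAGE_SIZE = 8 KiB) plus TMA (AMDGPU_CWSR_TMA_MAX_SIZE = 4 KiB) at AMDGPU_CWSR_TMA_OFFSET = 8 KiB, i.e. 12 KiB in total, which is why the reservation grows from (2ULL << 12) = 8 KiB to (3ULL << 12) = 12 KiB. If the existing range stays dedicated to KFD CWSR as suggested above, that 12 KiB would need its own reservation (see the sketch after the first comment) rather than growing this one.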

>  #define AMDGPU_VA_RESERVED_TRAP_START(adev)  (AMDGPU_VA_RESERVED_SEQ64_START(adev) \
>                                                - AMDGPU_VA_RESERVED_TRAP_SIZE)
>  #define AMDGPU_VA_RESERVED_BOTTOM            (1ULL << 16)
> --
> 2.49.0
