Get rid of the version check and hardcoded mask values, and instead calculate the hqd mask from the number of queues per pipe and the number of gfx/compute queues the kernel uses.
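For example, on GFX v12 there is a single gfx pipe with 8 queues. Assuming one kernel gfx ring, queue_mask = (1 << 8) - 1 = 0xFF and reserved_queue_mask = (1 << (ALIGN(1, 1) / 1)) - 1 = 0x1, so gfx_hqd_mask = 0xFF & ~0x1 = 0xFE, matching the value that was previously hardcoded.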
Signed-off-by: Lang Yu <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 42 ++++++++++++-------------
 1 file changed, 20 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index dffa0f7276b7..5b5962a727f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -31,7 +31,6 @@
 
 #define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 #define AMDGPU_ONE_DOORBELL_SIZE 8
-#define AMDGPU_MES_RESERVED_QUEUES 2
 
 int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
 {
@@ -115,8 +114,10 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 	adev->mes.vmid_mask_mmhub = 0xFF00;
 	adev->mes.vmid_mask_gfxhub = total_vmid_mask & ~reserved_vmid_mask;
 
-	queue_mask = (u32)(1UL << adev->gfx.mec.num_queue_per_pipe) - 1;
-	reserved_queue_mask = (u32)(1UL << AMDGPU_MES_RESERVED_QUEUES) - 1;
+	queue_mask = (u32)(1UL << adev->gfx.me.num_queue_per_pipe) - 1;
+	reserved_queue_mask = (u32)(1UL << (ALIGN(adev->gfx.num_gfx_rings,
+						  adev->gfx.me.num_pipe_per_me) /
+					    adev->gfx.me.num_pipe_per_me)) - 1;
 
 	num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
 	if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
@@ -126,34 +127,26 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
 		if (i >= num_pipes)
 			break;
-		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
-		    IP_VERSION(12, 0, 0))
-			/*
-			 * GFX V12 has only one GFX pipe, but 8 queues in it.
-			 * GFX pipe 0 queue 0 is being used by Kernel queue.
-			 * Set GFX pipe 0 queue 1-7 for MES scheduling
-			 * mask = 1111 1110b
-			 */
-			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
-		else
-			/*
-			 * GFX pipe 0 queue 0 is being used by Kernel queue.
-			 * Set GFX pipe 0 queue 1 for MES scheduling
-			 * mask = 10b
-			 */
-			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+
+		adev->mes.gfx_hqd_mask[i] = (queue_mask & ~reserved_queue_mask);
 	}
 
+	queue_mask = (u32)(1UL << adev->gfx.mec.num_queue_per_pipe) - 1;
+	reserved_queue_mask = (u32)(1UL << (ALIGN(adev->gfx.num_compute_rings,
+						  adev->gfx.mec.num_pipe_per_mec) /
+					    adev->gfx.mec.num_pipe_per_mec)) - 1;
+
 	num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
 	if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
 		dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
 			 num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
 
 	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
-		if (i >= num_pipes)
+		/* skip MEC2, since only MEC1 is used */
+		if (i >= adev->gfx.mec.num_pipe_per_mec)
 			break;
-		adev->mes.compute_hqd_mask[i] =
-			adev->gfx.disable_kq ? 0xF : (queue_mask & ~reserved_queue_mask);
+
+		adev->mes.compute_hqd_mask[i] = (queue_mask & ~reserved_queue_mask);
 	}
 
 	num_pipes = adev->sdma.num_instances;
@@ -167,6 +160,11 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 		adev->mes.sdma_hqd_mask[i] = 0xfc;
 	}
 
+	dev_info(adev->dev, "MES: vmid_mask_mmhub 0x%08x, vmid_mask_gfxhub 0x%08x\n",
+		 adev->mes.vmid_mask_mmhub, adev->mes.vmid_mask_gfxhub);
+	dev_info(adev->dev, "MES: gfx_hqd_mask 0x%08x, compute_hqd_mask 0x%08x, sdma_hqd_mask 0x%08x\n",
+		 adev->mes.gfx_hqd_mask[0], adev->mes.compute_hqd_mask[0], adev->mes.sdma_hqd_mask[0]);
+
 	for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
 		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
 		if (r) {
-- 
2.34.1
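
For reference, below is a minimal standalone sketch of the new mask arithmetic. It is not part of the patch; the ALIGN definition mirrors the kernel's power-of-two alignment macro, and the hqd_mask() helper and the example ring/pipe counts are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Kernel-style ALIGN: round x up to a multiple of a (a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/*
 * Hypothetical helper: compute the HQD mask for one pipe from the
 * number of queues per pipe, the number of kernel rings, and the
 * number of pipes those rings are spread across.
 */
static uint32_t hqd_mask(unsigned int num_queue_per_pipe,
			 unsigned int num_rings,
			 unsigned int num_pipes)
{
	uint32_t queue_mask = (uint32_t)(1UL << num_queue_per_pipe) - 1;
	/* Reserve the low queues occupied by kernel rings on this pipe. */
	uint32_t reserved = (uint32_t)(1UL << (ALIGN(num_rings, num_pipes) /
					       num_pipes)) - 1;

	return queue_mask & ~reserved;
}

int main(void)
{
	/* GFX v12: one gfx pipe, 8 queues, one kernel gfx ring -> 0xFE */
	printf("gfx_hqd_mask     = 0x%02X\n", (unsigned)hqd_mask(8, 1, 1));
	/* Example MEC: 4 pipes, 8 queues each, 8 kernel compute rings -> 0xFC */
	printf("compute_hqd_mask = 0x%02X\n", (unsigned)hqd_mask(8, 8, 4));
	return 0;
}

The ALIGN rounds the ring count up before dividing, so that when the kernel rings do not divide evenly across the pipes, each pipe still reserves enough low queues to cover them.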
