Both the gfx ring and MES currently use cp0 to flush HDP, which causes a conflict. Add a get_ref_and_mask() ring function to pick the CP entry for the ring doing the flush, and reassign MES to use cp8 instead.
Signed-off-by: chong li <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c  | 15 +++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h |  2 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c   |  3 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c   | 40 +++++++++++++++++++-----
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c   | 40 +++++++++++++++++++-----
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c    |  3 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c    |  3 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c  |  3 +-
 8 files changed, 87 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 895b841b9626..73305f07ddd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -556,11 +556,22 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
 
 int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
 {
-	uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+	uint32_t hdp_flush_req_offset, hdp_flush_done_offset;
+	struct amdgpu_ring *gfx_ring, *mes_ring;
+	uint32_t ref_and_mask = 0, reg_mem_engine = 0;
+
+	gfx_ring = &adev->gfx.gfx_ring[0];
+	mes_ring = &adev->mes.ring[0];
+
+	if (!gfx_ring->funcs->get_ref_and_mask) {
+		dev_err(adev->dev, "amdgpu_mes_hdp_flush not supported\n");
+		return -EINVAL;
+	}
 
 	hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
 	hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
-	ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+	gfx_ring->funcs->get_ref_and_mask(mes_ring, &ref_and_mask, &reg_mem_engine);
 
 	return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset,
 			hdp_flush_done_offset, ref_and_mask, ref_and_mask, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index a1fb0fadb6ea..adca4396658e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -250,6 +250,8 @@ struct amdgpu_ring_funcs {
 	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
 			      uint64_t pd_addr);
 	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+	void (*get_ref_and_mask)(struct amdgpu_ring *ring,
+			uint32_t *ref_and_mask, uint32_t *reg_mem_engine);
 	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
 				uint32_t gds_base, uint32_t gds_size,
 				uint32_t gws_base, uint32_t gws_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index aaed24f7e716..aafd34ddcfcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -8616,7 +8616,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask, reg_mem_engine;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
+	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f4d4dd5dd07b..0ada69d0cc68 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -5829,29 +5829,50 @@ static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 	}
 }
 
-static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+/**
+ * gfx_v11_0_ring_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v11_0_ring_get_ref_and_mask(struct amdgpu_ring *ring,
+		uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 ref_and_mask, reg_mem_engine;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_MES ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
 		switch (ring->me) {
 		case 1:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+			*ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
 			break;
 		case 2:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+			*ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+			break;
+		case 3:
+			*ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
 			break;
 		default:
 			return;
 		}
-		reg_mem_engine = 0;
+		*reg_mem_engine = 0;
 	} else {
-		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
-		reg_mem_engine = 1; /* pfp */
+		*ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
+		*reg_mem_engine = 1; /* pfp */
 	}
+}
+
+static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 ref_and_mask, reg_mem_engine;
+	ring->funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
 
 	gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
@@ -7235,6 +7256,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
+	.get_ref_and_mask = gfx_v11_0_ring_get_ref_and_mask,
 	.test_ring = gfx_v11_0_ring_test_ring,
 	.test_ib = gfx_v11_0_ring_test_ib,
 	.insert_nop = gfx_v11_ring_insert_nop,
@@ -7281,6 +7303,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
+	.get_ref_and_mask = gfx_v11_0_ring_get_ref_and_mask,
 	.test_ring = gfx_v11_0_ring_test_ring,
 	.test_ib = gfx_v11_0_ring_test_ib,
 	.insert_nop = gfx_v11_ring_insert_nop,
@@ -7323,6 +7346,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
+	.get_ref_and_mask = gfx_v11_0_ring_get_ref_and_mask,
 };
 
 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index f9cae6666697..0bdb4f292dea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -4385,29 +4385,50 @@ static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 	}
 }
 
-static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+/**
+ * gfx_v12_0_ring_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v12_0_ring_get_ref_and_mask(struct amdgpu_ring *ring,
+		uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 ref_and_mask, reg_mem_engine;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_MES ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
 		switch (ring->me) {
 		case 1:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+			*ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
 			break;
 		case 2:
-			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+			*ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+			break;
+		case 3:
+			*ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
 			break;
 		default:
 			return;
 		}
-		reg_mem_engine = 0;
+		*reg_mem_engine = 0;
 	} else {
-		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-		reg_mem_engine = 1; /* pfp */
+		*ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
+		*reg_mem_engine = 1; /* pfp */
 	}
+}
+
+static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 ref_and_mask, reg_mem_engine;
+	ring->funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
 
 	gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
@@ -5515,6 +5536,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
 	.emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v12_0_ring_emit_vm_flush,
 	.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
+	.get_ref_and_mask = gfx_v12_0_ring_get_ref_and_mask,
 	.test_ring = gfx_v12_0_ring_test_ring,
 	.test_ib = gfx_v12_0_ring_test_ib,
 	.insert_nop = gfx_v12_ring_insert_nop,
@@ -5557,6 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
 	.emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v12_0_ring_emit_vm_flush,
 	.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
+	.get_ref_and_mask = gfx_v12_0_ring_get_ref_and_mask,
 	.test_ring = gfx_v12_0_ring_test_ring,
 	.test_ib = gfx_v12_0_ring_test_ib,
 	.insert_nop = gfx_v12_ring_insert_nop,
@@ -5599,6 +5622,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
 	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
 	.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
+	.get_ref_and_mask = gfx_v12_0_ring_get_ref_and_mask,
 };
 
 static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 66a4e4998106..176658846b81 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2070,7 +2070,8 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask;
 	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e6187be27385..bc1d0870cb2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -5382,7 +5382,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask, reg_mem_engine;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
+	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 89253df5ffc8..418b8daa7097 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -2820,7 +2820,8 @@ static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask, reg_mem_engine;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
+	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-- 
2.48.1
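
Reviewer note (not part of the commit message or the diff): the sketch below is a self-contained C model of the new dispatch. Every struct, constant and function name in it is a simplified stand-in rather than a real amdgpu definition, and it assumes, as the new "case 3" branch and the commit message imply, that the MES ring reports me == 3 so its flush resolves to the cp8 entry while the gfx ring keeps cp0.

/*
 * Standalone model of the get_ref_and_mask() dispatch added by this patch.
 * All names and mask values below are illustrative stand-ins, not the real
 * amdgpu definitions.
 */
#include <stdint.h>
#include <stdio.h>

enum ring_type { RING_GFX, RING_COMPUTE, RING_KIQ, RING_MES };

/* Stand-ins for the nbio_hdp_flush_reg ref_and_mask_cp* fields. */
#define REF_AND_MASK_CP0 0x001u
#define REF_AND_MASK_CP2 0x004u
#define REF_AND_MASK_CP6 0x040u
#define REF_AND_MASK_CP8 0x100u

struct ring;

struct ring_funcs {
	enum ring_type type;
	void (*get_ref_and_mask)(struct ring *ring, uint32_t *ref_and_mask,
				 uint32_t *reg_mem_engine);
};

struct ring {
	const struct ring_funcs *funcs;
	uint32_t me;
	uint32_t pipe;
};

/* Mirrors the per-ASIC gfx_v1x_0_ring_get_ref_and_mask() selection logic. */
static void gfx_get_ref_and_mask(struct ring *ring, uint32_t *ref_and_mask,
				 uint32_t *reg_mem_engine)
{
	if (ring->funcs->type == RING_COMPUTE ||
	    ring->funcs->type == RING_MES ||
	    ring->funcs->type == RING_KIQ) {
		switch (ring->me) {
		case 1:
			*ref_and_mask = REF_AND_MASK_CP2 << ring->pipe;
			break;
		case 2:
			*ref_and_mask = REF_AND_MASK_CP6 << ring->pipe;
			break;
		case 3:
			*ref_and_mask = REF_AND_MASK_CP8 << ring->pipe;
			break;
		default:
			return;
		}
		*reg_mem_engine = 0; /* me engine */
	} else {
		*ref_and_mask = REF_AND_MASK_CP0 << ring->pipe;
		*reg_mem_engine = 1; /* pfp */
	}
}

static const struct ring_funcs gfx_funcs = { RING_GFX, gfx_get_ref_and_mask };
static const struct ring_funcs mes_funcs = { RING_MES, NULL }; /* MES has no hook */

int main(void)
{
	struct ring gfx_ring = { &gfx_funcs, 0, 0 }; /* gfx: pfp engine, cp0 entry */
	struct ring mes_ring = { &mes_funcs, 3, 0 }; /* assumed me == 3 -> cp8 entry */
	uint32_t ref = 0, engine = 0;

	/* Mirrors the -EINVAL bail-out in amdgpu_mes_hdp_flush(). */
	if (!gfx_ring.funcs->get_ref_and_mask)
		return 1;

	gfx_ring.funcs->get_ref_and_mask(&gfx_ring, &ref, &engine);
	printf("gfx: mask 0x%x, engine %u\n", (unsigned)ref, (unsigned)engine);

	/* The MES flush path uses the gfx ring's callback with the MES ring. */
	gfx_ring.funcs->get_ref_and_mask(&mes_ring, &ref, &engine);
	printf("mes: mask 0x%x, engine %u\n", (unsigned)ref, (unsigned)engine);
	return 0;
}

Routing the lookup through a ring-funcs callback keeps the per-ASIC CP-entry knowledge in the gfx IP files, so amdgpu_mes_hdp_flush() only needs the gfx ring's callback plus the MES ring it wants a mask for.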
