v3: Unify the get_ref_and_mask function in amdgpu_gfx_funcs so that it supports both GFX11 and earlier generations.
v2: place "get_ref_and_mask" in amdgpu_gfx_funcs instead of amdgpu_ring, since this function only assigns the cp entry. v1: both gfx ring and mes ring use cp0 to flush hdp, cause conflict. use function get_ref_and_mask to assign the cp entry. reassign mes to use cp8 instead. Signed-off-by: chong li <[email protected]> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 13 ++++- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 60 ++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 64 ++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 64 ++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 59 +++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 59 ++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 60 ++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 60 ++++++++++++++++------- 9 files changed, 327 insertions(+), 114 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index efd61a1ccc66..090714127cba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -356,6 +356,8 @@ struct amdgpu_gfx_funcs { int num_xccs_per_xcp); int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node); int (*get_xccs_per_xcp)(struct amdgpu_device *adev); + void (*get_ref_and_mask)(struct amdgpu_ring *ring, + uint32_t *ref_and_mask, uint32_t *reg_mem_engine); }; struct sq_work { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 895b841b9626..77d25317973e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -556,11 +556,20 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, int amdgpu_mes_hdp_flush(struct amdgpu_device *adev) { - uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask; + uint32_t hdp_flush_req_offset, hdp_flush_done_offset; + struct amdgpu_ring *mes_ring; + uint32_t ref_and_mask = 0, reg_mem_engine = 0; + if (!adev->gfx.funcs->get_ref_and_mask) { + dev_err(adev->dev, "mes hdp flush is not supported.\n"); + return -EINVAL; + } + + mes_ring = &adev->mes.ring[0]; hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev); hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev); - ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0; + + adev->gfx.funcs->get_ref_and_mask(mes_ring, &ref_and_mask, ®_mem_engine); return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask, ref_and_mask, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index aaed24f7e716..8b85b836fd32 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4566,6 +4566,45 @@ static void gfx_v10_0_update_perfmon_mgcg(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmRLC_PERFMON_CLK_CNTL, data); } +/** + * gfx_v10_0_get_ref_and_mask - get the reference and mask for HDP flush + * + * @ring: amdgpu_ring structure holding ring information + * @ref_and_mask: pointer to store the reference and mask + * @reg_mem_engine: pointer to store the register memory engine + * + * Calculates the reference and mask for HDP flush based on the ring type and me. 
+ */
+static void gfx_v10_0_get_ref_and_mask(struct amdgpu_ring *ring,
+                uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+        if (!ring || !ref_and_mask || !reg_mem_engine) {
+                DRM_INFO("%s:invalid params\n", __func__);
+                return;
+        }
+
+        struct amdgpu_device *adev = ring->adev;
+        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+            ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+                switch (ring->me) {
+                case 1:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+                        break;
+                case 2:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                *reg_mem_engine = 0;
+        } else {
+                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
+                *reg_mem_engine = 1; /* pfp */
+        }
+}
+
 static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
         .get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
         .select_se_sh = &gfx_v10_0_select_se_sh,
@@ -4575,6 +4614,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
         .select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
         .init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
         .update_perfmon_mgcg = &gfx_v10_0_update_perfmon_mgcg,
+        .get_ref_and_mask = &gfx_v10_0_get_ref_and_mask,
 };
 
 static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
@@ -8614,25 +8654,13 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
         u32 ref_and_mask, reg_mem_engine;
-        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-                switch (ring->me) {
-                case 1:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                        break;
-                case 2:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                        break;
-                default:
-                        return;
-                }
-                reg_mem_engine = 0;
-        } else {
-                ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
-                reg_mem_engine = 1; /* pfp */
+        if (!adev->gfx.funcs->get_ref_and_mask) {
+                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+                return;
         }
 
+        adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
         gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                                adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                                adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f4d4dd5dd07b..2600a698ea38 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1072,6 +1072,49 @@ static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
         }
 }
 
+/**
+ * gfx_v11_0_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v11_0_get_ref_and_mask(struct amdgpu_ring *ring,
+                uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+        if (!ring || !ref_and_mask || !reg_mem_engine) {
+                DRM_INFO("%s:invalid params\n", __func__);
+                return;
+        }
+
+        struct amdgpu_device *adev = ring->adev;
+        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+            ring->funcs->type == AMDGPU_RING_TYPE_MES ||
+            ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+                switch (ring->me) {
+                case 1:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+                        break;
+                case 2:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+                        break;
+                case 3:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                *reg_mem_engine = 0;
+        } else {
+                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
+                *reg_mem_engine = 1; /* pfp */
+        }
+}
+
 static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
         .get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
         .select_se_sh = &gfx_v11_0_select_se_sh,
@@ -1081,6 +1124,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
         .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
         .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
         .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
+        .get_ref_and_mask = &gfx_v11_0_get_ref_and_mask,
 };
 
 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5833,25 +5877,13 @@ static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
         u32 ref_and_mask, reg_mem_engine;
-        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-                switch (ring->me) {
-                case 1:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                        break;
-                case 2:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                        break;
-                default:
-                        return;
-                }
-                reg_mem_engine = 0;
-        } else {
-                ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
-                reg_mem_engine = 1; /* pfp */
+        if (!adev->gfx.funcs->get_ref_and_mask) {
+                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+                return;
         }
 
+        adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
         gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                                adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                                adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index f9cae6666697..4c86f99a2783 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -929,6 +929,49 @@ static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
         return -EINVAL;
 }
 
+/**
+ * gfx_v12_0_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v12_0_get_ref_and_mask(struct amdgpu_ring *ring,
+                uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+        if (!ring || !ref_and_mask || !reg_mem_engine) {
+                DRM_INFO("%s:invalid params\n", __func__);
+                return;
+        }
+
+        struct amdgpu_device *adev = ring->adev;
+        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+            ring->funcs->type == AMDGPU_RING_TYPE_MES ||
+            ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+                switch (ring->me) {
+                case 1:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+                        break;
+                case 2:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+                        break;
+                case 3:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                *reg_mem_engine = 0;
+        } else {
+                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
+                *reg_mem_engine = 1; /* pfp */
+        }
+}
+
 static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
         .get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
         .select_se_sh = &gfx_v12_0_select_se_sh,
@@ -938,6 +981,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
         .select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
         .update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
         .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
+        .get_ref_and_mask = &gfx_v12_0_get_ref_and_mask,
 };
 
 static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -4389,25 +4433,13 @@ static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
         u32 ref_and_mask, reg_mem_engine;
-        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-                switch (ring->me) {
-                case 1:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                        break;
-                case 2:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                        break;
-                default:
-                        return;
-                }
-                reg_mem_engine = 0;
-        } else {
-                ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-                reg_mem_engine = 1; /* pfp */
+        if (!adev->gfx.funcs->get_ref_and_mask) {
+                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+                return;
         }
 
+        adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
         gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                                adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                                adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 66a4e4998106..29a6378cbf04 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2068,23 +2068,15 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
         u32 ref_and_mask;
-        int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+        int usepfp;
+        struct amdgpu_device *adev = ring->adev;
 
-        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-                switch (ring->me) {
-                case 1:
-                        ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
-                        break;
-                case 2:
-                        ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
-                        break;
-                default:
-                        return;
-                }
-        } else {
-                ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+        if (!adev->gfx.funcs->get_ref_and_mask) {
+                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+                return;
         }
 
+        adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &usepfp);
         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
         amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
                                  WAIT_REG_MEM_FUNCTION(3) |  /* == */
@@ -4075,12 +4067,49 @@ static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
         cik_srbm_select(adev, me, pipe, q, vm);
 }
 
+/**
+ * gfx_v7_0_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v7_0_get_ref_and_mask(struct amdgpu_ring *ring,
+                uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+        if (!ring || !ref_and_mask || !reg_mem_engine) {
+                DRM_INFO("%s:invalid params\n", __func__);
+                return;
+        }
+
+        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+            ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+                switch (ring->me) {
+                case 1:
+                        *ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+                        break;
+                case 2:
+                        *ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                *reg_mem_engine = 0;
+        } else {
+                *ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+                *reg_mem_engine = 1;
+        }
+}
+
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
         .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
         .select_se_sh = &gfx_v7_0_select_se_sh,
         .read_wave_data = &gfx_v7_0_read_wave_data,
         .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
-        .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
+        .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q,
+        .get_ref_and_mask = &gfx_v7_0_get_ref_and_mask,
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5d6e8e0601cb..0fd17fdf2cb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5211,13 +5211,49 @@ static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id
                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
 }
 
+/**
+ * gfx_v8_0_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v8_0_get_ref_and_mask(struct amdgpu_ring *ring,
+                uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+        if (!ring || !ref_and_mask || !reg_mem_engine) {
+                DRM_INFO("%s:invalid params\n", __func__);
+                return;
+        }
+
+        if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
+            (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
+                switch (ring->me) {
+                case 1:
+                        *ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+                        break;
+                case 2:
+                        *ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                *reg_mem_engine = 0;
+        } else {
+                *ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+                *reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
+        }
+}
 static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
         .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
         .select_se_sh = &gfx_v8_0_select_se_sh,
         .read_wave_data = &gfx_v8_0_read_wave_data,
         .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
-        .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
+        .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q,
+        .get_ref_and_mask = &gfx_v8_0_get_ref_and_mask,
 };
 
 static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block)
@@ -6000,25 +6036,14 @@ static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
 static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
         u32 ref_and_mask, reg_mem_engine;
+        struct amdgpu_device *adev = ring->adev;
 
-        if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
-            (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
-                switch (ring->me) {
-                case 1:
-                        ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
-                        break;
-                case 2:
-                        ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
-                        break;
-                default:
-                        return;
-                }
-                reg_mem_engine = 0;
-        } else {
-                ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
-                reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
+        if (!adev->gfx.funcs->get_ref_and_mask) {
+                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+                return;
         }
 
+        adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
         amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
                                  WAIT_REG_MEM_FUNCTION(3) |  /* == */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e6187be27385..8b912ea87450 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1997,6 +1997,45 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
         soc15_grbm_select(adev, me, pipe, q, vm, 0);
 }
 
+/**
+ * gfx_v9_0_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v9_0_get_ref_and_mask(struct amdgpu_ring *ring,
+                uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+        if (!ring || !ref_and_mask || !reg_mem_engine) {
+                DRM_INFO("%s:invalid params\n", __func__);
+                return;
+        }
+
+        struct amdgpu_device *adev = ring->adev;
+        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+            ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+                switch (ring->me) {
+                case 1:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+                        break;
+                case 2:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                *reg_mem_engine = 0;
+        } else {
+                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
+                *reg_mem_engine = 1; /* pfp */
+        }
+}
+
 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
         .select_se_sh = &gfx_v9_0_select_se_sh,
@@ -2004,6 +2043,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+        .get_ref_and_mask = &gfx_v9_0_get_ref_and_mask,
 };
 
 const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
@@ -5380,25 +5420,13 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
         u32 ref_and_mask, reg_mem_engine;
-        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-                switch (ring->me) {
-                case 1:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                        break;
-                case 2:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                        break;
-                default:
-                        return;
-                }
-                reg_mem_engine = 0;
-        } else {
-                ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-                reg_mem_engine = 1; /* pfp */
+        if (!adev->gfx.funcs->get_ref_and_mask) {
+                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+                return;
         }
 
+        adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 89253df5ffc8..56a1f6d8c121 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -838,6 +838,45 @@ static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
         return xcc - 1;
 }
 
+/**
+ * gfx_v9_4_3_get_ref_and_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v9_4_3_get_ref_and_mask(struct amdgpu_ring *ring,
+                uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+        if (!ring || !ref_and_mask || !reg_mem_engine) {
+                DRM_INFO("%s:invalid params\n", __func__);
+                return;
+        }
+
+        struct amdgpu_device *adev = ring->adev;
+        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+            ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+                switch (ring->me) {
+                case 1:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+                        break;
+                case 2:
+                        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                *reg_mem_engine = 0;
+        } else {
+                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
+                *reg_mem_engine = 1; /* pfp */
+        }
+}
+
 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
         .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
         .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
@@ -848,6 +887,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
         .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
         .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
         .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
+        .get_ref_and_mask = &gfx_v9_4_3_get_ref_and_mask,
 };
 
 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
@@ -2818,25 +2858,13 @@ static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
         u32 ref_and_mask, reg_mem_engine;
-        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-                switch (ring->me) {
-                case 1:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                        break;
-                case 2:
-                        ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                        break;
-                default:
-                        return;
-                }
-                reg_mem_engine = 0;
-        } else {
-                ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-                reg_mem_engine = 1; /* pfp */
+        if (!adev->gfx.funcs->get_ref_and_mask) {
+                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+                return;
         }
 
+        adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
         gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                                 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                                 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
-- 
2.48.1
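
Stand-alone sketch (not part of the patch): the snippet below only illustrates the selection logic that the new get_ref_and_mask callback centralizes, for readers without the register headers at hand. All demo_* names, enum values, and bit positions are made-up stand-ins, not the driver's real GPU_HDP_FLUSH_DONE layout; the point is that compute/MES rings derive a CP bit from ring->me (with MES on GFX11+ steered to CP8), while the GFX ring keeps CP0 on the PFP engine, so the two no longer race on the same done bit.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the ring types involved (not the kernel enums). */
enum demo_ring_type { DEMO_RING_GFX, DEMO_RING_COMPUTE, DEMO_RING_MES };

/* Illustrative per-CP "flush done" bit positions (not the real register layout). */
#define DEMO_DONE_CP0 (1u << 0)
#define DEMO_DONE_CP2 (1u << 2)
#define DEMO_DONE_CP6 (1u << 6)
#define DEMO_DONE_CP8 (1u << 8)

/*
 * Pick which "flush done" bit a ring should request and poll, mirroring the
 * structure of the new callback: compute/MES rings key off "me" (micro engine)
 * and shift by pipe, everything else (the GFX ring) uses CP0 via the PFP engine.
 */
static int demo_get_ref_and_mask(enum demo_ring_type type, int me, int pipe,
                                 uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
{
        if (type == DEMO_RING_COMPUTE || type == DEMO_RING_MES) {
                switch (me) {
                case 1:
                        *ref_and_mask = DEMO_DONE_CP2 << pipe;
                        break;
                case 2:
                        *ref_and_mask = DEMO_DONE_CP6 << pipe;
                        break;
                case 3: /* MES on GFX11+: CP8, so it cannot collide with CP0 */
                        *ref_and_mask = DEMO_DONE_CP8 << pipe;
                        break;
                default:
                        return -1;
                }
                *reg_mem_engine = 0; /* ME engine */
        } else {
                *ref_and_mask = DEMO_DONE_CP0;
                *reg_mem_engine = 1; /* PFP engine */
        }
        return 0;
}

int main(void)
{
        uint32_t mask, engine;

        /* GFX ring: CP0 via PFP. */
        demo_get_ref_and_mask(DEMO_RING_GFX, 0, 0, &mask, &engine);
        printf("gfx: mask=0x%x engine=%u\n", (unsigned)mask, (unsigned)engine);

        /* MES ring (me == 3): CP8, no longer sharing CP0 with the GFX ring. */
        demo_get_ref_and_mask(DEMO_RING_MES, 3, 0, &mask, &engine);
        printf("mes: mask=0x%x engine=%u\n", (unsigned)mask, (unsigned)engine);

        return 0;
}

Keeping that per-generation decision behind the amdgpu_gfx_funcs callback is what lets amdgpu_mes_hdp_flush() stay generation-agnostic and simply return -EINVAL when a backend does not provide it.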
