Hi, Lijo.
-----------------------------------------------------------------------------------------------
It appears like gfx v9/9.4.3/10/11/12 all can be kept in some 
amdgpu_gfx_get_ref_mask generic helper, then it's not required to repeat the 
logic.

if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
        ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
        switch (ring->me) {
        case 1:
                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
                break;
        case 2:
                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
                break;
        default:
                return;
        }
        *reg_mem_engine = 0;
        return;
}

if (ring->funcs->type == AMDGPU_RING_TYPE_MES) {
        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
        *reg_mem_engine = 0;
} else {
        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
        *reg_mem_engine = 1; /* pfp */
}
------------------------------------------------------------------------------

In gfx10, ring->me of the KIQ is 2, so this code works.
But in gfx11 and gfx12, ring->me of the KIQ is 3, so this logic does not
cover the KIQ there.

Since the value of ring->me changes between gfx versions, the logic is not
really repeated across generations, so I suggest keeping the original
per-generation way of assigning ref_and_mask.


I accept your other suggestions: I will add the NULL check of the params, fix
the alignment mismatch, and delete the unused variable "usepfp".
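
For the params check, I have something like the below in mind at the top of
each get_ref_and_mask implementation (a sketch only, using gfx_v11_0 as the
example; the exact form may still change in v4):

-----------------------------------------------------------------------------------------------
static void gfx_v11_0_get_ref_and_mask(struct amdgpu_ring *ring,
                                       uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
{
        /* NULL check of the params, as requested in the review */
        if (!ring || !ref_and_mask || !reg_mem_engine)
                return;

        /* ... existing ref_and_mask/reg_mem_engine assignment unchanged ... */
}
-----------------------------------------------------------------------------------------------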


Thanks,
Chong



-----Original Message-----
From: Lazar, Lijo <[email protected]>
Sent: Tuesday, December 9, 2025 6:03 PM
To: Li, Chong(Alan) <[email protected]>; [email protected]
Cc: Deng, Emily <[email protected]>
Subject: Re: [PATCH v3] drm/amdgpu: fix mes packet params issue when flush hdp.



On 12/9/2025 1:08 PM, chong li wrote:
> v3:
> Unify the get_ref_and_mask function in amdgpu_gfx_funcs, to support
> both GFX11 and earlier generations
>
> v2:
> place "get_ref_and_mask" in amdgpu_gfx_funcs instead of amdgpu_ring,
> since this function only assigns the cp entry.
>
> v1:
> both the gfx ring and the mes ring use cp0 to flush hdp, causing a conflict.
>
> use function get_ref_and_mask to assign the cp entry.
> reassign mes to use cp8 instead.
>
> Signed-off-by: chong li <[email protected]>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  2 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 13 +++++-
>   drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  | 54 +++++++++++++++--------
>   drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c  | 58 +++++++++++++++++--------
>   drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c  | 58 +++++++++++++++++--------
>   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   | 55 +++++++++++++++--------
>   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 55 +++++++++++++++--------
>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 54 +++++++++++++++--------
>   drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 54 +++++++++++++++--------
>   9 files changed, 275 insertions(+), 128 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> index efd61a1ccc66..090714127cba 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> @@ -356,6 +356,8 @@ struct amdgpu_gfx_funcs {
>                                    int num_xccs_per_xcp);
>       int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
>       int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
> +     void (*get_ref_and_mask)(struct amdgpu_ring *ring,
> +                             uint32_t *ref_and_mask, uint32_t *reg_mem_engine);
>   };
>
>   struct sq_work {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
> index 895b841b9626..5c7724f203d0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
> @@ -556,11 +556,20 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
>
>   int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
>   {
> -     uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
> +     uint32_t hdp_flush_req_offset, hdp_flush_done_offset;
> +     struct amdgpu_ring *mes_ring;
> +     uint32_t ref_and_mask = 0, reg_mem_engine = 0;
>
> +     if (!adev->gfx.funcs->get_ref_and_mask) {
> +             dev_err(adev->dev, "amdgpu_mes_hdp_flush not supported\n");
> +             return -EINVAL;
> +     }
> +
> +     mes_ring = &adev->mes.ring[0];
>       hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
>       hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
> -     ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
> +
> +     adev->gfx.funcs->get_ref_and_mask(mes_ring, &ref_and_mask, &reg_mem_engine);
>
>       return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
>                                            ref_and_mask, ref_and_mask, 0);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> index aaed24f7e716..ed79ceafc57b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> @@ -4566,6 +4566,40 @@ static void gfx_v10_0_update_perfmon_mgcg(struct amdgpu_device *adev,
>               WREG32_SOC15(GC, 0, mmRLC_PERFMON_CLK_CNTL, data);
>   }
>
> +/**
> + * gfx_v10_0_get_ref_and_mask - get the reference and mask for HDP flush
> + *
> + * @ring: amdgpu_ring structure holding ring information
> + * @ref_and_mask: pointer to store the reference and mask
> + * @reg_mem_engine: pointer to store the register memory engine
> + *
> + * Calculates the reference and mask for HDP flush based on the ring type and me.
> + */
> +static void gfx_v10_0_get_ref_and_mask(struct amdgpu_ring *ring,
> +                                     uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
> +{
> +     struct amdgpu_device *adev = ring->adev;
> +     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> +

Need to do NULL check of params (this and others).

> +     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
> +             ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
> +             switch (ring->me) {
> +             case 1:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> +                     break;
> +             case 2:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> +                     break;
> +             default:
> +                     return;
> +             }
> +             *reg_mem_engine = 0;
> +     } else {
> +             *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
> +             *reg_mem_engine = 1; /* pfp */
> +     }
> +}
> +
>   static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
>       .get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
>       .select_se_sh = &gfx_v10_0_select_se_sh,
> @@ -4575,6 +4609,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
>       .select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
>       .init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
>       .update_perfmon_mgcg = &gfx_v10_0_update_perfmon_mgcg,
> +     .get_ref_and_mask = &gfx_v10_0_get_ref_and_mask,
>   };
>
>   static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
> @@ -8614,25 +8649,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
>       u32 ref_and_mask, reg_mem_engine;
> -     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> -
> -     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
> -             switch (ring->me) {
> -             case 1:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> -                     break;
> -             case 2:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> -                     break;
> -             default:
> -                     return;
> -             }
> -             reg_mem_engine = 0;
> -     } else {
> -             ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
> -             reg_mem_engine = 1; /* pfp */
> -     }
>
> +     adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
>       gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
>                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
>                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> index f4d4dd5dd07b..c3d8e7588740 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> @@ -1072,6 +1072,44 @@ static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
>       }
>   }
>
> +/**
> + * gfx_v11_0_get_ref_and_mask - get the reference and mask for HDP flush
> + *
> + * @ring: amdgpu_ring structure holding ring information
> + * @ref_and_mask: pointer to store the reference and mask
> + * @reg_mem_engine: pointer to store the register memory engine
> + *
> + * Calculates the reference and mask for HDP flush based on the ring type and me.
> + */
> +static void gfx_v11_0_get_ref_and_mask(struct amdgpu_ring *ring,
> +                                     uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
> +{
> +     struct amdgpu_device *adev = ring->adev;
> +     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> +
> +     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
> +         ring->funcs->type == AMDGPU_RING_TYPE_MES ||
> +             ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
> +             switch (ring->me) {
> +             case 1:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> +                     break;
> +             case 2:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> +                     break;
> +             case 3:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
> +                     break;
> +             default:
> +                     return;
> +             }
> +             *reg_mem_engine = 0;
> +     } else {
> +             *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
> +             *reg_mem_engine = 1; /* pfp */
> +     }
> +}
> +
>   static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
>       .get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
>       .select_se_sh = &gfx_v11_0_select_se_sh,
> @@ -1081,6 +1119,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
>       .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
>       .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
>       .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
> +     .get_ref_and_mask = &gfx_v11_0_get_ref_and_mask,
>   };
>
>   static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
> @@ -5833,25 +5872,8 @@ static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
>       u32 ref_and_mask, reg_mem_engine;
> -     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> -
> -     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
> -             switch (ring->me) {
> -             case 1:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> -                     break;
> -             case 2:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> -                     break;
> -             default:
> -                     return;
> -             }
> -             reg_mem_engine = 0;
> -     } else {
> -             ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
> -             reg_mem_engine = 1; /* pfp */
> -     }
>
> +     adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
>       gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
>                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
>                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
> index f9cae6666697..b805ed4f88aa 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
> @@ -929,6 +929,44 @@ static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
>       return -EINVAL;
>   }
>
> +/**
> + * gfx_v12_0_get_ref_and_mask - get the reference and mask for HDP flush
> + *
> + * @ring: amdgpu_ring structure holding ring information
> + * @ref_and_mask: pointer to store the reference and mask
> + * @reg_mem_engine: pointer to store the register memory engine
> + *
> + * Calculates the reference and mask for HDP flush based on the ring type and me.
> + */
> +static void gfx_v12_0_get_ref_and_mask(struct amdgpu_ring *ring,
> +                                     uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
> +{
> +     struct amdgpu_device *adev = ring->adev;
> +     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> +
> +     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
> +         ring->funcs->type == AMDGPU_RING_TYPE_MES ||
> +             ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
> +             switch (ring->me) {
> +             case 1:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> +                     break;
> +             case 2:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> +                     break;
> +             case 3:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
> +                     break;
> +             default:
> +                     return;
> +             }
> +             *reg_mem_engine = 0;
> +     } else {
> +             *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
> +             *reg_mem_engine = 1; /* pfp */
> +     }
> +}
> +
>   static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
>       .get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
>       .select_se_sh = &gfx_v12_0_select_se_sh,
> @@ -938,6 +976,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
>       .select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
>       .update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
>       .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
> +     .get_ref_and_mask = &gfx_v12_0_get_ref_and_mask,
>   };
>
>   static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
> @@ -4389,25 +4428,8 @@ static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
>       u32 ref_and_mask, reg_mem_engine;
> -     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> -
> -     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
> -             switch (ring->me) {
> -             case 1:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> -                     break;
> -             case 2:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> -                     break;
> -             default:
> -                     return;
> -             }
> -             reg_mem_engine = 0;
> -     } else {
> -             ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
> -             reg_mem_engine = 1; /* pfp */
> -     }
>
> +     adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
>       gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
>                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
>                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> index 66a4e4998106..b3ea45e3c60f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> @@ -2068,23 +2068,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
>   static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       u32 ref_and_mask;
> -     int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
> -
> -     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
> -             switch (ring->me) {
> -             case 1:
> -                     ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
> -                     break;
> -             case 2:
> -                     ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
> -                     break;
> -             default:
> -                     return;
> -             }
> -     } else {
> -             ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
> -     }
> +     int usepfp;
> +     struct amdgpu_device *adev = ring->adev;
>
> +     adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &usepfp);
>       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
>       amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
>                                WAIT_REG_MEM_FUNCTION(3) |  /* == */
> @@ -4075,12 +4062,46 @@ static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
>       cik_srbm_select(adev, me, pipe, q, vm);
>   }
>
> +/**
> + * gfx_v7_0_get_ref_and_mask - get the reference and mask for HDP flush
> + *
> + * @ring: amdgpu_ring structure holding ring information
> + * @ref_and_mask: pointer to store the reference and mask
> + * @reg_mem_engine: pointer to store the register memory engine
> + *
> + * Calculates the reference and mask for HDP flush based on the ring type and me.
> + */
> +static void gfx_v7_0_get_ref_and_mask(struct amdgpu_ring *ring,
> +                                     uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
> +{
> +     int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;

This doesn't look used inside this function.

> +
> +     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
> +             ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
> +             switch (ring->me) {
> +             case 1:
> +                     *ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
> +                     break;
> +             case 2:
> +                     *ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
> +                     break;
> +             default:
> +                     return;
> +             }
> +             *reg_mem_engine = 0;
> +     } else {
> +             *ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
> +             *reg_mem_engine = 1;
> +     }
> +}
> +
>   static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
>       .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
>       .select_se_sh = &gfx_v7_0_select_se_sh,
>       .read_wave_data = &gfx_v7_0_read_wave_data,
>       .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
> -     .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
> +     .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q,
> +     .get_ref_and_mask = &gfx_v7_0_get_ref_and_mask,
>   };
>
>   static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> index 5d6e8e0601cb..cc5acfcdf360 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> @@ -5211,13 +5211,46 @@ static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id
>               start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
>   }
>
> +/**
> + * gfx_v8_0_get_ref_and_mask - get the reference and mask for HDP flush
> + *
> + * @ring: amdgpu_ring structure holding ring information
> + * @ref_and_mask: pointer to store the reference and mask
> + * @reg_mem_engine: pointer to store the register memory engine
> + *
> + * Calculates the reference and mask for HDP flush based on the ring type and me.
> + */
> +static void gfx_v8_0_get_ref_and_mask(struct amdgpu_ring *ring,
> +                                     uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
> +{
> +     struct amdgpu_device *adev = ring->adev;
> +
> +     if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
> +         (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
> +             switch (ring->me) {
> +             case 1:
> +                     *ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
> +                     break;
> +             case 2:
> +                     *ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
> +                     break;
> +             default:
> +                     return;
> +             }
> +             *reg_mem_engine = 0;
> +     } else {
> +             *ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
> +             *reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
> +     }
> +}
>
>   static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
>       .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
>       .select_se_sh = &gfx_v8_0_select_se_sh,
>       .read_wave_data = &gfx_v8_0_read_wave_data,
>       .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
> -     .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
> +     .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q,
> +     .get_ref_and_mask = &gfx_v8_0_get_ref_and_mask,
>   };
>
>   static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block)
> @@ -6000,25 +6033,9 @@ static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
>   static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       u32 ref_and_mask, reg_mem_engine;
> +     struct amdgpu_device *adev = ring->adev;
>
> -     if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
> -         (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
> -             switch (ring->me) {
> -             case 1:
> -                     ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
> -                     break;
> -             case 2:
> -                     ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
> -                     break;
> -             default:
> -                     return;
> -             }
> -             reg_mem_engine = 0;
> -     } else {
> -             ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
> -             reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
> -     }
> -
> +     adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
>       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
>       amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
>                                WAIT_REG_MEM_FUNCTION(3) |  /* == */
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index e6187be27385..f2ebacc73eb2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -1997,6 +1997,40 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
>       soc15_grbm_select(adev, me, pipe, q, vm, 0);
>   }
>
> +/**
> + * gfx_v9_0_get_ref_and_mask - get the reference and mask for HDP flush
> + *
> + * @ring: amdgpu_ring structure holding ring information
> + * @ref_and_mask: pointer to store the reference and mask
> + * @reg_mem_engine: pointer to store the register memory engine
> + *
> + * Calculates the reference and mask for HDP flush based on the ring type and me.
> + */
> +static void gfx_v9_0_get_ref_and_mask(struct amdgpu_ring *ring,
> +                                     uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
> +{
> +     struct amdgpu_device *adev = ring->adev;
> +     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> +
> +     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
> +             ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
> +             switch (ring->me) {
> +             case 1:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> +                     break;
> +             case 2:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> +                     break;
> +             default:
> +                     return;
> +             }
> +             *reg_mem_engine = 0;
> +     } else {
> +             *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
> +             *reg_mem_engine = 1; /* pfp */
> +     }
> +}
> +
>   static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
>           .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
>           .select_se_sh = &gfx_v9_0_select_se_sh,
> @@ -2004,6 +2038,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
>           .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
>           .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
>           .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
> +             .get_ref_and_mask = &gfx_v9_0_get_ref_and_mask,

Alignment mismatch?

>   };
>
>   const struct amdgpu_ras_block_hw_ops  gfx_v9_0_ras_ops = {
> @@ -5380,25 +5415,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
>       u32 ref_and_mask, reg_mem_engine;
> -     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> -
> -     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
> -             switch (ring->me) {
> -             case 1:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> -                     break;
> -             case 2:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> -                     break;
> -             default:
> -                     return;
> -             }
> -             reg_mem_engine = 0;
> -     } else {
> -             ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
> -             reg_mem_engine = 1; /* pfp */
> -     }
>
> +     adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
>       gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
>                             adev->nbio.funcs->get_hdp_flush_req_offset(adev),
>                             adev->nbio.funcs->get_hdp_flush_done_offset(adev),
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
> index 89253df5ffc8..b4ba76110c34 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
> @@ -838,6 +838,40 @@ static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
>       return xcc - 1;
>   }
>
> +/**
> + * gfx_v9_4_3_get_ref_and_mask - get the reference and mask for HDP flush
> + *
> + * @ring: amdgpu_ring structure holding ring information
> + * @ref_and_mask: pointer to store the reference and mask
> + * @reg_mem_engine: pointer to store the register memory engine
> + *
> + * Calculates the reference and mask for HDP flush based on the ring type and me.
> + */
> +static void gfx_v9_4_3_get_ref_and_mask(struct amdgpu_ring *ring,
> +                                     uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
> +{
> +     struct amdgpu_device *adev = ring->adev;
> +     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> +
> +     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
> +             ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
> +             switch (ring->me) {
> +             case 1:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> +                     break;
> +             case 2:
> +                     *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> +                     break;
> +             default:
> +                     return;
> +             }
> +             *reg_mem_engine = 0;
> +     } else {
> +             *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
> +             *reg_mem_engine = 1; /* pfp */
> +     }
> +}
> +
>   static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
>       .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
>       .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
> @@ -848,6 +882,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
>       .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
>       .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
>       .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
> +     .get_ref_and_mask = &gfx_v9_4_3_get_ref_and_mask,
>   };
>
>   static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
> @@ -2818,25 +2853,8 @@ static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
>       struct amdgpu_device *adev = ring->adev;
>       u32 ref_and_mask, reg_mem_engine;
> -     const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
> -
> -     if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
> -             switch (ring->me) {
> -             case 1:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
> -                     break;
> -             case 2:
> -                     ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
> -                     break;
> -             default:
> -                     return;
> -             }
> -             reg_mem_engine = 0;
> -     } else {
> -             ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
> -             reg_mem_engine = 1; /* pfp */
> -     }
>

It appears like gfx v9/9.4.3/10/11/12 all can be kept in some 
amdgpu_gfx_get_ref_mask generic helper, then it's not required to repeat the 
logic.

if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
        ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
        switch (ring->me) {
        case 1:
                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
                break;
        case 2:
                *ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
                break;
        default:
                return;
        }
        *reg_mem_engine = 0;
        return;
}

if (ring->funcs->type == AMDGPU_RING_TYPE_MES) {
        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp8 << ring->pipe;
        *reg_mem_engine = 0;
} else {
        *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
        *reg_mem_engine = 1; /* pfp */
}


Thanks,
Lijo

> +     adev->gfx.funcs->get_ref_and_mask(ring, &ref_and_mask, &reg_mem_engine);
>       gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
>                             adev->nbio.funcs->get_hdp_flush_req_offset(adev),
>                             adev->nbio.funcs->get_hdp_flush_done_offset(adev),

