GDS doesn't exist on gfx12. The incomplete packet allows userspace to hang
the hardware from the kernel.

Signed-off-by: Marek Olšák <[email protected]>
Acked-by: Christian König <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index a638696b2142..ccb26f78252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -4108,21 +4108,6 @@ static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                /* inherit vmid from mqd */
                control |= 0x40000000;
 
-       /* Currently, there is a high possibility to get wave ID mismatch
-        * between ME and GDS, leading to a hw deadlock, because ME generates
-        * different wave IDs than the GDS expects. This situation happens
-        * randomly when at least 5 compute pipes use GDS ordered append.
-        * The wave IDs generated by ME are also wrong after suspend/resume.
-        * Those are probably bugs somewhere else in the kernel driver.
-        *
-        * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
-        * GDS to 0 for this ring (me/pipe).
-        */
-       if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
-       }
-
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
        amdgpu_ring_write(ring,
@@ -4721,7 +4706,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* VM_FLUSH */
                8 + /* FENCE for VM_FLUSH */
-               20 + /* GDS switch */
                5 + /* COND_EXEC */
                7 + /* HDP_flush */
                4 + /* VGT_flush */
-- 
2.34.1

Reply via email to