Back up the unprocessed ring commands before resetting the queue and
re-emit them once the queue has been restored. Only force-complete the
guilty fence when this queue was actually selected for the reset; if the
re-emit fails, fall back to force-completing all fences.

Signed-off-by: Alex Deucher <[email protected]>
---
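A note for reviewers: the new sdma_v7_0_is_queue_selected() helper only
reads QUEUE0's CONTEXT_STATUS register and tests the SELECTED bit. Below
is a minimal, self-contained model of that check; the mask value is a
stand-in (the real one is defined in the generated register headers and
the offset comes from sdma_v7_0_get_reg_offset()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED_MASK; the real
 * value lives in the generated register headers. */
#define CONTEXT_STATUS_SELECTED_MASK 0x1u

/* Models sdma_v7_0_is_queue_selected(): the driver always uses queue0
 * for KGD, so only queue0's context status is consulted. */
static bool queue_is_selected(uint32_t context_status)
{
	return (context_status & CONTEXT_STATUS_SELECTED_MASK) != 0;
}

int main(void)
{
	printf("%d\n", queue_is_selected(0x1)); /* selected */
	printf("%d\n", queue_is_selected(0x0)); /* not selected */
	return 0;
}
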
 drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 30 ++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)
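
For context, the reset path now orders its steps as sketched below. This
is a model with hypothetical stubs, not driver code; the real helpers are
amdgpu_ring_backup_unprocessed_commands(), amdgpu_mes_reset_legacy_queue(),
sdma_v7_0_gfx_resume_instance() and amdgpu_ring_reemit_unprocessed_commands(),
and passing the guilty fence to the backup presumably lets it skip the bad
job's commands:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs; each reports its step and succeeds. */
static void backup_unprocessed(bool skip_guilty)
{
	printf("backup unprocessed commands%s\n",
	       skip_guilty ? " (skipping the guilty job)" : "");
}
static int reset_queue(void)  { puts("MES legacy queue reset"); return 0; }
static int resume_queue(void) { puts("gfx resume instance"); return 0; }
static void signal_guilty_fence(void) { puts("force-complete guilty fence"); }
static int reemit_unprocessed(void) { puts("re-emit backed-up commands"); return 0; }
static void force_complete_all(void) { puts("force-complete all fences"); }

/* Mirrors the ordering in sdma_v7_0_reset_queue() after this patch. */
static int reset_queue_flow(bool is_guilty)
{
	int r;

	backup_unprocessed(is_guilty);	/* before the reset */

	r = reset_queue();
	if (r)
		return r;
	r = resume_queue();
	if (r)
		return r;

	if (is_guilty)
		signal_guilty_fence();	/* only signal the bad job */

	r = reemit_unprocessed();	/* replay the survivors */
	if (r)
		force_complete_all();	/* fallback so nothing waits forever */
	return 0;
}

int main(void)
{
	return reset_queue_flow(true);
}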

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 3e036c37b1f5a..9d89bd1ed8075 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -802,11 +802,23 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
        return false;
 }
 
+static bool sdma_v7_0_is_queue_selected(struct amdgpu_device *adev,
+                                       uint32_t instance_id)
+{
+       /* we always use queue0 for KGD */
+       u32 context_status = RREG32(sdma_v7_0_get_reg_offset(adev, instance_id,
+                                                            regSDMA0_QUEUE0_CONTEXT_STATUS));
+
+       /* Check if the SELECTED bit is set */
+       return (context_status & SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED_MASK) != 0;
+}
+
 static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
                                 unsigned int vmid,
                                 struct amdgpu_fence *guilty_fence)
 {
        struct amdgpu_device *adev = ring->adev;
+       bool is_guilty;
        int i, r;
 
        if (amdgpu_sriov_vf(adev))
@@ -822,6 +834,11 @@ static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
                return -EINVAL;
        }
 
+       is_guilty = sdma_v7_0_is_queue_selected(adev, i);
+
+       amdgpu_ring_backup_unprocessed_commands(ring,
+                                               is_guilty ? guilty_fence : NULL);
+
        r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
        if (r)
                return r;
@@ -829,8 +846,17 @@ static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
        r = sdma_v7_0_gfx_resume_instance(adev, i, true);
        if (r)
                return r;
-       amdgpu_fence_driver_force_completion(ring);
-       atomic_inc(&ring->adev->gpu_reset_counter);
+
+       if (is_guilty) {
+               /* signal the fence of the bad job */
+               amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+               atomic_inc(&ring->adev->gpu_reset_counter);
+       }
+       r = amdgpu_ring_reemit_unprocessed_commands(ring);
+       if (r)
+               /* if we fail to reemit, force complete all fences */
+               amdgpu_fence_driver_force_completion(ring);
+
        return 0;
 }
 
-- 
2.49.0
