On 24.10.2017 at 07:57, Monk Liu wrote:
1. New implementation named amdgpu_gpu_recover, which gives a better
hint of what it does compared with gpu_reset.

2. gpu_recover unifies bare-metal and SR-IOV; only the reset part
is implemented differently.

3. gpu_recover increases the karma of the hanging job and marks its
context guilty once the karma exceeds the limit; such jobs are then
canceled in the run_job() routine of the GPU scheduler with the error
"-ECANCELED" set.

4. gpu_recover knows whether this reset comes with VRAM lost or not;
if VRAM is lost, all jobs are canceled in run_job() during scheduler recovery.

Change-Id: If3c9380e38f68f73d7a1cdb3faa53e5aa33d1fc5
Signed-off-by: Monk Liu <[email protected]>

Need to take a close look at this one and #4-#6, but at first glance they looked good.

I would reorder the patches and submit #2 and #3 immediately and then resend the rest. And in general please split patches into changes to amd/amdgpu and amd/scheduler.

Regards,
Christian.

---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h           |   4 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 252 ++++++++++++++++++++++++++
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c       |  21 ++-
  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |   4 +-
  4 files changed, 276 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6a4178b..5646e61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -178,6 +178,10 @@ extern int amdgpu_cik_support;
  #define CIK_CURSOR_WIDTH 128
  #define CIK_CURSOR_HEIGHT 128
+/* GPU RESET flags */
+#define AMDGPU_RESET_INFO_VRAM_LOST  (1 << 0)
+#define AMDGPU_RESET_INFO_FULLRESET  (1 << 1)
+
  struct amdgpu_device;
  struct amdgpu_ib;
  struct amdgpu_cs_parser;
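
A minimal sketch of how a caller could consume these two bits; only the
AMDGPU_RESET_INFO_* macros come from this patch, the function below is
hypothetical:

        static void example_handle_reset_info(struct amdgpu_device *adev,
                                              uint64_t reset_flags)
        {
                /* VRAM contents did not survive the reset; owners must restore them */
                if (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)
                        dev_warn(adev->dev, "VRAM lost during reset\n");

                /* a full ASIC reset was performed instead of a soft reset */
                if (reset_flags & AMDGPU_RESET_INFO_FULLRESET)
                        dev_info(adev->dev, "full reset performed\n");
        }
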
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a07544d..159d3d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3116,6 +3116,258 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
        return r;
  }
+static int amdgpu_reset(struct amdgpu_device *adev, uint64_t *reset_flags)
+{
+       int r;
+       bool need_full_reset, vram_lost = 0;
+
+       need_full_reset = amdgpu_need_full_reset(adev);
+
+       if (!need_full_reset) {
+               amdgpu_pre_soft_reset(adev);
+               r = amdgpu_soft_reset(adev);
+               amdgpu_post_soft_reset(adev);
+               if (r || amdgpu_check_soft_reset(adev)) {
+                       DRM_INFO("soft reset failed, will fallback to full reset!\n");
+                       need_full_reset = true;
+               }
+
+       }
+
+       if (need_full_reset) {
+               r = amdgpu_suspend(adev);
+
+retry:
+               amdgpu_atombios_scratch_regs_save(adev);
+               r = amdgpu_asic_reset(adev);
+               amdgpu_atombios_scratch_regs_restore(adev);
+               /* post card */
+               amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+               if (!r) {
+                       dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+                       r = amdgpu_resume_phase1(adev);
+                       if (r)
+                               goto out;
+
+                       vram_lost = amdgpu_check_vram_lost(adev);
+                       if (vram_lost) {
+                               DRM_ERROR("VRAM is lost!\n");
+                               atomic_inc(&adev->vram_lost_counter);
+                       }
+
+                       r = amdgpu_ttm_recover_gart(adev);
+                       if (r)
+                               goto out;
+
+                       r = amdgpu_resume_phase2(adev);
+                       if (r)
+                               goto out;
+
+                       if (vram_lost)
+                               amdgpu_fill_reset_magic(adev);
+               }
+       }
+
+out:
+       if (!r) {
+               amdgpu_irq_gpu_reset_resume_helper(adev);
+               r = amdgpu_ib_ring_tests(adev);
+               if (r) {
+                       dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+                       r = amdgpu_suspend(adev);
+                       need_full_reset = true;
+                       goto retry;
+               }
+       }
+
+       if (reset_flags) {
+               if (vram_lost)
+                       (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+
+               if (need_full_reset)
+                       (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
+       }
+
+       return r;
+}
+
+static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
+{
+       int r;
+
+       if (from_hypervisor)
+               r = amdgpu_virt_request_full_gpu(adev, true);
+       else
+               r = amdgpu_virt_reset_gpu(adev);
+       if (r)
+               return r;
+
+       /* Resume IP prior to SMC */
+       r = amdgpu_sriov_reinit_early(adev);
+       if (r)
+               goto error;
+
+       /* we need recover gart prior to run SMC/CP/SDMA resume */
+       amdgpu_ttm_recover_gart(adev);
+
+       /* now we are okay to resume SMC/CP/SDMA */
+       r = amdgpu_sriov_reinit_late(adev);
+       if (r)
+               goto error;
+
+       amdgpu_irq_gpu_reset_resume_helper(adev);
+       r = amdgpu_ib_ring_tests(adev);
+       if (r)
+               dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
+
+error:
+       /* release full control of GPU after ib test */
+       amdgpu_virt_release_full_gpu(adev, true);
+
+       if (reset_flags) {
+               /* will get vram_lost from GIM in future, now all
+                * reset request considered VRAM LOST
+                */
+               (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+               atomic_inc(&adev->vram_lost_counter);
+
+               /* VF FLR or hotlink reset is always full-reset */
+               (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
+       }
+
+       return r;
+}
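
When the reset is requested by the hypervisor (VF FLR / hotlink reset)
there is no hanging job to blame; presumably the FLR handler simply
enters the unified path with job == NULL, which in turn selects
from_hypervisor = true above. A sketch of such an assumed caller:

        /* in the hypervisor FLR work handler (assumed, not part of this patch) */
        amdgpu_gpu_recover(adev, NULL);
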
+
+int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+{
+       struct drm_atomic_state *state = NULL;
+       uint64_t reset_flags = 0;
+       int i, r, resched;
+
+       if (!amdgpu_check_soft_reset(adev)) {
+               DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+               return 0;
+       }
+
+       dev_info(adev->dev, "GPU reset begin!\n");
+
+       mutex_lock(&adev->virt.lock_reset);
+       atomic_inc(&adev->gpu_reset_counter);
+       adev->in_sriov_reset = 1;
+
+       /* block TTM */
+       resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+       /* store modesetting */
+       if (amdgpu_device_has_dc_support(adev))
+               state = drm_atomic_helper_suspend(adev->ddev);
+
+       /* block scheduler */
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_ring *ring = adev->rings[i];
+
+               if (!ring || !ring->sched.thread)
+                       continue;
+
+               /* when a job is given, only handle the ring that hit the timeout */
+               if (job && job->ring->idx != i)
+                       continue;
+
+               kthread_park(ring->sched.thread);
+               amd_sched_hw_job_reset(&ring->sched, &job->base);
+
+               /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+               amdgpu_fence_driver_force_completion(ring);
+       }
+
+       if (amdgpu_sriov_vf(adev))
+               r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
+       else
+               r = amdgpu_reset(adev, &reset_flags);
+
+       if (!r) {
+               if ((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) {
+                       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+                       struct amdgpu_bo *bo, *tmp;
+                       struct dma_fence *fence = NULL, *next = NULL;
+
+                       DRM_INFO("recover vram bo from shadow\n");
+                       mutex_lock(&adev->shadow_list_lock);
+                       list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+                               next = NULL;
+                               amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+                               if (fence) {
+                                       r = dma_fence_wait(fence, false);
+                                       if (r) {
+                                               WARN(r, "recovery from shadow isn't completed\n");
+                                               break;
+                                       }
+                               }
+
+                               dma_fence_put(fence);
+                               fence = next;
+                       }
+                       mutex_unlock(&adev->shadow_list_lock);
+                       if (fence) {
+                               r = dma_fence_wait(fence, false);
+                               if (r)
+                                       WARN(r, "recovery from shadow isn't completed\n");
+                       }
+                       dma_fence_put(fence);
+               }
+
+               for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+                       struct amdgpu_ring *ring = adev->rings[i];
+
+                       if (!ring || !ring->sched.thread)
+                               continue;
+
+                       /* when a job is given, only handle the ring that hit the timeout */
+                       if (job && job->ring->idx != i)
+                               continue;
+
+                       amd_sched_job_recovery(&ring->sched);
+                       kthread_unpark(ring->sched.thread);
+               }
+       } else {
+               for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+                       struct amdgpu_ring *ring = adev->rings[i];
+
+                       if (!ring || !ring->sched.thread)
+                               continue;
+
+                       /* when a job is given, only handle the ring that hit the timeout */
+                       if (job && job->ring->idx != i)
+                               continue;
+
+                       kthread_unpark(adev->rings[i]->sched.thread);
+               }
+       }
+
+       if (amdgpu_device_has_dc_support(adev)) {
+               if (drm_atomic_helper_resume(adev->ddev, state))
+                       dev_info(adev->dev, "drm resume failed:%d\n", r);
+               amdgpu_dm_display_resume(adev);
+       } else {
+               drm_helper_resume_force_mode(adev->ddev);
+       }
+
+       ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+
+       if (r) {
+               /* bad news, how to tell it to userspace ? */
+               dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+       } else {
+               dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+       }
+
+       amdgpu_vf_error_trans_all(adev);
+       adev->in_sriov_reset = 0;
+       mutex_unlock(&adev->virt.lock_reset);
+       return r;
+}
+
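
For reference, a sketch of how the existing job timeout callback in
amdgpu_job.c would presumably be switched over to this new entry point
(the exact conversion is not part of this hunk):

        static void amdgpu_job_timedout_sketch(struct amd_sched_job *s_job)
        {
                struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

                DRM_ERROR("ring %s timeout\n", job->ring->name);
                /* hand the hanging job to the unified recovery path */
                amdgpu_gpu_recover(job->adev, job);
        }
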
  void amdgpu_get_pcie_info(struct amdgpu_device *adev)
  {
        u32 mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index a58e3c5..bd8c7cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -177,6 +177,21 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
        return fence;
  }
+/* A job needs to be canceled if its user context is marked guilty,
+ * if its karma exceeds the hang limit (e.g. for kernel contexts that
+ * have no guilty flag), or if VRAM was lost after it was submitted.
+ */
+static bool amdgpu_job_need_canceled(struct amdgpu_job *job) {
+       if (job->base.s_entity->guilty && atomic_read(job->base.s_entity->guilty) == 1)
+               return true;
+       else if (atomic_read(&job->base.karma) > amdgpu_job_hang_limit)
+               return true;
+       else if (job->vram_lost_counter != atomic_read(&job->adev->vram_lost_counter))
+               return true;
+
+       return false;
+}
+
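
The vram_lost_counter comparison above assumes each job snapshots the
counter when it is created, presumably in amdgpu_job_init(), roughly:

        /* at job creation time (sketch, assumed to live in amdgpu_job_init()) */
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
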
  static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
  {
        struct dma_fence *fence = NULL;
@@ -194,10 +209,10 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
-       /* skip ib schedule when vram is lost */
-       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
+
+       if (amdgpu_job_need_canceled(job)) {
+               /* skip ib scheduling and set the error on the finished fence */
                dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
-               DRM_ERROR("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
                                       &fence);
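
With the error set on the finished fence, a waiter can tell a canceled
submission apart from a normal completion; a sketch of such a check in
assumed consumer code (not part of the patch):

        if (dma_fence_get_status(&job->base.s_fence->finished) == -ECANCELED)
                DRM_DEBUG("job was canceled by GPU recovery\n");
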
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9cbeade..72ead8d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -525,7 +525,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
                                          r);
                        dma_fence_put(fence);
                } else {
-                       DRM_ERROR("Failed to run job!\n");
+                       /* the job was skipped; signal its scheduler fence directly */
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
                spin_lock(&sched->job_list_lock);
@@ -664,7 +664,7 @@ static int amd_sched_main(void *param)
                                          r);
                        dma_fence_put(fence);
                } else {
-                       DRM_ERROR("Failed to run job!\n");
+                       DRM_INFO("job skipped\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

