Re: [PATCH v6 11/16] drm/sched: Make timeout timer rearm conditional.

2021-05-11 Thread Christian König

On 10.05.21 at 18:36, Andrey Grodzovsky wrote:

We don't want to rearm the timer if driver hook reports
that the device is gone.

v5: Update drm_gpu_sched_stat values in code.

Signed-off-by: Andrey Grodzovsky 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/scheduler/sched_main.c | 11 +++
  1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f4f474944169..8d1211e87101 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -314,6 +314,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
  {
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
+   enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
  
  	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
  
@@ -331,7 +332,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
-		job->sched->ops->timedout_job(job);
+		status = job->sched->ops->timedout_job(job);
 
 		/*
 		 * Guilty job did complete and hence needs to be manually removed
@@ -345,9 +346,11 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		spin_unlock(&sched->job_list_lock);
 	}
 
-	spin_lock(&sched->job_list_lock);
-	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
+		spin_lock(&sched->job_list_lock);
+		drm_sched_start_timeout(sched);
+		spin_unlock(&sched->job_list_lock);
+	}
 }
  
   /**




[PATCH v6 11/16] drm/sched: Make timeout timer rearm conditional.

2021-05-10 Thread Andrey Grodzovsky
We don't want to rearm the timer if driver hook reports
that the device is gone.

v5: Update drm_gpu_sched_stat values in code.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/scheduler/sched_main.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f4f474944169..8d1211e87101 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -314,6 +314,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 {
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
+   enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
 
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 
@@ -331,7 +332,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
-		job->sched->ops->timedout_job(job);
+		status = job->sched->ops->timedout_job(job);
 
 		/*
 		 * Guilty job did complete and hence needs to be manually removed
@@ -345,9 +346,11 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		spin_unlock(&sched->job_list_lock);
 	}
 
-	spin_lock(&sched->job_list_lock);
-	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
+		spin_lock(&sched->job_list_lock);
+		drm_sched_start_timeout(sched);
+		spin_unlock(&sched->job_list_lock);
+	}
 }
 
  /**
-- 
2.25.1
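
For readers unfamiliar with the driver side of this change, below is a minimal
sketch of a timedout_job hook that reports device removal, which is what makes
drm_sched_job_timedout() skip rearming the timeout timer in the hunk above. The
example_* device type, helpers, and the unplug check are hypothetical
illustrations, not part of this series; only the struct drm_sched_job parameter
and the enum drm_gpu_sched_stat return values come from the scheduler API.

static enum drm_gpu_sched_stat
example_timedout_job(struct drm_sched_job *sched_job)
{
	/* Hypothetical driver-private device lookup. */
	struct example_device *edev = to_example_device(sched_job->sched);

	/*
	 * Hypothetical hot-unplug check: the device is gone, so there is
	 * nothing left to reset and the scheduler should not rearm its
	 * timeout timer for this ring.
	 */
	if (example_device_is_unplugged(edev))
		return DRM_GPU_SCHED_STAT_ENODEV;

	/* Normal recovery path for a hung but still present device. */
	example_device_reset(edev, sched_job);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}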