Re: [PATCH] drm/panthor: Kill the faulty_slots variable in panthor_sched_suspend()

2024-05-02 Thread Boris Brezillon
On Thu, 25 Apr 2024 12:18:29 +0100
Steven Price  wrote:

> On 25/04/2024 11:39, Boris Brezillon wrote:
> > We can use upd_ctx.timedout_mask directly, and the faulty_slots update
> > in the flush_caches_failed situation is never used.
> > 
> > Suggested-by: Suggested-by: Steven Price   
> 
> I'm obviously too full of suggestions! ;)

Pushed to drm-misc-next-fixes, but I realize I forgot to drop the extra
Suggested-by. Oh well.

> 
> And you're doing a much better job of my todo list than I am!
> 
> > Signed-off-by: Boris Brezillon   
> 
> Reviewed-by: Steven Price 
> 
> > ---
> >  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++---
> >  1 file changed, 3 insertions(+), 7 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> > index fad4678ca4c8..fed28c16d5d1 100644
> > --- a/drivers/gpu/drm/panthor/panthor_sched.c
> > +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> > @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
> >  {
> > struct panthor_scheduler *sched = ptdev->scheduler;
> > struct panthor_csg_slots_upd_ctx upd_ctx;
> > -   u32 suspended_slots, faulty_slots;
> > struct panthor_group *group;
> > +   u32 suspended_slots;
> > u32 i;
> >  
> > mutex_lock(&sched->lock);
> > @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
> >  
> > csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
> > suspended_slots &= ~upd_ctx.timedout_mask;
> > -   faulty_slots = upd_ctx.timedout_mask;
> >  
> > -   if (faulty_slots) {
> > -   u32 slot_mask = faulty_slots;
> > +   if (upd_ctx.timedout_mask) {
> > +   u32 slot_mask = upd_ctx.timedout_mask;
> >  
> > drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
> > csgs_upd_ctx_init(&upd_ctx);
> > @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
> >  
> > slot_mask &= ~BIT(csg_id);
> > }
> > -
> > -   if (flush_caches_failed)
> > -   faulty_slots |= suspended_slots;
> > }
> >  
> > for (i = 0; i < sched->csg_slot_count; i++) {  
> 



Re: [PATCH] drm/panthor: Kill the faulty_slots variable in panthor_sched_suspend()

2024-04-26 Thread Liviu Dudau
On Thu, Apr 25, 2024 at 12:39:20PM +0200, Boris Brezillon wrote:
> We can use upd_ctx.timedout_mask directly, and the faulty_slots update
> in the flush_caches_failed situation is never used.
> 
> Suggested-by: Suggested-by: Steven Price 
> Signed-off-by: Boris Brezillon 

Reviewed-by: Liviu Dudau 

Best regards,
Liviu

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++---
>  1 file changed, 3 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index fad4678ca4c8..fed28c16d5d1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  {
>   struct panthor_scheduler *sched = ptdev->scheduler;
>   struct panthor_csg_slots_upd_ctx upd_ctx;
> - u32 suspended_slots, faulty_slots;
>   struct panthor_group *group;
> + u32 suspended_slots;
>   u32 i;
>  
>   mutex_lock(&sched->lock);
> @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>   csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
>   suspended_slots &= ~upd_ctx.timedout_mask;
> - faulty_slots = upd_ctx.timedout_mask;
>  
> - if (faulty_slots) {
> - u32 slot_mask = faulty_slots;
> + if (upd_ctx.timedout_mask) {
> + u32 slot_mask = upd_ctx.timedout_mask;
>  
>   drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
>   csgs_upd_ctx_init(&upd_ctx);
> @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>   slot_mask &= ~BIT(csg_id);
>   }
> -
> - if (flush_caches_failed)
> - faulty_slots |= suspended_slots;
>   }
>  
>   for (i = 0; i < sched->csg_slot_count; i++) {
> -- 
> 2.44.0
> 

-- 

| I would like to |
| fix the world,  |
| but they're not |
| giving me the   |
 \ source code!  /
  ---
¯\_(ツ)_/¯


Re: [PATCH] drm/panthor: Kill the faulty_slots variable in panthor_sched_suspend()

2024-04-25 Thread Steven Price
On 25/04/2024 11:39, Boris Brezillon wrote:
> We can use upd_ctx.timedout_mask directly, and the faulty_slots update
> in the flush_caches_failed situation is never used.
> 
> Suggested-by: Suggested-by: Steven Price 

I'm obviously too full of suggestions! ;)

And you're doing a much better job of my todo list than I am!

> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++---
>  1 file changed, 3 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index fad4678ca4c8..fed28c16d5d1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  {
>   struct panthor_scheduler *sched = ptdev->scheduler;
>   struct panthor_csg_slots_upd_ctx upd_ctx;
> - u32 suspended_slots, faulty_slots;
>   struct panthor_group *group;
> + u32 suspended_slots;
>   u32 i;
>  
>   mutex_lock(&sched->lock);
> @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>   csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
>   suspended_slots &= ~upd_ctx.timedout_mask;
> - faulty_slots = upd_ctx.timedout_mask;
>  
> - if (faulty_slots) {
> - u32 slot_mask = faulty_slots;
> + if (upd_ctx.timedout_mask) {
> + u32 slot_mask = upd_ctx.timedout_mask;
>  
>   drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
>   csgs_upd_ctx_init(&upd_ctx);
> @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>   slot_mask &= ~BIT(csg_id);
>   }
> -
> - if (flush_caches_failed)
> - faulty_slots |= suspended_slots;
>   }
>  
>   for (i = 0; i < sched->csg_slot_count; i++) {



Re: [PATCH] drm/panthor: Kill the faulty_slots variable in panthor_sched_suspend()

2024-04-25 Thread Erik Faye-Lund
On Thu, 2024-04-25 at 12:39 +0200, Boris Brezillon wrote:
> We can use upd_ctx.timedout_mask directly, and the faulty_slots update
> in the flush_caches_failed situation is never used.
> 
> Suggested-by: Suggested-by: Steven Price 

Whoops? :)

> Signed-off-by: Boris Brezillon 
> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++---
>  1 file changed, 3 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index fad4678ca4c8..fed28c16d5d1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  {
>   struct panthor_scheduler *sched = ptdev->scheduler;
>   struct panthor_csg_slots_upd_ctx upd_ctx;
> - u32 suspended_slots, faulty_slots;
>   struct panthor_group *group;
> + u32 suspended_slots;
>   u32 i;
>  
>   mutex_lock(&sched->lock);
> @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>   csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
>   suspended_slots &= ~upd_ctx.timedout_mask;
> - faulty_slots = upd_ctx.timedout_mask;
>  
> - if (faulty_slots) {
> - u32 slot_mask = faulty_slots;
> + if (upd_ctx.timedout_mask) {
> + u32 slot_mask = upd_ctx.timedout_mask;
>  
>   drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
>   csgs_upd_ctx_init(&upd_ctx);
> @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>   slot_mask &= ~BIT(csg_id);
>   }
> -
> - if (flush_caches_failed)
> - faulty_slots |= suspended_slots;
>   }
>  
>   for (i = 0; i < sched->csg_slot_count; i++) {



[PATCH] drm/panthor: Kill the faulty_slots variable in panthor_sched_suspend()

2024-04-25 Thread Boris Brezillon
We can use upd_ctx.timedout_mask directly, and the faulty_slots update
in the flush_caches_failed situation is never used.

Suggested-by: Suggested-by: Steven Price 
Signed-off-by: Boris Brezillon 
---
 drivers/gpu/drm/panthor/panthor_sched.c | 10 +++---
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index fad4678ca4c8..fed28c16d5d1 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
 {
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_csg_slots_upd_ctx upd_ctx;
-   u32 suspended_slots, faulty_slots;
struct panthor_group *group;
+   u32 suspended_slots;
u32 i;
 
mutex_lock(&sched->lock);
@@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
 
csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
suspended_slots &= ~upd_ctx.timedout_mask;
-   faulty_slots = upd_ctx.timedout_mask;
 
-   if (faulty_slots) {
-   u32 slot_mask = faulty_slots;
+   if (upd_ctx.timedout_mask) {
+   u32 slot_mask = upd_ctx.timedout_mask;
 
	drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
csgs_upd_ctx_init(&upd_ctx);
@@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
 
slot_mask &= ~BIT(csg_id);
}
-
-   if (flush_caches_failed)
-   faulty_slots |= suspended_slots;
}
 
for (i = 0; i < sched->csg_slot_count; i++) {
-- 
2.44.0
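

[Editor's note: for readers skimming the thread, below is a small standalone sketch of the simplification this patch performs. It is not panthor driver code; the struct name, function names, and mask values (upd_ctx_example, suspend_before/suspend_after, 0x5, 0x2) are made up purely for illustration. It shows the pattern described in the commit message: read the timed-out mask field directly instead of copying it into a local, and drop the conditional update of that local in the flush-failure path, which was a dead store because the value was never read afterwards.]

/* Toy illustration only: types, names, and values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct upd_ctx_example {
	uint32_t timedout_mask;
};

static void suspend_before(struct upd_ctx_example *upd_ctx, uint32_t suspended_slots,
			   int flush_caches_failed)
{
	/* Old shape: copy the field into a local ... */
	uint32_t faulty_slots = upd_ctx->timedout_mask;

	if (faulty_slots) {
		uint32_t slot_mask = faulty_slots;

		printf("terminating slots 0x%x\n", (unsigned)slot_mask);

		/* ... and update it on cache-flush failure; nothing reads
		 * faulty_slots after this point, so the store is dead. */
		if (flush_caches_failed)
			faulty_slots |= suspended_slots;
	}
}

static void suspend_after(struct upd_ctx_example *upd_ctx, uint32_t suspended_slots)
{
	/* New shape: use the field directly, no local copy, no dead store. */
	if (upd_ctx->timedout_mask) {
		uint32_t slot_mask = upd_ctx->timedout_mask;

		printf("terminating slots 0x%x\n", (unsigned)slot_mask);
	}
	(void)suspended_slots;
}

int main(void)
{
	struct upd_ctx_example ctx = { .timedout_mask = 0x5 };

	suspend_before(&ctx, 0x2, 1);
	suspend_after(&ctx, 0x2);
	return 0;
}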