panthor_fw_unplug() frees the FW memory sections. The problem is that
there can still be pending FW events that have not been handled at this
point, in which case process_fw_events_work() may end up accessing the
freed memory.

Fix this by introducing a destroyed state in the panthor_scheduler
object and checking it before processing FW events.
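
For illustration only (not part of this patch): a minimal userspace
sketch of the same quiesce pattern, using hypothetical names and
pthreads standing in for the kernel mutex/guard helpers.

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct fake_sched {
          pthread_mutex_t lock;
          bool destroyed;
  };

  /* Deferred work handler: bail out once the object is being torn down. */
  static void fake_process_events(struct fake_sched *sched)
  {
          pthread_mutex_lock(&sched->lock);
          if (sched->destroyed) {
                  pthread_mutex_unlock(&sched->lock);
                  return;
          }
          printf("processing events against live FW state\n");
          pthread_mutex_unlock(&sched->lock);
  }

  /* Teardown: flip the flag under the same lock before freeing resources. */
  static void fake_unplug(struct fake_sched *sched)
  {
          pthread_mutex_lock(&sched->lock);
          sched->destroyed = true;
          pthread_mutex_unlock(&sched->lock);
          /* ...now safe to free the FW-backed memory... */
  }

  int main(void)
  {
          struct fake_sched sched = { PTHREAD_MUTEX_INITIALIZER, false };

          fake_process_events(&sched); /* runs normally */
          fake_unplug(&sched);
          fake_process_events(&sched); /* returns early, touches nothing */
          return 0;
  }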

Signed-off-by: Ketil Johnsen <[email protected]>
Fixes: de85488138247 ("drm/panthor: Add the scheduler logical block")
---
v2:
- Followed Boris's advice and handled the race purely within the
  scheduler block (by adding a destroyed state)
---
 drivers/gpu/drm/panthor/panthor_sched.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 0cc9055f4ee52..4996f987b8183 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -315,6 +315,13 @@ struct panthor_scheduler {
                 */
                struct list_head stopped_groups;
        } reset;
+
+       /**
+        * @destroyed: Scheduler object is (being) destroyed
+        *
+        * Normal scheduler operations should no longer take place.
+        */
+       bool destroyed;
 };
 
 /**
@@ -1765,7 +1772,10 @@ static void process_fw_events_work(struct work_struct *work)
        u32 events = atomic_xchg(&sched->fw_events, 0);
        struct panthor_device *ptdev = sched->ptdev;
 
-       mutex_lock(&sched->lock);
+       guard(mutex)(&sched->lock);
+
+       if (sched->destroyed)
+               return;
 
        if (events & JOB_INT_GLOBAL_IF) {
                sched_process_global_irq_locked(ptdev);
@@ -1778,8 +1788,6 @@ static void process_fw_events_work(struct work_struct *work)
                sched_process_csg_irq_locked(ptdev, csg_id);
                events &= ~BIT(csg_id);
        }
-
-       mutex_unlock(&sched->lock);
 }
 
 /**
@@ -3882,6 +3890,7 @@ void panthor_sched_unplug(struct panthor_device *ptdev)
        cancel_delayed_work_sync(&sched->tick_work);
 
        mutex_lock(&sched->lock);
+       sched->destroyed = true;
        if (sched->pm.has_ref) {
                pm_runtime_put(ptdev->base.dev);
                sched->pm.has_ref = false;
-- 
2.47.2
