csg_slot_sync_queues_state_locked() queries the queue state, which can
then be used to determine whether a group is idle. Let's base our
idleness detection logic solely on the {idle,blocked}_queues masks to
avoid inconsistencies between the group state and the state of its
queues.

Signed-off-by: Boris Brezillon <[email protected]>
---
 drivers/gpu/drm/panthor/panthor_sched.c | 31 ++-----------------------
 1 file changed, 2 insertions(+), 29 deletions(-)
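
For reference, here is a standalone sketch of the mask-based idleness
check this switches to. This is plain userspace C, not kernel code:
__builtin_popcount() stands in for the kernel's hweight32(), and the
queue masks below are made-up values, purely to illustrate the logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the new group_is_idle() logic: a group is idle when every one
 * of its queues is either idle or blocked on a synchronization object.
 */
static bool group_is_idle(uint32_t idle_queues, uint32_t blocked_queues,
			  uint32_t queue_count)
{
	uint32_t inactive_queues = idle_queues | blocked_queues;

	return __builtin_popcount(inactive_queues) == queue_count;
}

int main(void)
{
	/* 3 queues: queue 0 idle, queue 1 blocked, queue 2 still running. */
	printf("%d\n", group_is_idle(0x1, 0x2, 3)); /* 0 -> not idle */

	/* 3 queues: queue 0 idle, queues 1 and 2 blocked. */
	printf("%d\n", group_is_idle(0x1, 0x6, 3)); /* 1 -> idle */

	return 0;
}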

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index e74ca071159d..9931f4a6cd96 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -108,15 +108,6 @@ struct panthor_csg_slot {
 
        /** @priority: Group priority. */
        u8 priority;
-
-       /**
-        * @idle: True if the group bound to this slot is idle.
-        *
-        * A group is idle when it has nothing waiting for execution on
-        * all its queues, or when queues are blocked waiting for something
-        * to happen (synchronization object).
-        */
-       bool idle;
 };
 
 /**
@@ -1607,17 +1598,6 @@ static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
        return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
 }
 
-static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
-{
-       struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
-       struct panthor_fw_csg_iface *csg_iface;
-
-       lockdep_assert_held(&ptdev->scheduler->lock);
-
-       csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
-       csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
-}
-
 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
 {
        struct panthor_scheduler *sched = ptdev->scheduler;
@@ -1879,10 +1859,8 @@ static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
                if (acked & CSG_STATE_MASK)
                        csg_slot_sync_state_locked(ptdev, csg_id);
 
-               if (acked & CSG_STATUS_UPDATE) {
+               if (acked & CSG_STATUS_UPDATE)
                        csg_slot_sync_queues_state_locked(ptdev, csg_id);
-                       csg_slot_sync_idle_state_locked(ptdev, csg_id);
-               }
 
                if (ret && acked != req_mask &&
                    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
@@ -1919,13 +1897,8 @@ tick_ctx_is_full(const struct panthor_scheduler *sched,
 static bool
 group_is_idle(struct panthor_group *group)
 {
-       struct panthor_device *ptdev = group->ptdev;
-       u32 inactive_queues;
+       u32 inactive_queues = group->idle_queues | group->blocked_queues;
 
-       if (group->csg_id >= 0)
-               return ptdev->scheduler->csg_slots[group->csg_id].idle;
-
-       inactive_queues = group->idle_queues | group->blocked_queues;
        return hweight32(inactive_queues) == group->queue_count;
 }
 
-- 
2.51.1
