Keep track of the CSG priority in panthor_group while the group is
scheduled/active.

This is useful for knowing the actual priority in use in the
firmware group slot.

Signed-off-by: Mary Guillemard <mary.guillem...@collabora.com>
---
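Note for reviewers (not part of the commit message): the intent is that
csg_priority mirrors the lifecycle of csg_id, i.e. both are valid
together and both are -1 while the group is unbound. As a rough sketch
of how a consumer could rely on that invariant, a hypothetical helper
(group_active_priority() does not exist in the driver) might look like:

	/* Hypothetical helper, illustration only (not part of this
	 * patch): returns the priority currently programmed in the FW
	 * group slot, or -1 when the group holds no slot. Callers are
	 * expected to hold the scheduler lock, like the other
	 * *_locked() helpers.
	 */
	static int group_active_priority(struct panthor_group *group)
	{
		/* csg_id and csg_priority are set and cleared together
		 * in group_bind_locked()/group_unbind_locked().
		 */
		if (group->csg_id < 0)
			return -1;

		return group->csg_priority;
	}
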
 drivers/gpu/drm/panthor/panthor_sched.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 86908ada7335..f15abeef4ece 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -574,6 +574,13 @@ struct panthor_group {
         */
        int csg_id;
 
+       /**
+        * @csg_priority: Priority of the FW group slot.
+        *
+        * -1 when the group is not scheduled/active.
+        */
+       int csg_priority;
+
        /**
         * @destroyed: True when the group has been destroyed.
         *
@@ -894,11 +901,12 @@ group_get(struct panthor_group *group)
  * group_bind_locked() - Bind a group to a group slot
  * @group: Group.
  * @csg_id: Slot.
+ * @csg_priority: Priority of the slot.
  *
  * Return: 0 on success, a negative error code otherwise.
  */
 static int
-group_bind_locked(struct panthor_group *group, u32 csg_id)
+group_bind_locked(struct panthor_group *group, u32 csg_id, u32 csg_priority)
 {
        struct panthor_device *ptdev = group->ptdev;
        struct panthor_csg_slot *csg_slot;
@@ -917,6 +925,7 @@ group_bind_locked(struct panthor_group *group, u32 csg_id)
        csg_slot = &ptdev->scheduler->csg_slots[csg_id];
        group_get(group);
        group->csg_id = csg_id;
+       group->csg_priority = csg_priority;
 
        /* Dummy doorbell allocation: doorbell is assigned to the group and
         * all queues use the same doorbell.
@@ -956,6 +965,7 @@ group_unbind_locked(struct panthor_group *group)
        slot = &ptdev->scheduler->csg_slots[group->csg_id];
        panthor_vm_idle(group->vm);
        group->csg_id = -1;
+       group->csg_priority = -1;
 
        /* Tiler OOM events will be re-issued next time the group is scheduled. */
        atomic_set(&group->tiler_oom, 0);
@@ -2193,8 +2203,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c
 
                        csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
                        csg_slot = &sched->csg_slots[csg_id];
-                       group_bind_locked(group, csg_id);
-                       csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
+                       group_bind_locked(group, csg_id, new_csg_prio);
+                       csg_slot_prog_locked(ptdev, csg_id, new_csg_prio);
+                       new_csg_prio--;
                        csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
                                                group->state == PANTHOR_CS_GROUP_SUSPENDED ?
                                                CSG_STATE_RESUME : CSG_STATE_START,
@@ -3111,6 +3122,7 @@ int panthor_group_create(struct panthor_file *pfile,
        kref_init(&group->refcount);
        group->state = PANTHOR_CS_GROUP_CREATED;
        group->csg_id = -1;
+       group->csg_priority = -1;
 
        group->ptdev = ptdev;
        group->max_compute_cores = group_args->max_compute_cores;
-- 
2.46.0
