Ok, I found one other little issue where there could be something in the
parent queue that's lower priority than the base queue. We don't want to
inherit a lower priority.  :)

Steven

Index: rtai-core/include/rtai_sched.h
===================================================================
RCS file: /cvs/rtai/vesuvio/rtai-core/include/rtai_sched.h,v
retrieving revision 1.3
diff -u -r1.3 rtai_sched.h
--- rtai-core/include/rtai_sched.h    6 Aug 2004 11:29:25 -0000    1.3
+++ rtai-core/include/rtai_sched.h    2 Jun 2005 15:45:47 -0000
@@ -106,6 +106,8 @@
     int owndres;
     struct rt_queue *blocked_on;
     struct rt_queue msg_queue;
+    struct rt_queue parent_prio_queue; //other tasks add themselves here, you are the parent
+    struct rt_queue child_prio_queue; //you add yourself to other tasks here when locking resources
     int tid;    /* trace ID */
     unsigned msg;
     struct rt_queue ret_queue;
Index: rtai-core/include/rtai_schedcore.h
===================================================================
RCS file: /cvs/rtai/vesuvio/rtai-core/include/rtai_schedcore.h,v
retrieving revision 1.4
diff -u -r1.4 rtai_schedcore.h
--- rtai-core/include/rtai_schedcore.h    3 Sep 2004 07:24:02 -0000    1.4
+++ rtai-core/include/rtai_schedcore.h    2 Jun 2005 15:45:47 -0000
@@ -339,6 +339,46 @@
 }
 #endif
 
+/* Detach every node of a circular queue: walk the ring exactly once and
+ * reset each element (the head included) to point at itself, leaving
+ * every node in the unlinked/empty state. */
+static inline void flush_queue(QUEUE *queue)
+{
+    QUEUE *cur = queue;
+
+    do {
+        QUEUE *next = cur->next;    /* save successor before severing links */
+        cur->next = cur;
+        cur->prev = cur;
+        cur = next;
+    } while (cur != queue);
+}
+
+/* Link task's child_prio_queue node into a parent's priority ring,
+ * keeping the ring ordered by ascending ->priority value: the node is
+ * inserted just before the first element whose priority value is
+ * greater than task's, or before the head if no such element exists. */
+static inline void enqueue_prio(RT_TASK *task, QUEUE *parent_queue)
+{
+    QUEUE *node = &(task->child_prio_queue);
+    QUEUE *q = parent_queue;
+
+    /* Scan past all entries with priority value <= task's. */
+    while ((q = q->next) != parent_queue && (q->task)->priority <= task->priority);
+
+    /* Splice node in immediately before q. */
+    node->prev = q->prev;
+    node->next = q;
+    q->prev->next = node;
+    q->prev = node;
+}
+
+/* Unlink task's child_prio_queue node from whatever ring it is on and
+ * leave it self-linked (empty).  Calling this on an already-unlinked
+ * node is harmless: the splice operations are then no-ops. */
+static inline void dequeue_prio(RT_TASK *task)
+{
+    QUEUE *node = &(task->child_prio_queue);
+
+    node->prev->next = node->next;
+    node->next->prev = node->prev;
+    node->next = node;
+    node->prev = node;
+}
+
+/*
+ * Re-sort 'queue' (a node owned by 'task') after task's priority has
+ * changed: unlink it from its ring, walk forward to the first element
+ * whose priority value exceeds task's, and splice in before it.
+ *
+ * NOTE(fix): the scan is bounded to one full trip around the ring.  The
+ * previous for-loop had no sentinel check (compare enqueue_prio, which
+ * tests q != parent_queue) — once 'queue' was unlinked, if every
+ * remaining element satisfied priority <= task->priority the loop never
+ * terminated.  On a full cycle we insert before the starting point,
+ * which also handles the degenerate self-linked (empty) node safely.
+ */
+static inline void prio_sort(RT_TASK *task, QUEUE *queue)
+{
+    QUEUE *q, *start;
+    /*resort the queue of all resources locked on its parent*/
+    start = q = queue->next;
+    queue->prev->next = queue->next;
+    queue->next->prev = queue->prev;
+    while ((q->task)->priority <= task->priority) {
+        q = q->next;
+        if (q == start) {
+            break;  /* completed a full cycle; insert before start */
+        }
+    }
+    q->prev = (queue->prev = q->prev)->next = queue;
+    queue->next = q;
+}
+
 static inline void enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
 {
         QUEUE *q;
@@ -383,6 +423,10 @@
                        while ((q = q->next) != to->blocked_on && (q->task)->priority <= to->priority);
                        q->prev = (to->queue.prev = q->prev)->next  = &(to->queue);
                        to->queue.next = q;
+                  
+                        /*resort the queue of all resources locked on its parent*/
+                        prio_sort(to, &to->child_prio_queue);
+                        prio_sort(to, &to->parent_prio_queue);
                 }
                 to = to->prio_passed_to;
     }
Index: rtai-core/ipc/sem/sem.c
===================================================================
RCS file: /cvs/rtai/vesuvio/rtai-core/ipc/sem/sem.c,v
retrieving revision 1.7
diff -u -r1.7 sem.c
--- rtai-core/ipc/sem/sem.c    22 Nov 2004 08:06:17 -0000    1.7
+++ rtai-core/ipc/sem/sem.c    2 Jun 2005 15:45:48 -0000
@@ -306,12 +306,29 @@
         }
         if (!rt_current->owndres) {
             sched = renq_current(rt_current, rt_current->base_priority);
-        } else if (!(rt_current->owndres & SEMHLF)) {
+            flush_queue(&rt_current->parent_prio_queue); //just in case
+        } 
+        else if (!(rt_current->owndres & SEMHLF)) {
             int priority;
            sched = renq_current(rt_current, rt_current->base_priority > (priority = ((rt_current->msg_queue.next)->task)->priority) ? priority : rt_current->base_priority);
-        } else {
-            sched = 0;
+        } 
+        else {
+            struct rt_queue *q = sem->queue.next;
+            RT_TASK *next_task;
+            int priority;
+            if(task) dequeue_prio(task); //remove the task the sem owned from the prio queue
+
+            /* now to remove all tasks from our prio queue that were waiting on this sem */
+            while(q->task) {
+                dequeue_prio(q->task);
+                q = q->next;
+            }
+            next_task = rt_current->parent_prio_queue.next->task;
+            if(next_task==rt_current||next_task->priority>rt_current->base_priority) priority = rt_current->base_priority; //last thing in queue, return to base prio
+            else priority = next_task->priority;
+            sched = renq_current(rt_current, priority);
         }
+        
         if (rt_current->suspdepth) {
             if (rt_current->suspdepth > 0) {
                 rt_current->state |= RT_SCHED_SUSPENDED;
@@ -453,22 +470,35 @@
             schedmap = 0;
         }
         sem->count--;
+repeat:
         rt_current->state |= RT_SCHED_SEMAPHORE;
         rem_ready_current(rt_current);
+        if(sem->owndby) enqueue_prio(rt_current, &sem->owndby->parent_prio_queue);
         enqueue_blocked(rt_current, &sem->queue, sem->qtype);
         RT_SCHEDULE_MAP_BOTH(schedmap);
         if (rt_current->blocked_on || sem->magic != RT_SEM_MAGIC) {
             rt_current->prio_passed_to = NOTHING;
             rt_global_restore_flags(flags);
+            //no reason to dequeue from the prio queue here, because the owning task will do it
             return SEM_ERR;
-        } else { 
+        } else {
+            //got sem ok, but now check to make sure we didn't release and then re-take this
+            //sem, and that a higher priority process is really supposed to be running here
+            RT_TASK *check_next = sem->queue.next->task;
+            if(check_next&&check_next->priority<=rt_current->priority) {
+                dequeue_blocked(check_next);
+                enq_ready_task(check_next);
+
+                goto repeat;
+            }
+
             count = sem->count;
         }
     } else {
         sem->count--;
     }
     if (sem->type > 0) {
-        (sem->owndby = rt_current)->owndres++;
+        (sem->owndby = rt_current)->owndres++;
     }
     rt_global_restore_flags(flags);
     return count;
@@ -585,12 +615,20 @@
                 schedmap = 0;
             }    
             sem->count--;
-            rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
+repeat:
+            rt_current->state |= (RT_SCHED_SEMAPHORE|RT_SCHED_DELAYED);
             rem_ready_current(rt_current);
+            if(sem->owndby) enqueue_prio(rt_current, &sem->owndby->parent_prio_queue);
             enqueue_blocked(rt_current, &sem->queue, sem->qtype);
-            enq_timed_task(rt_current);
+            enq_timed_task(rt_current);
             RT_SCHEDULE_MAP_BOTH(schedmap);
         } else {
+            RT_TASK *check_next = sem->queue.next->task;
+            if(check_next&&check_next->priority<=rt_current->priority) {
+                dequeue_blocked(check_next);
+                enq_ready_task(check_next);
+                goto repeat;
+            }
             sem->count--;
            rt_current->queue.prev = rt_current->queue.next = &rt_current->queue;
         }
@@ -601,6 +639,7 @@
         } else {
             if (rt_current->blocked_on) {
                 dequeue_blocked(rt_current);
+                dequeue_prio(rt_current);
                 if(++sem->count > 1 && sem->type) {
                     sem->count = 1;
                 }
Index: rtai-core/sched/rtai/sched_up.c
===================================================================
RCS file: /cvs/rtai/vesuvio/rtai-core/sched/rtai/sched_up.c,v
retrieving revision 1.10
diff -u -r1.10 sched_up.c
--- rtai-core/sched/rtai/sched_up.c    12 Nov 2004 08:14:29 -0000    1.10
+++ rtai-core/sched/rtai/sched_up.c    2 Jun 2005 15:45:48 -0000
@@ -278,8 +278,14 @@
     task->period = 0;
     task->resume_time = RT_TIME_END;
     task->queue.prev = &(task->queue);
-    task->queue.next = &(task->queue);
+    task->queue.next = &(task->queue);
     task->queue.task = task;
+    task->parent_prio_queue.prev = &(task->parent_prio_queue);
+    task->parent_prio_queue.next = &(task->parent_prio_queue);
+    task->parent_prio_queue.task = task;
+    task->child_prio_queue.prev = &(task->child_prio_queue);
+    task->child_prio_queue.next = &(task->child_prio_queue);
+    task->child_prio_queue.task = task;
     task->msg_queue.prev = &(task->msg_queue);
     task->msg_queue.next = &(task->msg_queue);
     task->msg_queue.task = task;
@@ -774,6 +780,7 @@
     TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_DELETE, task->tid, 0, 0);
 
     hard_save_flags_and_cli(flags);
+    flush_queue(&task->parent_prio_queue);
    if (!(task->owndres & SEMHLF) || task == rt_current || rt_current->priority == RT_SCHED_LINUX_PRIORITY) {
         call_exit_handlers(task);
         rem_timed_task(task);


Reply via email to