Module: xenomai-forge
Branch: next
Commit: 784a4345cdb2ea71b7f06eb259bf58a240b1f15f
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=784a4345cdb2ea71b7f06eb259bf58a240b1f15f

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Jun  6 10:02:16 2014 +0200

cobalt/sched: fix core pick handler for cascaded policies

Some scheduling policies are implemented as variants of the core
SCHED_FIFO class, sharing its runqueue (e.g. SCHED_SPORADIC,
SCHED_QUOTA).

This means the core pick handler has to cascade into the
policy-specific pick handler, so that the right one is eventually
called.
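
For illustration only (not part of the patch), here is a minimal C
sketch of the cascading pick pattern, using made-up simplified types
(struct runqueue, core_pick, etc.) rather than the actual Cobalt
structures:

#include <stddef.h>

struct runqueue;
struct thread;

struct sched_class {
	struct thread *(*sched_pick)(struct runqueue *rq);
};

struct thread {
	const struct sched_class *sched_class;	/* active class, reflects boosts */
	struct thread *rq_next;			/* shared runqueue linkage */
};

struct runqueue {
	struct thread *head;			/* highest priority first */
	const struct sched_class *core_class;	/* the SCHED_FIFO-like core */
};

static struct thread *core_pick(struct runqueue *rq)
{
	struct thread *t = rq->head;

	if (t == NULL)
		return NULL;

	/*
	 * Cascade: a thread queued here by a policy piggybacking on
	 * the core runqueue is handed over to that policy's own pick
	 * handler, which can apply budget accounting while dequeuing
	 * from the same shared queue.
	 */
	if (t->sched_class != rq->core_class)
		return t->sched_class->sched_pick(rq);

	rq->head = t->rq_next;	/* plain dequeue for a core-class thread */
	return t;
}

In the patch below, xnsched_rt_pick() plays this role: it peeks at
the head of sched->rt.runnable and delegates to
thread->sched_class->sched_pick() whenever the active class is not
xnsched_class_rt.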

---

 include/cobalt/kernel/sched-quota.h |    1 -
 include/cobalt/kernel/sched-rt.h    |   14 ++++++++-----
 kernel/cobalt/sched-quota.c         |   31 +++++++++++++++++------------
 kernel/cobalt/sched-rt.c            |    5 -----
 kernel/cobalt/sched-sporadic.c      |   25 +++++++++--------------
 kernel/cobalt/sched.c               |   37 ++++++++++++++++++++++++++++++++++-
 6 files changed, 73 insertions(+), 40 deletions(-)

diff --git a/include/cobalt/kernel/sched-quota.h b/include/cobalt/kernel/sched-quota.h
index 97d2387..4a55052 100644
--- a/include/cobalt/kernel/sched-quota.h
+++ b/include/cobalt/kernel/sched-quota.h
@@ -53,7 +53,6 @@ struct xnsched_quota {
        xnticks_t period_ns;
        struct xntimer refill_timer;
        struct xntimer limit_timer;
-       xnsched_queue_t runnable;
        struct list_head groups;
 };
 
diff --git a/include/cobalt/kernel/sched-rt.h b/include/cobalt/kernel/sched-rt.h
index 3cb85ef..70790dd 100644
--- a/include/cobalt/kernel/sched-rt.h
+++ b/include/cobalt/kernel/sched-rt.h
@@ -66,11 +66,6 @@ static inline void __xnsched_rt_dequeue(struct xnthread *thread)
        xnsched_delq(&thread->sched->rt.runnable, thread);
 }
 
-static inline struct xnthread *__xnsched_rt_pick(struct xnsched *sched)
-{
-       return xnsched_getq(&sched->rt.runnable);
-}
-
 static inline void __xnsched_rt_setparam(struct xnthread *thread,
                                         const union xnsched_policy_param *p)
 {
@@ -111,6 +106,15 @@ static inline int xnsched_rt_init_thread(struct xnthread *thread)
        return 0;
 }
 
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+struct xnthread *xnsched_rt_pick(struct xnsched *sched);
+#else
+static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+       return xnsched_getq(&sched->rt.runnable);
+}
+#endif
+
 void xnsched_rt_tick(struct xnsched *sched);
 
 #endif /* !_COBALT_KERNEL_SCHED_RT_H */
diff --git a/kernel/cobalt/sched-quota.c b/kernel/cobalt/sched-quota.c
index a932ac4..9be8352 100644
--- a/kernel/cobalt/sched-quota.c
+++ b/kernel/cobalt/sched-quota.c
@@ -173,10 +173,11 @@ static void quota_refill_handler(struct xntimer *timer)
        struct xnsched_quota_group *tg;
        struct xnthread *thread, *tmp;
        struct xnsched_quota *qs;
+       struct xnsched *sched;
 
        qs = container_of(timer, struct xnsched_quota, refill_timer);
-
        XENO_BUGON(NUCLEUS, list_empty(&qs->groups));
+       sched = container_of(qs, struct xnsched, quota);
 
        list_for_each_entry(tg, &qs->groups, next) {
                /* Allot a new runtime budget for the group. */
@@ -195,7 +196,7 @@ static void quota_refill_handler(struct xntimer *timer)
                 */
                list_for_each_entry_safe_reverse(thread, tmp, &tg->expired, quota_expired) {
                        list_del_init(&thread->quota_expired);
-                       xnsched_addq(&qs->runnable, thread);
+                       xnsched_addq(&sched->rt.runnable, thread);
                }
        }
 
@@ -235,7 +236,6 @@ static void xnsched_quota_init(struct xnsched *sched)
        char limiter_name[XNOBJECT_NAME_LEN], refiller_name[XNOBJECT_NAME_LEN];
        struct xnsched_quota *qs = &sched->quota;
 
-       xnsched_initq(&qs->runnable);
        qs->period_ns = CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD * 1000ULL;
        INIT_LIST_HEAD(&qs->groups);
 
@@ -347,8 +347,8 @@ static void xnsched_quota_forget(struct xnthread *thread)
 
 static void xnsched_quota_kick(struct xnthread *thread)
 {
-       struct xnsched_quota *qs = &thread->sched->quota;
        struct xnsched_quota_group *tg = thread->quota;
+       struct xnsched *sched = thread->sched;
 
        /*
         * Allow a kicked thread to be elected for running until it
@@ -357,7 +357,7 @@ static void xnsched_quota_kick(struct xnthread *thread)
         */
        if (tg->run_budget_ns == 0 && !list_empty(&thread->quota_expired)) {
                list_del_init(&thread->quota_expired);
-               xnsched_addq_tail(&qs->runnable, thread);
+               xnsched_addq_tail(&sched->rt.runnable, thread);
        }
 }
 
@@ -369,39 +369,39 @@ static inline int thread_is_runnable(struct xnthread *thread)
 
 static void xnsched_quota_enqueue(struct xnthread *thread)
 {
-       struct xnsched_quota *qs = &thread->sched->quota;
        struct xnsched_quota_group *tg = thread->quota;
+       struct xnsched *sched = thread->sched;
 
        if (!thread_is_runnable(thread))
                list_add_tail(&thread->quota_expired, &tg->expired);
        else
-               xnsched_addq_tail(&qs->runnable, thread);
+               xnsched_addq_tail(&sched->rt.runnable, thread);
 
        tg->nr_active++;
 }
 
 static void xnsched_quota_dequeue(struct xnthread *thread)
 {
-       struct xnsched_quota *qs = &thread->sched->quota;
        struct xnsched_quota_group *tg = thread->quota;
+       struct xnsched *sched = thread->sched;
 
        if (!list_empty(&thread->quota_expired))
                list_del_init(&thread->quota_expired);
        else
-               xnsched_delq(&qs->runnable, thread);
+               xnsched_delq(&sched->rt.runnable, thread);
 
        tg->nr_active--;
 }
 
 static void xnsched_quota_requeue(struct xnthread *thread)
 {
-       struct xnsched_quota *qs = &thread->sched->quota;
        struct xnsched_quota_group *tg = thread->quota;
+       struct xnsched *sched = thread->sched;
 
        if (!thread_is_runnable(thread))
                list_add(&thread->quota_expired, &tg->expired);
        else
-               xnsched_addq(&qs->runnable, thread);
+               xnsched_addq(&sched->rt.runnable, thread);
 
        tg->nr_active++;
 }
@@ -428,13 +428,20 @@ static struct xnthread *xnsched_quota_pick(struct xnsched *sched)
        else
                otg->run_budget_ns = 0;
 pick:
-       next = xnsched_getq(&qs->runnable);
+       next = xnsched_getq(&sched->rt.runnable);
        if (next == NULL) {
                xntimer_stop(&qs->limit_timer);
                return NULL;
        }
 
+       /*
+        * As we basically piggyback on the SCHED_FIFO runqueue, make
+        * sure to detect non-quota threads.
+        */
        tg = next->quota;
+       if (tg == NULL)
+               return next;
+
        tg->run_start_ns = now;
 
        /*
diff --git a/kernel/cobalt/sched-rt.c b/kernel/cobalt/sched-rt.c
index 5e01063..ba3e520 100644
--- a/kernel/cobalt/sched-rt.c
+++ b/kernel/cobalt/sched-rt.c
@@ -83,11 +83,6 @@ static void xnsched_rt_rotate(struct xnsched *sched,
        xnsched_putback(thread);
 }
 
-static struct xnthread *xnsched_rt_pick(struct xnsched *sched)
-{
-       return __xnsched_rt_pick(sched);
-}
-
 void xnsched_rt_tick(struct xnsched *sched)
 {
        /*
diff --git a/kernel/cobalt/sched-sporadic.c b/kernel/cobalt/sched-sporadic.c
index 9ec3b0d..706cd66 100644
--- a/kernel/cobalt/sched-sporadic.c
+++ b/kernel/cobalt/sched-sporadic.c
@@ -358,7 +358,7 @@ static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched)
 
        next = xnsched_getq(&sched->rt.runnable);
        if (next == NULL)
-               goto swap_budgets;
+               goto swap;
 
        if (curr == next)
                return next;
@@ -366,23 +366,16 @@ static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched)
        /* Arm the drop timer for an incoming sporadic thread. */
        if (next->pss)
                sporadic_resume_activity(next);
-
-       /*
-        * Do not consider an outgoing thread that temporarily moved
-        * to the sporadic scheduling class (i.e. PIP enforcement): it
-        * has an infinite time budget to release asap what some
-        * sporadic thread wants, so there is no replenishment
-        * operation involved.
-        */
-swap_budgets:
-       if (curr->base_class != &xnsched_class_sporadic)
-               return next;
-
+swap:
        /*
-        * We are about to block or preempt a sporadic thread. Clear
-        * the drop timer, then schedule a replenishment operation.
+        * A non-sporadic outgoing thread is having a priority
+        * inheritance boost, so apply an infinite time budget as we
+        * want it to release the claimed resource asap. Otherwise,
+        * clear the drop timer, then schedule a replenishment
+        * operation.
         */
-       sporadic_suspend_activity(curr);
+       if (curr->pss)
+               sporadic_suspend_activity(curr);
 
        return next;
 }
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index dd80738..13fe9ac 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -276,7 +276,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 
        return NULL; /* Never executed because of the idle class. */
 #else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
-       thread = __xnsched_rt_pick(sched);
+       thread = xnsched_rt_pick(sched);
        if (unlikely(thread == NULL))
                thread = &sched->rootcb;
 
@@ -594,6 +594,41 @@ struct xnthread *xnsched_findq(struct xnsched_mlq *q, int prio)
        return list_first_entry(head, struct xnthread, rlink);
 }
 
+struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+       struct xnsched_mlq *q = &sched->rt.runnable;
+       struct xnthread *thread;
+       struct list_head *head;
+       int idx;
+
+       if (q->elems == 0)
+               return NULL;
+
+       /*
+        * Some scheduling policies may be implemented as variants of
+        * the core SCHED_FIFO class, sharing its runqueue
+        * (e.g. SCHED_SPORADIC, SCHED_QUOTA). This means that we have
+        * to do some cascading to call the right pick handler
+        * eventually.
+        */
+       idx = xnsched_weightq(q);
+       head = q->heads + idx;
+       XENO_BUGON(NUCLEUS, list_empty(head));
+
+       /*
+        * The active class (i.e. ->sched_class) is the one currently
+        * queuing the thread, reflecting any priority boost due to
+        * PIP.
+        */
+       thread = list_first_entry(head, struct xnthread, rlink);
+       if (unlikely(thread->sched_class != &xnsched_class_rt))
+               return thread->sched_class->sched_pick(sched);
+
+       del_q(q, &thread->rlink, idx);
+
+       return thread;
+}
+
 #else /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
 
 struct xnthread *xnsched_findq(struct list_head *q, int prio)

