Module: xenomai-forge
Branch: next
Commit: 9ba389886c9940e490025699d4510308ac50cbbb
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=9ba389886c9940e490025699d4510308ac50cbbb

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Sep  5 15:31:39 2013 +0200

cobalt/timer: rework round-robin scheduling feature

Sanitize and fix the implementation, by replacing the per-thread rrb
timer nonsense with a single per-scheduler timer.

This is not only more efficient, but it also fixes the case where
multiple rrb timers timing out within the same (short) time frame
would cause some threads in the same priority group to starve.

---

 include/cobalt/kernel/sched-rt.h |    2 +-
 include/cobalt/kernel/sched.h    |   21 ++++++++++++---------
 include/cobalt/kernel/thread.h   |    2 --
 kernel/cobalt/sched-rt.c         |    4 ++--
 kernel/cobalt/sched.c            |   30 +++++++++++++++++++++++++++---
 kernel/cobalt/thread.c           |   35 +++++++++++++----------------------
 6 files changed, 55 insertions(+), 39 deletions(-)

diff --git a/include/cobalt/kernel/sched-rt.h b/include/cobalt/kernel/sched-rt.h
index cf495ee..7f08bf2 100644
--- a/include/cobalt/kernel/sched-rt.h
+++ b/include/cobalt/kernel/sched-rt.h
@@ -110,6 +110,6 @@ static inline int xnsched_rt_init_thread(struct xnthread 
*thread)
        return 0;
 }
 
-void xnsched_rt_tick(struct xnthread *curr);
+void xnsched_rt_tick(struct xnsched *sched);
 
 #endif /* !_COBALT_KERNEL_SCHED_RT_H */
diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 21160e0..addfd0c 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -88,6 +88,8 @@ struct xnsched {
        volatile unsigned inesting;
        /*!< Host timer. */
        struct xntimer htimer;
+       /*!< Round-robin timer. */
+       struct xntimer rrbtimer;
        /*!< Root thread control block. */
        struct xnthread rootcb;
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
@@ -131,7 +133,7 @@ struct xnsched_class {
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
-       void (*sched_tick)(struct xnthread *thread);
+       void (*sched_tick)(struct xnsched *sched);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
@@ -507,19 +509,20 @@ static inline struct xnsched_class 
*xnsched_root_class(struct xnsched *sched)
        return sched->rootcb.sched_class;
 }
 
-static inline void xnsched_tick(struct xnthread *thread)
+static inline void xnsched_tick(struct xnsched *sched)
 {
-       struct xnsched_class *sched_class = thread->sched_class;
+       struct xnthread *curr = sched->curr;
+       struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
-        * scheduling class, which excludes temporary PIP boosts.
+        * scheduling class, which excludes temporary PIP boosts, and
+        * does not hold the scheduler lock.
         */
-       if (sched_class != &xnsched_class_idle &&
-           sched_class == thread->base_class &&
-           thread->sched->curr == thread &&
-           xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == 
XNRRB)
-               sched_class->sched_tick(thread);
+       if (sched_class == curr->base_class &&
+           sched_class->sched_tick &&
+           xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == 
XNRRB)
+               sched_class->sched_tick(sched);
 }
 
 #ifdef CONFIG_XENO_OPT_SCHED_CLASSES
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index 58db4ed..feb4da2 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -129,8 +129,6 @@ typedef struct xnthread {
 
        struct xntimer ptimer;          /* Periodic timer */
 
-       struct xntimer rrbtimer;        /* Round-robin timer */
-
        xnticks_t rrperiod;             /* Allotted round-robin period (ns) */
 
        struct xnthread_wait_context *wcontext; /* Active wait context. */
diff --git a/kernel/cobalt/sched-rt.c b/kernel/cobalt/sched-rt.c
index 3de0299..3ec8f89 100644
--- a/kernel/cobalt/sched-rt.c
+++ b/kernel/cobalt/sched-rt.c
@@ -89,7 +89,7 @@ static struct xnthread *xnsched_rt_pick(struct xnsched *sched)
        return __xnsched_rt_pick(sched);
 }
 
-void xnsched_rt_tick(struct xnthread *curr)
+void xnsched_rt_tick(struct xnsched *sched)
 {
        /*
         * The round-robin time credit is only consumed by a running
@@ -98,7 +98,7 @@ void xnsched_rt_tick(struct xnthread *curr)
         * exhausted for the running thread, move it back to the
         * runnable queue at the end of its priority group.
         */
-       xnsched_putback(curr);
+       xnsched_putback(sched->curr);
 }
 
 void xnsched_rt_setparam(struct xnthread *thread,
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 9359584..ab26ab1 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -123,8 +123,15 @@ static void watchdog_handler(struct xntimer *timer)
 
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
 
+static void roundrobin_handler(struct xntimer *timer)
+{
+       struct xnsched *sched = container_of(timer, struct xnsched, rrbtimer);
+       xnsched_tick(sched);
+}
+
 void xnsched_init(struct xnsched *sched, int cpu)
 {
+       char rrbtimer_name[XNOBJECT_NAME_LEN];
        char htimer_name[XNOBJECT_NAME_LEN];
        char root_name[XNOBJECT_NAME_LEN];
        union xnsched_policy_param param;
@@ -134,10 +141,12 @@ void xnsched_init(struct xnsched *sched, int cpu)
 #ifdef CONFIG_SMP
        sched->cpu = cpu;
        sprintf(htimer_name, "[host-timer/%u]", cpu);
+       sprintf(rrbtimer_name, "[rrb-timer/%u]", cpu);
        sprintf(root_name, "ROOT/%u", cpu);
        cpus_clear(sched->resched);
 #else
        strcpy(htimer_name, "[host-timer]");
+       strcpy(rrbtimer_name, "[rrb-timer]");
        strcpy(root_name, "ROOT");
 #endif
        for_each_xnsched_class(p) {
@@ -167,6 +176,10 @@ void xnsched_init(struct xnsched *sched, int cpu)
        xntimer_init(&sched->htimer, &nkclock, NULL, &sched->rootcb);
        xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
        xntimer_set_name(&sched->htimer, htimer_name);
+       xntimer_init(&sched->rrbtimer, &nkclock,
+                    roundrobin_handler, &sched->rootcb);
+       xntimer_set_name(&sched->rrbtimer, rrbtimer_name);
+       xntimer_set_priority(&sched->rrbtimer, XNTIMER_LOPRIO);
 
        xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
 #ifdef CONFIG_XENO_HW_FPU
@@ -188,14 +201,25 @@ void xnsched_init(struct xnsched *sched, int cpu)
 void xnsched_destroy(struct xnsched *sched)
 {
        xntimer_destroy(&sched->htimer);
+       xntimer_destroy(&sched->rrbtimer);
        xntimer_destroy(&sched->rootcb.ptimer);
        xntimer_destroy(&sched->rootcb.rtimer);
-       xntimer_destroy(&sched->rootcb.rrbtimer);
 #ifdef CONFIG_XENO_OPT_WATCHDOG
        xntimer_destroy(&sched->wdtimer);
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
 }
 
+static inline void set_thread_running(struct xnsched *sched,
+                                     struct xnthread *thread)
+{
+       xnthread_clear_state(thread, XNREADY);
+       if (xnthread_test_state(thread, XNRRB))
+               xntimer_start(&sched->rrbtimer,
+                             thread->rrperiod, XN_INFINITE, XN_RELATIVE);
+       else
+               xntimer_stop(&sched->rrbtimer);
+}
+
 /* Must be called with nklock locked, interrupts off. */
 struct xnthread *xnsched_pick_next(struct xnsched *sched)
 {
@@ -231,7 +255,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
        for_each_xnsched_class(p) {
                thread = p->sched_pick(sched);
                if (thread) {
-                       xnthread_clear_state(thread, XNREADY);
+                       set_thread_running(sched, thread);
                        return thread;
                }
        }
@@ -242,7 +266,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
        if (unlikely(thread == NULL))
                thread = &sched->rootcb;
 
-       xnthread_clear_state(thread, XNREADY);
+       set_thread_running(sched, thread);
 
        return thread;
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index eb2dd1f..b68187d 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -72,13 +72,6 @@ static void periodic_handler(struct xntimer *timer)
        xntimer_set_sched(timer, thread->sched);
 }
 
-static void roundrobin_handler(struct xntimer *timer)
-{
-       struct xnthread *thread = container_of(timer, struct xnthread, 
rrbtimer);
-       xnsched_tick(thread);
-       xntimer_set_sched(timer, thread->sched);
-}
-
 struct kthread_arg {
        struct xnthread *thread;
        struct completion *done;
@@ -198,9 +191,6 @@ int __xnthread_init(struct xnthread *thread,
        xntimer_init(&thread->ptimer, &nkclock, periodic_handler, thread);
        xntimer_set_name(&thread->ptimer, thread->name);
        xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);
-       xntimer_init(&thread->rrbtimer, &nkclock, roundrobin_handler, thread);
-       xntimer_set_name(&thread->rrbtimer, thread->name);
-       xntimer_set_priority(&thread->rrbtimer, XNTIMER_LOPRIO);
 
        thread->init_class = sched_class;
        thread->base_class = NULL; /* xnsched_set_policy() will set it. */
@@ -495,7 +485,6 @@ void __xnthread_cleanup(struct xnthread *curr)
 
        xntimer_destroy(&curr->rtimer);
        xntimer_destroy(&curr->ptimer);
-       xntimer_destroy(&curr->rrbtimer);
 
        if (curr->selector) {
                xnselector_destroy(curr->selector);
@@ -1484,12 +1473,13 @@ EXPORT_SYMBOL_GPL(xnthread_wait_period);
  */
 int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
 {
+       struct xnsched *sched;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
 
+       sched = thread->sched;
        thread->rrperiod = quantum;
-       xntimer_stop(&thread->rrbtimer);
 
        if (quantum != XN_INFINITE) {
                if (thread->base_class->sched_tick == NULL) {
@@ -1497,11 +1487,14 @@ int xnthread_set_slice(struct xnthread *thread, 
xnticks_t quantum)
                        return -EINVAL;
                }
                xnthread_set_state(thread, XNRRB);
-               xntimer_set_sched(&thread->rrbtimer, thread->sched);
-               xntimer_start(&thread->rrbtimer,
-                             quantum, quantum, XN_RELATIVE);
-       } else
+               if (sched->curr == thread)
+                       xntimer_start(&sched->rrbtimer,
+                                     quantum, XN_INFINITE, XN_RELATIVE);
+       } else {
                xnthread_clear_state(thread, XNRRB);
+               if (sched->curr == thread)
+                       xntimer_stop(&sched->rrbtimer);
+       }
 
        xnlock_put_irqrestore(&nklock, s);
 
@@ -1672,14 +1665,12 @@ int xnthread_migrate(int cpu)
        xnsched_migrate(thread, sched);
 
        /*
-        * Migrate the thread's periodic and round-robin timers. We
-        * don't have to care about the resource timer, since we can
-        * only deal with the current thread, which is, well, running,
-        * so it can't be sleeping on any timed wait at the moment.
+        * Migrate the thread's periodic timer. We don't have to care
+        * about the resource timer, since we can only deal with the
+        * current thread, which is, well, running, so it can't be
+        * sleeping on any timed wait at the moment.
         */
        __xntimer_migrate(&thread->ptimer, sched);
-       if (xnthread_test_state(thread, XNRRB))
-               __xntimer_migrate(&thread->rrbtimer, sched);
 
        /*
         * Reset execution time measurement period so that we don't


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to