Module: xenomai-forge
Branch: next
Commit: c17fca7588d0eb25e34edfa7a1dcf72bb654c793
URL: http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=c17fca7588d0eb25e34edfa7a1dcf72bb654c793

Author: Philippe Gerum <r...@xenomai.org>
Date:   Mon Apr 14 17:30:13 2014 +0200

cobalt/sched: use fixed level range for O(1) scheduler

Fix the valid range of runlevels in the O(1) scheduler queue to
[XNSCHED_RT_MIN_PRIO..XNSCHED_RT_MAX_PRIO]. Since threads may cross
policies freely during PIP boosts, every possible priority level must
be valid for all scheduler queues, regardless of the policy.
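
For illustration, a minimal stand-alone sketch (not the Xenomai sources) of
the idea: with one fixed [XNSCHED_RT_MIN_PRIO..XNSCHED_RT_MAX_PRIO] range
shared by every class queue, a thread whose priority was PIP-boosted into the
SCHED_RT range still maps to a valid level in whatever queue it lands on. The
MIN/MAX values below are placeholders and queue_index() is a hypothetical
helper, not the kernel's get_qindex().

/* Hypothetical sketch -- placeholder values, not the real constants. */
#include <assert.h>

#define RT_MIN_PRIO  0     /* stands in for XNSCHED_RT_MIN_PRIO */
#define RT_MAX_PRIO  99    /* stands in for XNSCHED_RT_MAX_PRIO */
#define MLQ_LEVELS   (RT_MAX_PRIO - RT_MIN_PRIO + 1)

/* Rescale a priority to a 0-based queue level.  Because the range is
 * fixed and policy-independent, any per-class queue can index any
 * priority a boosted thread may carry. */
static int queue_index(int prio)
{
	assert(prio >= RT_MIN_PRIO && prio <= RT_MAX_PRIO);
	return RT_MAX_PRIO - prio;    /* index 0 == highest priority */
}

int main(void)
{
	assert(queue_index(RT_MAX_PRIO) == 0);
	assert(queue_index(RT_MIN_PRIO) == MLQ_LEVELS - 1);
	return 0;
}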

---

 include/cobalt/kernel/sched-rt.h   |    2 +-
 include/cobalt/kernel/sched-weak.h |    4 ++--
 include/cobalt/kernel/sched.h      |    2 +-
 include/cobalt/kernel/schedqueue.h |   11 +++++------
 kernel/cobalt/sched-quota.c        |    3 +--
 kernel/cobalt/sched-rt.c           |    3 +--
 kernel/cobalt/sched-tp.c           |    7 +++----
 kernel/cobalt/sched-weak.c         |    3 +--
 kernel/cobalt/sched.c              |   11 ++++-------
 9 files changed, 19 insertions(+), 27 deletions(-)

diff --git a/include/cobalt/kernel/sched-rt.h b/include/cobalt/kernel/sched-rt.h
index 1b49be9..4655ac9 100644
--- a/include/cobalt/kernel/sched-rt.h
+++ b/include/cobalt/kernel/sched-rt.h
@@ -42,7 +42,7 @@
 #define XNSCHED_FIFO_MIN_PRIO  1
 #define XNSCHED_FIFO_MAX_PRIO  99
 
-#if XNSCHED_RT_NR_PRIO > XNSCHED_CLASS_MAX_PRIO ||     \
+#if XNSCHED_RT_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||        \
   (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&          \
    XNSCHED_RT_NR_PRIO > XNSCHED_MLQ_LEVELS)
 #error "RT class has too many priority levels"
diff --git a/include/cobalt/kernel/sched-weak.h b/include/cobalt/kernel/sched-weak.h
index 1a038d8..132c293 100644
--- a/include/cobalt/kernel/sched-weak.h
+++ b/include/cobalt/kernel/sched-weak.h
@@ -29,8 +29,8 @@
 #define XNSCHED_WEAK_MAX_PRIO  99
 #define XNSCHED_WEAK_NR_PRIO   (XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1)
 
-#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_MAX_PRIO ||   \
-       (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&     \
+#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||      \
+       (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&             \
         XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS)
 #error "WEAK class has too many priority levels"
 #endif
diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index f4a47f9..00c6fcf 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -164,7 +164,7 @@ struct xnsched_class {
        const char *name;
 };
 
-#define XNSCHED_CLASS_WEIGHT(n)                (n * XNSCHED_CLASS_MAX_PRIO)
+#define XNSCHED_CLASS_WEIGHT(n)                (n * XNSCHED_CLASS_WEIGHT_FACTOR)
 
 /* Placeholder for current thread priority */
 #define XNSCHED_RUNPRIO   0x80000000
diff --git a/include/cobalt/kernel/schedqueue.h b/include/cobalt/kernel/schedqueue.h
index b671f5c..15817b2 100644
--- a/include/cobalt/kernel/schedqueue.h
+++ b/include/cobalt/kernel/schedqueue.h
@@ -22,7 +22,7 @@
 
 #include <cobalt/kernel/list.h>
 
-#define XNSCHED_CLASS_MAX_PRIO         1024
+#define XNSCHED_CLASS_WEIGHT_FACTOR    1024
 
 #ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
 /*
@@ -40,7 +40,7 @@
 #define __MLQ_LONGS ((XNSCHED_MLQ_LEVELS+BITS_PER_LONG-1)/BITS_PER_LONG)
 
 struct xnsched_mlq {
-       int loprio, hiprio, elems;
+       int elems;
        unsigned long himap, lomap[__MLQ_LONGS];
        struct list_head heads[XNSCHED_MLQ_LEVELS];
 };
@@ -49,8 +49,7 @@ struct xnsched_mlq {
 
 struct xnthread;
 
-void xnsched_initq(struct xnsched_mlq *q,
-                  int loprio, int hiprio);
+void xnsched_initq(struct xnsched_mlq *q);
 
 void xnsched_addq(struct xnsched_mlq *q,
                  struct xnthread *thread);
@@ -81,11 +80,11 @@ typedef struct xnsched_mlq xnsched_queue_t;
 
 typedef struct list_head xnsched_queue_t;
 
-#define xnsched_initq(__q, __minp, __maxp)     INIT_LIST_HEAD(__q)
+#define xnsched_initq(__q)                     INIT_LIST_HEAD(__q)
 #define xnsched_emptyq_p(__q)                  list_empty(__q)
 #define xnsched_addq(__q, __t)                 list_add_prilf(__t, __q, cprio, rlink)
 #define xnsched_addq_tail(__q, __t)            list_add_priff(__t, __q, cprio, rlink)
-#define xnsched_delq(__q, __t)                 list_del(&(__t)->rlink)
+#define xnsched_delq(__q, __t)                 (void)(__q), list_del(&(__t)->rlink)
 #define xnsched_getq(__q)                                                      \
        ({                                                                      \
                struct xnthread *__t = NULL;                                    \
diff --git a/kernel/cobalt/sched-quota.c b/kernel/cobalt/sched-quota.c
index efe5c2b..fd56003 100644
--- a/kernel/cobalt/sched-quota.c
+++ b/kernel/cobalt/sched-quota.c
@@ -224,8 +224,7 @@ static void xnsched_quota_init(struct xnsched *sched)
         * CAUTION: we may inherit RT priority during PIP boost, so we
         * need as many levels as SCHED_RT defines.
         */
-       xnsched_initq(&qs->runnable,
-                     XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
+       xnsched_initq(&qs->runnable);
        qs->period_ns = CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD * 1000ULL;
        INIT_LIST_HEAD(&qs->groups);
 
diff --git a/kernel/cobalt/sched-rt.c b/kernel/cobalt/sched-rt.c
index c5001fc..9afa900 100644
--- a/kernel/cobalt/sched-rt.c
+++ b/kernel/cobalt/sched-rt.c
@@ -23,8 +23,7 @@
 
 static void xnsched_rt_init(struct xnsched *sched)
 {
-       xnsched_initq(&sched->rt.runnable,
-                   XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
+       xnsched_initq(&sched->rt.runnable);
 }
 
 static void xnsched_rt_requeue(struct xnthread *thread)
diff --git a/kernel/cobalt/sched-tp.c b/kernel/cobalt/sched-tp.c
index 6c024d4..676b96a 100644
--- a/kernel/cobalt/sched-tp.c
+++ b/kernel/cobalt/sched-tp.c
@@ -95,10 +95,9 @@ static void xnsched_tp_init(struct xnsched *sched)
         * need as many levels as SCHED_RT defines.
         */
        for (n = 0; n < CONFIG_XENO_OPT_SCHED_TP_NRPART; n++)
-               xnsched_initq(&tp->partitions[n].runnable,
-                             XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
-       xnsched_initq(&tp->idle.runnable,
-                     XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
+               xnsched_initq(&tp->partitions[n].runnable);
+
+       xnsched_initq(&tp->idle.runnable);
 
 #ifdef CONFIG_SMP
        sprintf(timer_name, "[tp-tick/%u]", sched->cpu);
diff --git a/kernel/cobalt/sched-weak.c b/kernel/cobalt/sched-weak.c
index e55a25b..fc4c0b8 100644
--- a/kernel/cobalt/sched-weak.c
+++ b/kernel/cobalt/sched-weak.c
@@ -20,8 +20,7 @@
 
 static void xnsched_weak_init(struct xnsched *sched)
 {
-       xnsched_initq(&sched->weak.runnable,
-                     XNSCHED_WEAK_MIN_PRIO, XNSCHED_WEAK_MAX_PRIO);
+       xnsched_initq(&sched->weak.runnable);
 }
 
 static void xnsched_weak_requeue(struct xnthread *thread)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 6ee8fa8..d89889e 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -492,15 +492,11 @@ void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
 
 #ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
 
-void xnsched_initq(struct xnsched_mlq *q, int loprio, int hiprio)
+void xnsched_initq(struct xnsched_mlq *q)
 {
        int prio;
 
-       XENO_BUGON(NUCLEUS, hiprio - loprio + 1 >= XNSCHED_MLQ_LEVELS);
-
        q->elems = 0;
-       q->loprio = loprio;
-       q->hiprio = hiprio;
        q->himap = 0;
        memset(&q->lomap, 0, sizeof(q->lomap));
 
@@ -510,7 +506,8 @@ void xnsched_initq(struct xnsched_mlq *q, int loprio, int hiprio)
 
 static inline int get_qindex(struct xnsched_mlq *q, int prio)
 {
-       XENO_BUGON(NUCLEUS, prio < q->loprio || prio > q->hiprio);
+       XENO_BUGON(NUCLEUS, prio < XNSCHED_RT_MIN_PRIO ||
+                  prio > XNSCHED_RT_MAX_PRIO);
        /*
         * BIG FAT WARNING: We need to rescale the priority level to a
         * 0-based range. We use ffnz() to scan the bitmap which MUST
@@ -519,7 +516,7 @@ static inline int get_qindex(struct xnsched_mlq *q, int prio)
         * significant bits will be found first when scanning the
         * bitmaps).
         */
-       return q->hiprio - prio;
+       return XNSCHED_RT_MAX_PRIO - prio;
 }
 
 static struct list_head *add_q(struct xnsched_mlq *q, int prio)
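
For completeness, a hedged sketch of the O(1) pick path that the rescaling in
get_qindex() serves: the highest priority occupies bitmap index 0 so that a
find-first-set scan locates it first. This is stand-alone illustration code
only; it uses the GCC __builtin_ctzl() builtin in place of the kernel's ffnz()
helper, and mlq_sketch is a simplified stand-in for struct xnsched_mlq with a
placeholder level count.

/* Illustrative only -- simplified stand-in for struct xnsched_mlq. */
#include <limits.h>
#include <stdio.h>

#define MLQ_LEVELS     100                       /* placeholder level count */
#define BITS_PER_LONG  (int)(CHAR_BIT * sizeof(long))
#define MLQ_LONGS      ((MLQ_LEVELS + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct mlq_sketch {
	unsigned long himap;                     /* one bit per lomap word */
	unsigned long lomap[MLQ_LONGS];          /* one bit per priority level */
};

static void mark_level(struct mlq_sketch *q, int idx)
{
	q->lomap[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
	q->himap |= 1UL << (idx / BITS_PER_LONG);
}

/* Least significant set bit first: level 0 is found before any other,
 * which is why get_qindex() maps the highest priority to index 0. */
static int find_first_level(const struct mlq_sketch *q)
{
	int hi = __builtin_ctzl(q->himap);       /* assumes at least one level set */
	int lo = __builtin_ctzl(q->lomap[hi]);
	return hi * BITS_PER_LONG + lo;
}

int main(void)
{
	struct mlq_sketch q = { 0 };

	mark_level(&q, 42);                      /* some mid-range level */
	mark_level(&q, 3);                       /* higher priority, i.e. smaller index */

	printf("next level to serve: %d\n", find_first_level(&q));   /* prints 3 */
	return 0;
}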

