Module: xenomai-forge
Branch: next
Commit: 472b44bfecf566cd14eb2c1ed8c6eecd3fe19228
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=472b44bfecf566cd14eb2c1ed8c6eecd3fe19228

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed Jun 19 16:34:03 2013 +0200

cobalt/kernel/sched: turn runqueues into regular kernel lists

---

 include/cobalt/kernel/list.h       |   20 +++-
 include/cobalt/kernel/sched-rt.h   |   11 +--
 include/cobalt/kernel/sched-tp.h   |   33 ++++---
 include/cobalt/kernel/schedqueue.h |  121 ++++++-----------------
 include/cobalt/kernel/thread.h     |   16 ++-
 kernel/cobalt/sched-rt.c           |   11 +-
 kernel/cobalt/sched-sporadic.c     |    9 +-
 kernel/cobalt/sched-tp.c           |   40 ++++----
 kernel/cobalt/sched-weak.c         |   15 +--
 kernel/cobalt/sched.c              |  191 +++++++++++++++++------------------
 kernel/cobalt/thread.c             |    1 -
 11 files changed, 207 insertions(+), 261 deletions(-)

diff --git a/include/cobalt/kernel/list.h b/include/cobalt/kernel/list.h
index bc573e8..dbbc3b7 100644
--- a/include/cobalt/kernel/list.h
+++ b/include/cobalt/kernel/list.h
@@ -22,18 +22,32 @@
 
 #include <linux/list.h>
 
-#define list_add_priff(__new, __head, __member_pri, __member_next)		\
+#define __list_add_pri(__new, __head, __member_pri, __member_next, __relop)	\
 do {									\
 	typeof(*__new) *__pos;						\
 	if (list_empty(__head))						\
 		list_add(&(__new)->__member_next, __head);		\
 	else {								\
 		list_for_each_entry_reverse(__pos, __head, __member_next) { \
-			if ((__new)->__member_pri <= __pos->__member_pri) \
+			if ((__new)->__member_pri __relop __pos->__member_pri) \
 				break;					\
 		}							\
 		list_add(&(__new)->__member_next, &__pos->__member_next); \
-		}							\
+	}								\
 } while (0)
 
+#define list_add_priff(__new, __head, __member_pri, __member_next)	\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <=)
+
+#define list_add_prilf(__new, __head, __member_pri, __member_next)	\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <)
+
+#define list_get_entry(__head, __type, __member)               \
+  ({                                                           \
+         __type *__item;                                       \
+         __item = list_first_entry(__head, __type, __member);  \
+         list_del(&__item->__member);                          \
+         __item;                                               \
+  })
+
 #endif /* !_COBALT_KERNEL_LIST_H_ */
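
The factored __list_add_pri() helper keeps the list sorted by
descending priority (head = highest), and the relational operator
decides where a new entry lands among peers of equal priority: <=
(list_add_priff) stops at the first equal peer found from the tail and
inserts behind it, giving FIFO ordering among equals, while <
(list_add_prilf) skips past equal peers and inserts ahead of them,
giving LIFO ordering. A minimal usage sketch, with a hypothetical item
type that is not part of the patch:

    struct item {
            int prio;                 /* higher value = higher priority */
            struct list_head next;    /* link in the sorted list */
    };

    static LIST_HEAD(item_list);

    static void queue_item(struct item *it)
    {
            /* Insert by ->prio, after peers of equal priority. */
            list_add_priff(it, &item_list, prio, next);
    }

    static struct item *dequeue_item(void)
    {
            if (list_empty(&item_list))
                    return NULL;
            /* Unlink and return the highest-priority item at the head. */
            return list_get_entry(&item_list, struct item, next);
    }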
diff --git a/include/cobalt/kernel/sched-rt.h b/include/cobalt/kernel/sched-rt.h
index 28be9f0..5b0fecf 100644
--- a/include/cobalt/kernel/sched-rt.h
+++ b/include/cobalt/kernel/sched-rt.h
@@ -54,25 +54,22 @@ extern struct xnsched_class xnsched_class_rt;
 
 static inline void __xnsched_rt_requeue(struct xnthread *thread)
 {
-       sched_insertpql(&thread->sched->rt.runnable,
-                       &thread->rlink, thread->cprio);
+       sched_insertqlf(&thread->sched->rt.runnable, thread);
 }
 
 static inline void __xnsched_rt_enqueue(struct xnthread *thread)
 {
-       sched_insertpqf(&thread->sched->rt.runnable,
-                       &thread->rlink, thread->cprio);
+       sched_insertqff(&thread->sched->rt.runnable, thread);
 }
 
 static inline void __xnsched_rt_dequeue(struct xnthread *thread)
 {
-       sched_removepq(&thread->sched->rt.runnable, &thread->rlink);
+       sched_removeq(&thread->sched->rt.runnable, thread);
 }
 
 static inline struct xnthread *__xnsched_rt_pick(struct xnsched *sched)
 {
-       struct xnpholder *h = sched_getpq(&sched->rt.runnable);
-       return h ? link2thread(h, rlink) : NULL;
+       return sched_getq(&sched->rt.runnable);
 }
 
 static inline void __xnsched_rt_setparam(struct xnthread *thread,
diff --git a/include/cobalt/kernel/sched-tp.h b/include/cobalt/kernel/sched-tp.h
index 7fdb9eb..8c70041 100644
--- a/include/cobalt/kernel/sched-tp.h
+++ b/include/cobalt/kernel/sched-tp.h
@@ -1,6 +1,6 @@
-/*!\file sched-tp.h
- * \brief Definitions for the TP scheduling class.
- * \author Philippe Gerum
+/** @file sched-tp.h
+ * @brief Definitions for the TP scheduling class.
+ * @author Philippe Gerum
  *
  * Copyright (C) 2008 Philippe Gerum <r...@xenomai.org>.
  *
@@ -43,23 +43,28 @@ struct xnsched_tp_schedule {
 };
 
 struct xnsched_tp {
-
        struct xnsched_tpslot {
-               xnsched_queue_t runnable; /*!< Runnable thread queue. */
+               /** Per-partition runqueue. */
+               xnsched_queue_t runnable;
        } partitions[CONFIG_XENO_OPT_SCHED_TP_NRPART];
-
-       struct xnsched_tpslot idle;     /* !< Idle slot for passive windows. */
-       struct xnsched_tpslot *tps;     /* !< Active partition slot */
-       struct xntimer tf_timer;        /* !< Time frame timer */
-       struct xnsched_tp_schedule *gps; /* !< Global partition schedule */
-       int wnext;                       /* !< Next partition window */
-       xnticks_t tf_start;              /* !< Start of next time frame */
-       struct xnqueue threads;          /* !< Assigned thread queue */
+       /** Idle slot for passive windows. */
+       struct xnsched_tpslot idle;
+       /** Active partition slot */
+       struct xnsched_tpslot *tps;
+       /** Time frame timer */
+       struct xntimer tf_timer;
+       /** Global partition schedule */
+       struct xnsched_tp_schedule *gps;
+       /** Window index of next partition */
+       int wnext;
+       /** Start of next time frame */
+       xnticks_t tf_start;
+       /** Assigned thread queue */
+       struct list_head threads;
 };
 
 static inline int xnsched_tp_init_thread(struct xnthread *thread)
 {
-       inith(&thread->tp_link);
        thread->tps = NULL;
 
        return 0;
diff --git a/include/cobalt/kernel/schedqueue.h b/include/cobalt/kernel/schedqueue.h
index 895e073..36bf590 100644
--- a/include/cobalt/kernel/schedqueue.h
+++ b/include/cobalt/kernel/schedqueue.h
@@ -20,7 +20,7 @@
 #ifndef _COBALT_KERNEL_SCHEDQUEUE_H
 #define _COBALT_KERNEL_SCHEDQUEUE_H
 
-#include <cobalt/kernel/queue.h>
+#include <cobalt/kernel/list.h>
 
 #define XNSCHED_CLASS_MAX_PRIO         1024
 
@@ -34,122 +34,61 @@
 #define XNSCHED_MLQ_LEVELS  264
 
 #if BITS_PER_LONG * BITS_PER_LONG < XNSCHED_MLQ_LEVELS
-#error "Internal bitmap cannot hold so many priority levels"
+#error "internal bitmap cannot hold so many priority levels"
 #endif
 
 #define __MLQ_LONGS ((XNSCHED_MLQ_LEVELS+BITS_PER_LONG-1)/BITS_PER_LONG)
 
 struct xnsched_mlq {
-
        int loprio, hiprio, elems;
        unsigned long himap, lomap[__MLQ_LONGS];
-       struct xnqueue queue[XNSCHED_MLQ_LEVELS];
-
+       struct list_head heads[XNSCHED_MLQ_LEVELS];
 };
 
 #undef __MLQ_LONGS
 
-void initmlq(struct xnsched_mlq *q, int loprio, int hiprio);
+struct xnthread;
 
-void addmlq(struct xnsched_mlq *q,
-           struct xnpholder *holder, int idx, int lifo);
+void sched_initq(struct xnsched_mlq *q,
+                int loprio, int hiprio);
 
-void removemlq(struct xnsched_mlq *q, struct xnpholder *holder);
+void sched_insertqff(struct xnsched_mlq *q, 
+                    struct xnthread *thread);
 
-struct xnpholder *findmlqh(struct xnsched_mlq *q, int prio);
+void sched_insertqlf(struct xnsched_mlq *q,
+                    struct xnthread *thread);
 
-struct xnpholder *getheadmlq(struct xnsched_mlq *q);
+void sched_removeq(struct xnsched_mlq *q,
+                  struct xnthread *thread);
 
-struct xnpholder *getmlq(struct xnsched_mlq *q);
+struct xnthread *sched_getq(struct xnsched_mlq *q);
 
-struct xnpholder *nextmlq(struct xnsched_mlq *q,
-                         struct xnpholder *h);
-
-static inline int countmlq(struct xnsched_mlq *q)
-{
-       return q->elems;
-}
-
-static inline int emptymlq_p(struct xnsched_mlq *q)
+static inline int sched_emptyq_p(struct xnsched_mlq *q)
 {
        return q->himap == 0;
 }
 
-static inline int indexmlq(struct xnsched_mlq *q, int prio)
-{
-       XENO_ASSERT(QUEUES,
-                   prio >= q->loprio && prio <= q->hiprio,
-                   xnpod_fatal("priority level %d is out of range ", prio));
-       /*
-        * BIG FAT WARNING: We need to rescale the priority level to a
-        * 0-based range. We use ffnz() to scan the bitmap which MUST
-        * be based on a bit scan forward op. Therefore, the lower the
-        * index value, the higher the priority (since least
-        * significant bits will be found first when scanning the
-        * bitmaps).
-        */
-       return q->hiprio - prio;
-}
-
-static inline int ffsmlq(struct xnsched_mlq *q)
-{
-       int hi = ffnz(q->himap);
-       int lo = ffnz(q->lomap[hi]);
-       return hi * BITS_PER_LONG + lo; /* Result is undefined if none set. */
-}
-
-static inline void insertmlql(struct xnsched_mlq *q,
-                             struct xnpholder *holder, int prio)
-{
-       addmlq(q, holder, indexmlq(q, prio), 1);
-}
-
-static inline void insertmlqf(struct xnsched_mlq *q,
-                             struct xnpholder *holder, int prio)
-{
-       addmlq(q, holder, indexmlq(q, prio), 0);
-}
-
-static inline void appendmlq(struct xnsched_mlq *q, struct xnpholder *holder)
-{
-       addmlq(q, holder, indexmlq(q, q->hiprio), 0);
-}
-
-static inline void prependmlq(struct xnsched_mlq *q, struct xnpholder *holder)
-{
-       addmlq(q, holder, indexmlq(q, q->loprio), 1);
-}
-
 typedef struct xnsched_mlq xnsched_queue_t;
 
-#define sched_initpq           initmlq
-#define sched_emptypq_p                emptymlq_p
-#define sched_insertpql                insertmlql
-#define sched_insertpqf                insertmlqf
-#define sched_appendpq         appendmlq
-#define sched_prependpq                prependmlq
-#define sched_removepq         removemlq
-#define sched_getheadpq                getheadmlq
-#define sched_nextpq           nextmlq
-#define sched_getpq            getmlq
-#define sched_findpqh          findmlqh
-
 #else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */
 
-typedef xnpqueue_t xnsched_queue_t;
-
-#define sched_initpq(q, minp, maxp)    initpq(q)
-#define sched_emptypq_p                        emptypq_p
-#define sched_insertpql                        insertpql
-#define sched_insertpqf                        insertpqf
-#define sched_appendpq                 appendpq
-#define sched_prependpq                        prependpq
-#define sched_removepq                 removepq
-#define sched_getheadpq                        getheadpq
-#define sched_nextpq                   nextpq
-#define sched_getpq                    getpq
-#define sched_findpqh                  findpqh
+typedef struct list_head xnsched_queue_t;
+
+#define sched_initq(__q, __minp, __maxp)       INIT_LIST_HEAD(__q)
+#define sched_emptyq_p(__q)                    list_empty(__q)
+#define sched_insertqlf(__q, __t)	list_add_prilf(__t, __q, cprio, rlink)
+#define sched_insertqff(__q, __t)	list_add_priff(__t, __q, cprio, rlink)
+#define sched_removeq(__q, __t)		list_del(&(__t)->rlink)
+#define sched_getq(__q)							\
+	({								\
+		struct xnthread *__t = NULL;				\
+		if (!list_empty(__q))					\
+			__t = list_get_entry(__q, struct xnthread, rlink); \
+		__t;							\
+	})
 
 #endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
 
+struct xnthread *sched_findq(xnsched_queue_t *q, int prio);
+
 #endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */
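
Under CONFIG_XENO_OPT_SCALABLE_SCHED the queue remains the multi-level
bitmap structure; otherwise it is now a single priority-ordered list.
Either way, scheduling classes go through the same sched_*q()
interface, and sched_getq() yields NULL on an empty queue in both
variants. A sketch of the calling pattern (hypothetical class code,
mirroring the RT hooks above):

    static void demo_enqueue(xnsched_queue_t *runq, struct xnthread *thread)
    {
            /* FIFO among threads of equal ->cprio. */
            sched_insertqff(runq, thread);
    }

    static struct xnthread *demo_pick(xnsched_queue_t *runq)
    {
            /* Highest-priority runnable thread, or NULL. */
            return sched_getq(runq);
    }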
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index 4cc8ad4..1adb32e 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -145,7 +145,7 @@ struct xnthread_user_window {
 
 #ifdef __KERNEL__
 
-#include <linux/sched.h>
+#include <cobalt/kernel/list.h>
 #include <cobalt/kernel/stat.h>
 #include <cobalt/kernel/timer.h>
 #include <cobalt/kernel/registry.h>
@@ -200,7 +200,7 @@ typedef struct xnthread {
 
 #ifdef CONFIG_XENO_OPT_SCHED_TP
 	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
-       struct xnholder tp_link;        /* Link in per-sched TP thread queue */
+       struct list_head tp_link;       /* Link in per-sched TP thread queue */
 #endif
 #ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
@@ -210,18 +210,22 @@ typedef struct xnthread {
 
        cpumask_t affinity;     /* Processor affinity. */
 
-       int bprio;                      /* Base priority (before PIP boost) */
+       int bprio;              /* Base priority (before PIP boost) */
 
-       int cprio;                      /* Current priority */
+       int cprio;              /* Current priority */
 
        /**
         * Weighted priority (cprio + scheduling class weight).
         */
        int wprio;
 
-       u_long schedlck;                /*!< Scheduler lock count. */
+       u_long schedlck;        /** Scheduler lock count. */
 
-       xnpholder_t rlink;              /* Thread holder in ready queue */
+       /**
+        * Thread holder in xnsched runnable queue. Prioritized by
+        * thread->cprio.
+        */
+       struct list_head rlink;
 
        /**
         * Thread holder in xnsynch pendq. Prioritized by
diff --git a/kernel/cobalt/sched-rt.c b/kernel/cobalt/sched-rt.c
index fe383a7..91a4b41 100644
--- a/kernel/cobalt/sched-rt.c
+++ b/kernel/cobalt/sched-rt.c
@@ -26,7 +26,8 @@
 
 static void xnsched_rt_init(struct xnsched *sched)
 {
-	sched_initpq(&sched->rt.runnable, XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
+       sched_initq(&sched->rt.runnable,
+                   XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
 }
 
 static void xnsched_rt_requeue(struct xnthread *thread)
@@ -59,9 +60,8 @@ static void xnsched_rt_rotate(struct xnsched *sched,
                              const union xnsched_policy_param *p)
 {
        struct xnthread *thread, *curr;
-       struct xnpholder *h;
 
-       if (sched_emptypq_p(&sched->rt.runnable))
+       if (sched_emptyq_p(&sched->rt.runnable))
                return; /* No runnable thread in this class. */
 
        curr = sched->curr;
@@ -69,10 +69,9 @@ static void xnsched_rt_rotate(struct xnsched *sched,
        if (p->rt.prio == XNSCHED_RUNPRIO)
                thread = curr;
        else {
-               h = sched_findpqh(&sched->rt.runnable, p->rt.prio);
-               if (h == NULL)
+               thread = sched_findq(&sched->rt.runnable, p->rt.prio);
+               if (thread == NULL)
                        return;
-               thread = link2thread(h, rlink);
        }
 
        /*
diff --git a/kernel/cobalt/sched-sporadic.c b/kernel/cobalt/sched-sporadic.c
index 8adb8f3..cc56c9f 100644
--- a/kernel/cobalt/sched-sporadic.c
+++ b/kernel/cobalt/sched-sporadic.c
@@ -349,16 +349,16 @@ static void xnsched_sporadic_requeue(struct xnthread *thread)
 static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched)
 {
        struct xnthread *curr = sched->curr, *next;
-       struct xnpholder *h;
 
-       h = sched_getpq(&sched->rt.runnable);
-       next = h ? link2thread(h, rlink) : NULL;
+       next = sched_getq(&sched->rt.runnable);
+       if (next == NULL)
+               goto swap_budgets;
 
        if (curr == next)
                return next;
 
        /* Arm the drop timer for an incoming sporadic thread. */
-       if (next && next->pss)
+       if (next->pss)
                sporadic_resume_activity(next);
 
        /*
@@ -368,6 +368,7 @@ static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched)
         * sporadic thread wants, so there is no replenishment
         * operation involved.
         */
+swap_budgets:
        if (curr->base_class != &xnsched_class_sporadic)
                return next;
 
diff --git a/kernel/cobalt/sched-tp.c b/kernel/cobalt/sched-tp.c
index 62cf734..8186c0a 100644
--- a/kernel/cobalt/sched-tp.c
+++ b/kernel/cobalt/sched-tp.c
@@ -96,14 +96,14 @@ static void xnsched_tp_init(struct xnsched *sched)
         * are valid RT priorities. TP is actually a subset of RT.
         */
        for (n = 0; n < CONFIG_XENO_OPT_SCHED_TP_NRPART; n++)
-               sched_initpq(&tp->partitions[n].runnable,
-                            XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
-       sched_initpq(&tp->idle.runnable,
-                    XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
+               sched_initq(&tp->partitions[n].runnable,
+                           XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
+       sched_initq(&tp->idle.runnable,
+                   XNSCHED_RT_MIN_PRIO, XNSCHED_RT_MAX_PRIO);
 
        tp->tps = NULL;
        tp->gps = NULL;
-       initq(&tp->threads);
+       INIT_LIST_HEAD(&tp->threads);
        xntimer_init_noblock(&tp->tf_timer, tp_tick_handler);
        xntimer_set_name(&tp->tf_timer, "tp-tick");
 }
@@ -165,45 +165,39 @@ static int xnsched_tp_declare(struct xnthread *thread,
            p->tp.prio > XNSCHED_RT_MAX_PRIO)
                return -EINVAL;
 
-       appendq(&sched->tp.threads, &thread->tp_link);
+       list_add_tail(&thread->tp_link, &sched->tp.threads);
 
        return 0;
 }
 
 static void xnsched_tp_forget(struct xnthread *thread)
 {
+       list_del(&thread->tp_link);
        thread->tps = NULL;
-       removeq(&thread->sched->tp.threads, &thread->tp_link);
 }
 
 static void xnsched_tp_enqueue(struct xnthread *thread)
 {
-       sched_insertpqf(&thread->tps->runnable,
-                       &thread->rlink, thread->cprio);
+       sched_insertqff(&thread->tps->runnable, thread);
 }
 
 static void xnsched_tp_dequeue(struct xnthread *thread)
 {
-       sched_removepq(&thread->tps->runnable, &thread->rlink);
+       sched_removeq(&thread->tps->runnable, thread);
 }
 
 static void xnsched_tp_requeue(struct xnthread *thread)
 {
-       sched_insertpql(&thread->tps->runnable,
-                       &thread->rlink, thread->cprio);
+       sched_insertqlf(&thread->tps->runnable, thread);
 }
 
 static struct xnthread *xnsched_tp_pick(struct xnsched *sched)
 {
-       struct xnpholder *h;
-
        /* Never pick a thread if we don't schedule partitions. */
        if (!xntimer_running_p(&sched->tp.tf_timer))
                return NULL;
 
-       h = sched_getpq(&sched->tp.tps->runnable);
-
-       return h ? link2thread(h, rlink) : NULL;
+       return sched_getq(&sched->tp.tps->runnable);
 }
 
 static void xnsched_tp_migrate(struct xnthread *thread, struct xnsched *sched)
@@ -246,8 +240,7 @@ xnsched_tp_set_schedule(struct xnsched *sched,
        struct xnsched_tp_schedule *old_gps;
        struct xnsched_tp *tp = &sched->tp;
        union xnsched_policy_param param;
-       struct xnthread *thread;
-       struct xnholder *h;
+       struct xnthread *thread, *tmp;
 
        XENO_BUGON(NUCLEUS, gps != NULL &&
                    (gps->pwin_nr <= 0 || gps->pwins[0].w_offset != 0));
@@ -258,12 +251,15 @@ xnsched_tp_set_schedule(struct xnsched *sched,
         * Move all TP threads on this scheduler to the RT class,
         * until we call xnsched_set_policy() for them again.
         */
-       while ((h = getq(&tp->threads)) != NULL) {
-               thread = link2thread(h, tp_link);
+       if (list_empty(&tp->threads))
+               goto done;
+
+       list_for_each_entry_safe(thread, tmp, &tp->threads, tp_link) {
+               list_del(&thread->tp_link);
                param.rt.prio = thread->cprio;
                xnsched_set_policy(thread, &xnsched_class_rt, &param);
        }
-
+done:
        old_gps = tp->gps;
        tp->gps = gps;
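
Note the iteration change above: the old code drained tp->threads with
getq(), whereas the list_for_each_entry_safe() walk unlinks each
thread while traversing, which the _safe variant permits by caching
the next element before the loop body runs. The same idiom in
isolation, over a hypothetical element type:

    struct elem {
            struct list_head link;
    };

    static void drain(struct list_head *head)
    {
            struct elem *e, *tmp;

            /* tmp holds the successor, so deleting e is safe here. */
            list_for_each_entry_safe(e, tmp, head, link) {
                    list_del(&e->link);
                    /* ... rework e, e.g. move it to another class ... */
            }
    }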
 
diff --git a/kernel/cobalt/sched-weak.c b/kernel/cobalt/sched-weak.c
index dc2e01a..21b1b0f 100644
--- a/kernel/cobalt/sched-weak.c
+++ b/kernel/cobalt/sched-weak.c
@@ -26,31 +26,28 @@
 
 static void xnsched_weak_init(struct xnsched *sched)
 {
-       sched_initpq(&sched->weak.runnable,
-                    XNSCHED_WEAK_MIN_PRIO, XNSCHED_WEAK_MAX_PRIO);
+       sched_initq(&sched->weak.runnable,
+                   XNSCHED_WEAK_MIN_PRIO, XNSCHED_WEAK_MAX_PRIO);
 }
 
 static void xnsched_weak_requeue(struct xnthread *thread)
 {
-       sched_insertpql(&thread->sched->weak.runnable,
-                       &thread->rlink, thread->cprio);
+       sched_insertqlf(&thread->sched->weak.runnable, thread);
 }
 
 static void xnsched_weak_enqueue(struct xnthread *thread)
 {
-       sched_insertpqf(&thread->sched->weak.runnable,
-                       &thread->rlink, thread->cprio);
+       sched_insertqff(&thread->sched->weak.runnable, thread);
 }
 
 static void xnsched_weak_dequeue(struct xnthread *thread)
 {
-       sched_removepq(&thread->sched->weak.runnable, &thread->rlink);
+       sched_removeq(&thread->sched->weak.runnable, thread);
 }
 
 static struct xnthread *xnsched_weak_pick(struct xnsched *sched)
 {
-       struct xnpholder *h = sched_getpq(&sched->weak.runnable);
-       return h ? link2thread(h, rlink) : NULL;
+       return sched_getq(&sched->weak.runnable);
 }
 
 void xnsched_weak_setparam(struct xnthread *thread,
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 6302ebf..808d461 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -463,7 +463,7 @@ void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
 
 #ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
 
-void initmlq(struct xnsched_mlq *q, int loprio, int hiprio)
+void sched_initq(struct xnsched_mlq *q, int loprio, int hiprio)
 {
        int prio;
 
@@ -474,149 +474,144 @@ void initmlq(struct xnsched_mlq *q, int loprio, int hiprio)
        memset(&q->lomap, 0, sizeof(q->lomap));
 
        for (prio = 0; prio < XNSCHED_MLQ_LEVELS; prio++)
-               initq(&q->queue[prio]);
+               INIT_LIST_HEAD(q->heads + prio);
 
-       XENO_ASSERT(QUEUES,
+       XENO_ASSERT(NUCLEUS,
                    hiprio - loprio + 1 < XNSCHED_MLQ_LEVELS,
                    xnpod_fatal("priority range [%d..%d] is beyond multi-level "
                                "queue indexing capabilities",
                                loprio, hiprio));
 }
 
-void addmlq(struct xnsched_mlq *q,
-           struct xnpholder *h, int idx, int lifo)
+static inline int indexmlq(struct xnsched_mlq *q, int prio)
 {
-       struct xnqueue *queue = &q->queue[idx];
-       int hi = idx / BITS_PER_LONG;
-       int lo = idx % BITS_PER_LONG;
+       XENO_ASSERT(NUCLEUS,
+                   prio >= q->loprio && prio <= q->hiprio,
+                   xnpod_fatal("priority level %d is out of range ", prio));
+       /*
+        * BIG FAT WARNING: We need to rescale the priority level to a
+        * 0-based range. We use ffnz() to scan the bitmap which MUST
+        * be based on a bit scan forward op. Therefore, the lower the
+        * index value, the higher the priority (since least
+        * significant bits will be found first when scanning the
+        * bitmaps).
+        */
+       return q->hiprio - prio;
+}
 
-       if (lifo)
-               prependq(queue, &h->plink);
-       else
-               appendq(queue, &h->plink);
+static struct list_head *addmlq(struct xnsched_mlq *q, int prio)
+{
+       struct list_head *head;
+       int hi, lo, idx;
 
-       h->prio = idx;
+       idx = indexmlq(q, prio);
+       head = q->heads + idx;
        q->elems++;
-       __setbits(q->himap, 1UL << hi);
-       __setbits(q->lomap[hi], 1UL << lo);
+
+       /* New item is not linked yet. */
+       if (list_empty(head)) {
+               hi = idx / BITS_PER_LONG;
+               lo = idx % BITS_PER_LONG;
+               __setbits(q->himap, 1UL << hi);
+               __setbits(q->lomap[hi], 1UL << lo);
+       }
+
+       return head;
+}
+
+void sched_insertqlf(struct xnsched_mlq *q, struct xnthread *thread)
+{
+       struct list_head *head = addmlq(q, thread->cprio);
+       list_add(&thread->rlink, head);
+}
+
+void sched_insertqff(struct xnsched_mlq *q, struct xnthread *thread)
+{
+       struct list_head *head = addmlq(q, thread->cprio);
+       list_add_tail(&thread->rlink, head);
 }
 
-void removemlq(struct xnsched_mlq *q, struct xnpholder *h)
+static void removemlq(struct xnsched_mlq *q,
+                     struct list_head *entry, int idx)
 {
-       int idx = h->prio;
-       struct xnqueue *queue = &q->queue[idx];
+       struct list_head *head;
+       int hi, lo;
 
+       head = q->heads + idx;
+       list_del(entry);
        q->elems--;
 
-       removeq(queue, &h->plink);
-
-       if (emptyq_p(queue)) {
-               int hi = idx / BITS_PER_LONG;
-               int lo = idx % BITS_PER_LONG;
+       if (list_empty(head)) {
+               hi = idx / BITS_PER_LONG;
+               lo = idx % BITS_PER_LONG;
                __clrbits(q->lomap[hi], 1UL << lo);
                if (q->lomap[hi] == 0)
                        __clrbits(q->himap, 1UL << hi);
        }
 }
 
-struct xnpholder *findmlqh(struct xnsched_mlq *q, int prio)
+void sched_removeq(struct xnsched_mlq *q, struct xnthread *thread)
 {
-       struct xnqueue *queue = &q->queue[indexmlq(q, prio)];
-       return (struct xnpholder *)getheadq(queue);
+       removemlq(q, &thread->rlink, indexmlq(q, thread->cprio));
 }
 
-struct xnpholder *getheadmlq(struct xnsched_mlq *q)
+static inline int ffsmlq(struct xnsched_mlq *q)
 {
-       struct xnqueue *queue;
-       struct xnpholder *h;
-
-       if (emptymlq_p(q))
-               return NULL;
-
-       queue = &q->queue[ffsmlq(q)];
-       h = (struct xnpholder *)getheadq(queue);
-
-       XENO_ASSERT(QUEUES, h,
-                   xnpod_fatal
-                   ("corrupted multi-level queue, qslot=%p at %s:%d", q,
-                    __FILE__, __LINE__);
-               );
-
-       return h;
+       int hi = ffnz(q->himap);
+       int lo = ffnz(q->lomap[hi]);
+       return hi * BITS_PER_LONG + lo; /* Result is undefined if none set. */
 }
 
-struct xnpholder *getmlq(struct xnsched_mlq *q)
+struct xnthread *sched_getq(struct xnsched_mlq *q)
 {
-       struct xnqueue *queue;
-       struct xnholder *h;
-       int idx, hi, lo;
+       struct xnthread *thread;
+       struct list_head *head;
+       int idx;
 
-       if (emptymlq_p(q))
+       if (q->elems == 0)
                return NULL;
 
        idx = ffsmlq(q);
-       queue = &q->queue[idx];
-       h = getq(queue);
+       head = q->heads + idx;
+       XENO_BUGON(NUCLEUS, list_empty(head));
+       thread = list_first_entry(head, struct xnthread, rlink);
+       removemlq(q, &thread->rlink, idx);
 
-       XENO_ASSERT(QUEUES, h,
-                   xnpod_fatal
-                   ("corrupted multi-level queue, qslot=%p at %s:%d", q,
-                    __FILE__, __LINE__);
-           );
+       return thread;
+}
 
-       q->elems--;
+struct xnthread *sched_findq(struct xnsched_mlq *q, int prio)
+{
+       struct list_head *head;
+       int idx;
 
-       if (emptyq_p(queue)) {
-               hi = idx / BITS_PER_LONG;
-               lo = idx % BITS_PER_LONG;
-               __clrbits(q->lomap[hi], 1UL << lo);
-               if (q->lomap[hi] == 0)
-                       __clrbits(q->himap, 1UL << hi);
-       }
+       idx = indexmlq(q, prio);
+       head = q->heads + idx;
+       if (list_empty(head))
+               return NULL;
 
-       return (struct xnpholder *)h;
+       return list_first_entry(head, struct xnthread, rlink);
 }
 
-struct xnpholder *nextmlq(struct xnsched_mlq *q, struct xnpholder *h)
+#else /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *sched_findq(struct list_head *q, int prio)
 {
-       unsigned long hibits, lobits;
-       int idx = h->prio, hi, lo;
-       struct xnqueue *queue;
-       struct xnholder *nh;
+       struct xnthread *thread;
 
-       hi = idx / BITS_PER_LONG;
-       lo = idx % BITS_PER_LONG;
-       lobits = q->lomap[hi] >> lo;
-       hibits = q->himap >> hi;
+       if (list_empty(q))
+               return NULL;
 
-       for (;;) {
-               queue = &q->queue[idx];
-               if (!emptyq_p(queue)) {
-                       nh = h ? nextq(queue, &h->plink) : getheadq(queue);
-                       if (nh)
-                               return (struct xnpholder *)nh;
-               }
-               for (;;) {
-                       lobits >>= 1;
-                       if (lobits == 0) {
-                               hibits >>= 1;
-                               if (hibits == 0)
-                                       return NULL;
-                               lobits = q->lomap[++hi];
-                               idx = hi * BITS_PER_LONG;
-                       } else
-                               idx++;
-                       if (lobits & 1) {
-                               h = NULL;
-                               break;
-                       }
-               }
+       /* Find thread leading a priority group. */
+       list_for_each_entry(thread, q, rlink) {
+               if (prio == thread->cprio)
+                       return thread;
        }
 
        return NULL;
 }
 
-#endif /* CONFIG_XENO_OPT_SCALABLE_SCHED */
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
 
 #ifdef CONFIG_XENO_OPT_VFILE
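
The moved indexmlq()/ffsmlq() pair is what keeps the pick operation
O(1): indexmlq() stores hiprio - prio, so the highest priority maps to
the lowest bitmap index, and ffsmlq() recovers it with two forward bit
scans. A standalone model of that lookup, assuming BITS_PER_LONG == 64
and modeling ffnz() with __builtin_ctzl() (an assumption about the
port; the code only requires bit-scan-forward semantics):

    #include <stdio.h>

    #define BITS_PER_LONG 64

    /* Model of ffsmlq(): index of the lowest set bit across the
     * two-level himap/lomap hierarchy. */
    static int model_ffsmlq(unsigned long himap, const unsigned long *lomap)
    {
            int hi = __builtin_ctzl(himap);
            int lo = __builtin_ctzl(lomap[hi]);
            return hi * BITS_PER_LONG + lo;
    }

    int main(void)
    {
            unsigned long himap = 0, lomap[2] = { 0, 0 };
            int idx = 70;   /* e.g. indexmlq() result for some cprio */

            /* What addmlq() does when a level becomes non-empty. */
            himap |= 1UL << (idx / BITS_PER_LONG);
            lomap[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);

            printf("%d\n", model_ffsmlq(himap, lomap));  /* prints 70 */
            return 0;
    }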
 
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 70400bb..48d0734 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -181,7 +181,6 @@ int xnthread_init(struct xnthread *thread,
        thread->cookie = NULL;
 
        inith(&thread->glink);
-       initph(&thread->rlink);
        thread->selector = NULL;
        INIT_LIST_HEAD(&thread->claimq);
 

