Module: xenomai-forge
Branch: next
Commit: 91a51f0f592cf97cff929eae8754b9e7af2a1f3f
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=91a51f0f592cf97cff929eae8754b9e7af2a1f3f

Author: Philippe Gerum <r...@xenomai.org>
Date:   Tue Jun 18 18:56:03 2013 +0200

cobalt/kernel/synch: turn pendq into regular kernel list

This is the first patch of a (long) series aimed at gradually
replacing xnqueue data structures with regular list_head
objects.

Legacy Xenomai queues and kernel lists will live in parallel, until
the rebase is complete, at which point include/cobalt/kernel/queue.h
will be dropped from the tree.

The motivation is to better comply with kernel standards and remove
all useless overhead and specifics about Xenomai queues/lists.

For instance, the element count maintained by the xnqueue object is
most often used only for testing for emptiness, which makes it
overkill. The few users of the actual item count should rather do
their own accounting instead.

---

 include/cobalt/kernel/Makefile.am |    1 +
 include/cobalt/kernel/list.h      |   39 +++++++
 include/cobalt/kernel/synch.h     |   29 ++++--
 include/cobalt/kernel/thread.h    |   11 ++-
 kernel/cobalt/pipe.c              |    2 +-
 kernel/cobalt/posix/cond.c        |    4 +-
 kernel/cobalt/posix/event.c       |   30 ++---
 kernel/cobalt/posix/monitor.c     |    3 +-
 kernel/cobalt/posix/sem.c         |   28 +++--
 kernel/cobalt/registry.c          |   27 ++---
 kernel/cobalt/synch.c             |  212 ++++++++++++++++---------------------
 kernel/cobalt/thread.c            |    1 -
 12 files changed, 201 insertions(+), 186 deletions(-)

diff --git a/include/cobalt/kernel/Makefile.am 
b/include/cobalt/kernel/Makefile.am
index 722c168..f70eeaf 100644
--- a/include/cobalt/kernel/Makefile.am
+++ b/include/cobalt/kernel/Makefile.am
@@ -8,6 +8,7 @@ includesub_HEADERS = \
        heap.h \
        hostrt.h \
        intr.h \
+       list.h \
        lock.h \
        map.h \
        pipe.h \
diff --git a/include/cobalt/kernel/list.h b/include/cobalt/kernel/list.h
new file mode 100644
index 0000000..bc573e8
--- /dev/null
+++ b/include/cobalt/kernel/list.h
@@ -0,0 +1,39 @@
+/*
+ * @note Copyright (C) 2013 Philippe Gerum <r...@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_LIST_H
+#define _COBALT_KERNEL_LIST_H
+
+#include <linux/list.h>
+
+#define list_add_priff(__new, __head, __member_pri, __member_next)             
\
+do {                                                                           
\
+       typeof(*__new) *__pos;                                                  
\
+       if (list_empty(__head))                                                 
\
+               list_add(&(__new)->__member_next, __head);                      
\
+       else {                                                                  
\
+               list_for_each_entry_reverse(__pos, __head, __member_next) {     
\
+                       if ((__new)->__member_pri <= __pos->__member_pri)       
\
+                               break;                                          
\
+               }                                                               
\
+               list_add(&(__new)->__member_next, &__pos->__member_next);       
\
+               }                                                               
\
+} while (0)
+
+#endif /* !_COBALT_KERNEL_LIST_H */
diff --git a/include/cobalt/kernel/synch.h b/include/cobalt/kernel/synch.h
index 71d33a6..605f5ef 100644
--- a/include/cobalt/kernel/synch.h
+++ b/include/cobalt/kernel/synch.h
@@ -72,6 +72,7 @@ static inline int xnsynch_fast_release(atomic_long_t 
*fastlock,
 
 #ifdef __KERNEL__
 
+#include <cobalt/kernel/list.h>
 #include <cobalt/kernel/queue.h>
 
 #define XNSYNCH_CLAIMED 0x10   /* Claimed by other thread(s) w/ PIP */
@@ -100,11 +101,9 @@ typedef struct xnsynch {
 
     xnpholder_t link;  /* Link in claim queues */
 
-#define link2synch(ln)         container_of(ln, struct xnsynch, link)
-
     xnflags_t status;  /* Status word */
 
-    xnpqueue_t pendq;  /* Pending threads */
+    struct list_head pendq;    /* Pending threads */
 
     struct xnthread *owner; /* Thread which owns the resource */
 
@@ -117,10 +116,22 @@ typedef struct xnsynch {
 #define xnsynch_test_flags(synch,flags)        testbits((synch)->status,flags)
 #define xnsynch_set_flags(synch,flags) setbits((synch)->status,flags)
 #define xnsynch_clear_flags(synch,flags)       clrbits((synch)->status,flags)
-#define xnsynch_wait_queue(synch)              (&((synch)->pendq))
-#define xnsynch_nsleepers(synch)               countpq(&((synch)->pendq))
-#define xnsynch_pended_p(synch)                (!emptypq_p(&((synch)->pendq)))
-#define xnsynch_owner(synch)           ((synch)->owner)
+
+#define xnsynch_for_each_sleeper(__pos, __synch)               \
+       list_for_each_entry(__pos, &(__synch)->pendq, plink)
+
+#define xnsynch_for_each_sleeper_safe(__pos, __tmp, __synch)   \
+       list_for_each_entry_safe(__pos, __tmp, &(__synch)->pendq, plink)
+
+static inline int xnsynch_pended_p(struct xnsynch *synch)
+{
+       return !list_empty(&synch->pendq);
+}
+
+static inline struct xnthread *xnsynch_owner(struct xnsynch *synch)
+{
+       return synch->owner;
+}
 
 #define xnsynch_fastlock(synch)                ((synch)->fastlock)
 #define xnsynch_fastlock_p(synch)      ((synch)->fastlock != NULL)
@@ -182,8 +193,8 @@ struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch 
*synch);
 
 int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr);
 
-xnpholder_t *xnsynch_wakeup_this_sleeper(struct xnsynch *synch,
-                                        xnpholder_t *holder);
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch,
+                                struct xnthread *sleeper);
 
 xnflags_t xnsynch_acquire(struct xnsynch *synch,
                          xnticks_t timeout,
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index 379c72e..861f341 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -214,13 +214,20 @@ typedef struct xnthread {
 
        int cprio;                      /* Current priority */
 
-       int wprio;                      /* Weighted priority (dep. scheduling 
class) */
+       /**
+        * Weighted priority (cprio + scheduling class weight).
+        */
+       int wprio;
 
        u_long schedlck;                /*!< Scheduler lock count. */
 
        xnpholder_t rlink;              /* Thread holder in ready queue */
 
-       xnpholder_t plink;              /* Thread holder in synchronization 
queue(s) */
+       /**
+        * Thread holder in xnsynch pendq. Prioritized by
+        * thread->cprio + scheduling class weight.
+        */
+       struct list_head plink;
 
        xnholder_t glink;               /* Thread holder in global queue */
 
diff --git a/kernel/cobalt/pipe.c b/kernel/cobalt/pipe.c
index c97be37..145529b 100644
--- a/kernel/cobalt/pipe.c
+++ b/kernel/cobalt/pipe.c
@@ -700,7 +700,7 @@ static int xnpipe_release(struct inode *inode, struct file 
*file)
 
        if (testbits(state->status, XNPIPE_KERN_CONN)) {
                /* Unblock waiters. */
-               if (xnsynch_nsleepers(&state->synchbase) > 0) {
+               if (xnsynch_pended_p(&state->synchbase)) {
                        xnsynch_flush(&state->synchbase, XNRMID);
                        xnpod_schedule();
                }
diff --git a/kernel/cobalt/posix/cond.c b/kernel/cobalt/posix/cond.c
index 3578476..0b436d8 100644
--- a/kernel/cobalt/posix/cond.c
+++ b/kernel/cobalt/posix/cond.c
@@ -222,7 +222,7 @@ static inline int pthread_cond_destroy(struct __shadow_cond 
*cnd)
                return -EPERM;
        }
 
-       if (xnsynch_nsleepers(&cond->synchbase) || cond->mutex) {
+       if (xnsynch_pended_p(&cond->synchbase) || cond->mutex) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBUSY;
        }
@@ -330,7 +330,7 @@ static inline int cobalt_cond_timedwait_epilogue(xnthread_t 
*cur,
 
        /* Unbind mutex and cond, if no other thread is waiting, if the job was
           not already done. */
-       if (!xnsynch_nsleepers(&cond->synchbase)
+       if (!xnsynch_pended_p(&cond->synchbase)
            && cond->mutex == mutex) {
                cond->mutex = NULL;
                removeq(&mutex->conds, &cond->mutex_link);
diff --git a/kernel/cobalt/posix/event.c b/kernel/cobalt/posix/event.c
index 1c59265..f48ef7b 100644
--- a/kernel/cobalt/posix/event.c
+++ b/kernel/cobalt/posix/event.c
@@ -150,7 +150,6 @@ int cobalt_event_wait(struct cobalt_event_shadow __user 
*u_evtsh,
                goto out;
        }
 
-       datp->nwaiters = xnsynch_nsleepers(&event->synch) + 1;
        setbits(datp->flags, COBALT_EVENT_PENDED);
        rbits = datp->value & bits;
        testval = mode & COBALT_EVENT_ANY ? rbits : datp->value;
@@ -165,21 +164,20 @@ int cobalt_event_wait(struct cobalt_event_shadow __user 
*u_evtsh,
        ewc.value = bits;
        ewc.mode = mode;
        xnthread_prepare_wait(&ewc.wc);
-
+       datp->nwaiters++;
        info = xnsynch_sleep_on(&event->synch, timeout, tmode);
        xnthread_finish_wait(&ewc.wc, NULL);
+
        if (info & XNRMID) {
                ret = -EIDRM;
                goto out;
        }
-       if (info & XNBREAK)
-               ret = -EINTR;
-       else if (info & XNTIMEO)
-               ret = -ETIMEDOUT;
-       else
+       if (info & (XNBREAK|XNTIMEO)) {
+               datp->nwaiters--;
+               ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+       } else
                rbits = ewc.value;
 done:
-       datp->nwaiters = xnsynch_nsleepers(&event->synch);
        if (!xnsynch_pended_p(&event->synch))
                clrbits(datp->flags, COBALT_EVENT_PENDED);
 out:
@@ -199,9 +197,7 @@ int cobalt_event_sync(struct cobalt_event_shadow __user 
*u_evtsh)
        struct xnthread_wait_context *wc;
        struct cobalt_event_data *datp;
        struct event_wait_context *ewc;
-       struct xnpholder *h, *nh;
-       struct xnpqueue *waitq;
-       struct xnthread *p;
+       struct xnthread *p, *tmp;
        int ret = 0;
        spl_t s;
 
@@ -222,19 +218,17 @@ int cobalt_event_sync(struct cobalt_event_shadow __user 
*u_evtsh)
         */
        datp = event->data;
        bits = datp->value;
-       waitq = xnsynch_wait_queue(&event->synch);
-       nh = getheadpq(waitq);
-       while ((h = nh) != NULL) {
-               p = link2thread(h, plink);
+
+       xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) {
                wc = xnthread_get_wait_context(p);
                ewc = container_of(wc, struct event_wait_context, wc);
                waitval = ewc->value & bits;
                testval = ewc->mode & COBALT_EVENT_ANY ? waitval : ewc->value;
                if (waitval && waitval == testval) {
+                       datp->nwaiters--;
                        ewc->value = waitval;
-                       nh = xnsynch_wakeup_this_sleeper(&event->synch, h);
-               } else
-                       nh = nextpq(waitq, h);
+                       xnsynch_wakeup_this_sleeper(&event->synch, p);
+               }
        }
 
        xnpod_schedule();
diff --git a/kernel/cobalt/posix/monitor.c b/kernel/cobalt/posix/monitor.c
index 780c02d..bdb4709 100644
--- a/kernel/cobalt/posix/monitor.c
+++ b/kernel/cobalt/posix/monitor.c
@@ -200,8 +200,7 @@ static void cobalt_monitor_wakeup(struct cobalt_monitor 
*mon)
                 */
                if (bcast ||
                    (p->u_window->grant_value && p->wchan == 
&tid->monitor_synch)) {
-                       xnsynch_wakeup_this_sleeper(&tid->monitor_synch,
-                                                   &p->plink);
+                       xnsynch_wakeup_this_sleeper(&tid->monitor_synch, p);
                        removeq(&mon->waiters, &tid->monitor_link);
                        tid->monitor_queued = 0;
                }
diff --git a/kernel/cobalt/posix/sem.c b/kernel/cobalt/posix/sem.c
index 616e102..b756fed 100644
--- a/kernel/cobalt/posix/sem.c
+++ b/kernel/cobalt/posix/sem.c
@@ -46,6 +46,7 @@ typedef struct cobalt_sem {
        xnholder_t link;        /* Link in semq */
        unsigned int value;
        int flags;
+       int nwaiters;
        struct cobalt_kqueues *owningq;
 } cobalt_sem_t;
 
@@ -113,6 +114,7 @@ static int sem_init_inner(cobalt_sem_t *sem, int flags, 
unsigned int value)
        xnsynch_init(&sem->synchbase, sflags, NULL);
        sem->value = value;
        sem->flags = flags;
+       sem->nwaiters = 0;
        sem->owningq = cobalt_kqueues(pshared);
 
        return 0;
@@ -129,7 +131,7 @@ static int do_sem_init(struct __shadow_sem *sm, int flags, 
unsigned int value)
        if ((flags & SEM_PULSE) != 0 && value > 0)
                return -EINVAL;
 
-       sem = (cobalt_sem_t *)xnmalloc(sizeof(*sem));
+       sem = xnmalloc(sizeof(*sem));
        if (sem == NULL)
                return -ENOSPC;
 
@@ -540,6 +542,8 @@ sem_timedwait_internal(cobalt_sem_t *sem, int timed, 
xnticks_t to)
        if (ret != -EAGAIN)
                return ret;
 
+       sem->nwaiters++;
+
        if (timed) {
                tmode = sem->flags & SEM_RAWCLOCK ? XN_ABSOLUTE : XN_REALTIME;
                info = xnsynch_sleep_on(&sem->synchbase, to, tmode);
@@ -549,11 +553,10 @@ sem_timedwait_internal(cobalt_sem_t *sem, int timed, 
xnticks_t to)
        if (info & XNRMID)
                return -EINVAL;
 
-       if (info & XNBREAK)
-               return -EINTR;
-
-       if (info & XNTIMEO)
-               return -ETIMEDOUT;
+       if (info & (XNBREAK|XNTIMEO)) {
+               sem->nwaiters--;
+               return (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+       }
 
        return 0;
 }
@@ -666,14 +669,17 @@ int sem_post_inner(cobalt_sem_t *sem, struct 
cobalt_kqueues *ownq, int bcast)
                return -EINVAL;
 
        if (!bcast) {
-               if (xnsynch_wakeup_one_sleeper(&sem->synchbase) != NULL)
+               if (xnsynch_wakeup_one_sleeper(&sem->synchbase)) {
+                       sem->nwaiters--;
                        xnpod_schedule();
-               else if ((sem->flags & SEM_PULSE) == 0)
+               } else if ((sem->flags & SEM_PULSE) == 0)
                        ++sem->value;
        } else {
                sem->value = 0;
-               if (xnsynch_flush(&sem->synchbase, 0) == XNSYNCH_RESCHED)
+               if (xnsynch_flush(&sem->synchbase, 0) == XNSYNCH_RESCHED) {
+                       sem->nwaiters = 0;
                        xnpod_schedule();
+               }
        }
 
        return 0;
@@ -751,18 +757,16 @@ int sem_getvalue(cobalt_sem_t *sem, int *value)
 
        if (sem->magic != COBALT_SEM_MAGIC) {
                xnlock_put_irqrestore(&nklock, s);
-
                return -EINVAL;
        }
 
        if (sem->owningq != sem_kqueue(sem)) {
                xnlock_put_irqrestore(&nklock, s);
-
                return -EPERM;
        }
 
        if (sem->value == 0 && (sem->flags & SEM_REPORT) != 0)
-               *value = -xnsynch_nsleepers(&sem->synchbase);
+               *value = -sem->nwaiters;
        else
                *value = sem->value;
 
diff --git a/kernel/cobalt/registry.c b/kernel/cobalt/registry.c
index 44e0a03..c07b7c9 100644
--- a/kernel/cobalt/registry.c
+++ b/kernel/cobalt/registry.c
@@ -569,34 +569,25 @@ static struct xnobject *registry_hash_find(const char 
*key)
 
        for (ecurr = registry_hash_table[registry_hash_crunch(key)];
             ecurr != NULL; ecurr = ecurr->hnext) {
-               if (!strcmp(key, ecurr->key))
+               if (strcmp(key, ecurr->key) == 0)
                        return ecurr;
        }
 
        return NULL;
 }
 
-static inline unsigned registry_wakeup_sleepers(const char *key)
+static inline int registry_wakeup_sleepers(const char *key)
 {
-       xnpholder_t *holder, *nholder;
-       unsigned cnt = 0;
-
-       nholder = getheadpq(xnsynch_wait_queue(&registry_hash_synch));
-
-       while ((holder = nholder) != NULL) {
-               xnthread_t *sleeper = link2thread(holder, plink);
+       struct xnthread *sleeper, *tmp;
+       int cnt = 0;
 
+       xnsynch_for_each_sleeper_safe(sleeper, tmp, &registry_hash_synch) {
                if (*key == *sleeper->registry.waitkey &&
-                   !strcmp(key, sleeper->registry.waitkey)) {
+                   strcmp(key, sleeper->registry.waitkey) == 0) {
                        sleeper->registry.waitkey = NULL;
-                       nholder =
-                           xnsynch_wakeup_this_sleeper(&registry_hash_synch,
-                                                       holder);
+                       xnsynch_wakeup_this_sleeper(&registry_hash_synch, 
sleeper);
                        ++cnt;
-               } else
-                       nholder =
-                           nextpq(xnsynch_wait_queue(&registry_hash_synch),
-                                  holder);
+               }
        }
 
        return cnt;
@@ -1159,7 +1150,7 @@ u_long xnregistry_put(xnhandle_t handle)
 
        if ((newlock = object->safelock) > 0 &&
            (newlock = --object->safelock) == 0 &&
-           xnsynch_nsleepers(&object->safesynch) > 0) {
+           xnsynch_pended_p(&object->safesynch)) {
                xnsynch_flush(&object->safesynch, 0);
                xnpod_schedule();
        }
diff --git a/kernel/cobalt/synch.c b/kernel/cobalt/synch.c
index a964e8a..383359d 100644
--- a/kernel/cobalt/synch.c
+++ b/kernel/cobalt/synch.c
@@ -107,15 +107,14 @@ void xnsynch_init(struct xnsynch *synch, xnflags_t flags, 
atomic_long_t *fastloc
        synch->status = flags & ~XNSYNCH_CLAIMED;
        synch->owner = NULL;
        synch->cleanup = NULL;  /* Only works for PIP-enabled objects. */
-       if ((flags & XNSYNCH_OWNER)) {
-               if (fastlock) {
-                       synch->fastlock = fastlock;
-                       atomic_long_set(fastlock, XN_NO_HANDLE);
-               } else
-                       BUG();
+       INIT_LIST_HEAD(&synch->pendq);
+
+       if (flags & XNSYNCH_OWNER) {
+               BUG_ON(fastlock == NULL);
+               synch->fastlock = fastlock;
+               atomic_long_set(fastlock, XN_NO_HANDLE);
        } else
                synch->fastlock = NULL;
-       initpq(&synch->pendq);
 }
 EXPORT_SYMBOL_GPL(xnsynch_init);
 
@@ -178,9 +177,9 @@ xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t 
timeout,
                   thread, xnthread_name(thread), synch);
 
        if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
-               appendpq(&synch->pendq, &thread->plink);
+               list_add_tail(&thread->plink, &synch->pendq);
        else /* i.e. priority-sorted */
-               insertpqf(&synch->pendq, &thread->plink, thread->wprio);
+               list_add_priff(thread, &synch->pendq, wprio, plink);
 
        xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
 
@@ -192,7 +191,7 @@ EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
 
 /*!
  * \fn struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
- * \brief Give the resource ownership to the next waiting thread.
+ * \brief Unblock the heading thread from wait.
  *
  * This service wakes up the thread which is currently leading the
  * synchronization object's pending list. The sleeping thread is
@@ -208,15 +207,6 @@ EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
  *
  * @return The descriptor address of the unblocked thread.
  *
- * Side-effects:
- *
- * - The effective priority of the previous resource owner might be
- * lowered to its base priority value as a consequence of the priority
- * inheritance boost being cleared.
- *
- * - The synchronization object ownership is transfered to the
- * unblocked thread.
- *
  * Environments:
  *
  * This service can be called from:
@@ -231,24 +221,26 @@ EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
 
 struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
 {
-       struct xnthread *thread = NULL;
-       struct xnpholder *holder;
+       struct xnthread *thread;
        spl_t s;
 
        XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
 
        xnlock_get_irqsave(&nklock, s);
 
-       holder = getpq(&synch->pendq);
-       if (holder) {
-               thread = link2thread(holder, plink);
-               thread->wchan = NULL;
-               trace_mark(xn_nucleus, synch_wakeup_one,
-                          "thread %p thread_name %s synch %p",
-                          thread, xnthread_name(thread), synch);
-               xnpod_resume_thread(thread, XNPEND);
+       if (list_empty(&synch->pendq)) {
+               thread = NULL;
+               goto out;
        }
 
+       thread = list_first_entry(&synch->pendq, struct xnthread, plink);
+       list_del(&thread->plink);
+       thread->wchan = NULL;
+       trace_mark(xn_nucleus, synch_wakeup_one,
+                  "thread %p thread_name %s synch %p",
+                  thread, xnthread_name(thread), synch);
+       xnpod_resume_thread(thread, XNPEND);
+out:
        xnlock_put_irqrestore(&nklock, s);
 
        return thread;
@@ -257,38 +249,37 @@ EXPORT_SYMBOL_GPL(xnsynch_wakeup_one_sleeper);
 
 int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
 {
-       struct xnpholder *holder;
-       struct xnthread *thread;
-       int sleepers, i;
+       struct xnthread *thread, *tmp;
+       int nwakeups = 0;
        spl_t s;
 
        XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
 
        xnlock_get_irqsave(&nklock, s);
 
-       sleepers = xnsynch_nsleepers(synch);
-       if (nr > sleepers)
-               nr = sleepers;
-       for (i = 0; i < nr; i++) {
-               holder = getpq(&synch->pendq);
+       if (list_empty(&synch->pendq))
+               goto out;
 
-               thread = link2thread(holder, plink);
+       list_for_each_entry_safe(thread, tmp, &synch->pendq, plink) {
+               if (nwakeups >= nr)
+                       break;
+               nwakeups++;
+               list_del(&thread->plink);
                thread->wchan = NULL;
                trace_mark(xn_nucleus, synch_wakeup_many,
                           "thread %p thread_name %s synch %p",
                           thread, xnthread_name(thread), synch);
                xnpod_resume_thread(thread, XNPEND);
        }
-
+out:
        xnlock_put_irqrestore(&nklock, s);
 
-       return nr;
+       return nwakeups;
 }
 EXPORT_SYMBOL_GPL(xnsynch_wakeup_many_sleepers);
 
 /*!
  * \fn void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper);
- * \brief Give the resource ownership to a given waiting thread.
+ * \brief Unblock a particular thread from wait.
  *
  * This service wakes up a specific thread which is currently pending on
  * the given synchronization object. The sleeping thread is unblocked
@@ -302,21 +293,8 @@ EXPORT_SYMBOL_GPL(xnsynch_wakeup_many_sleepers);
  * @param synch The descriptor address of the synchronization object
  * the sleeper pends on.
  *
- * @param holder The link holder address of the thread to unblock
- * (&thread->plink) which MUST be currently linked to the
- * synchronization object's pending queue (i.e. synch->pendq).
- *
- * @return The link address of the unblocked thread in the
- * synchronization object's pending queue.
- *
- * Side-effects:
- *
- * - The effective priority of the previous resource owner might be
- * lowered to its base priority value as a consequence of the priority
- * inheritance boost being cleared.
- *
- * - The synchronization object ownership is transfered to the
- * unblocked thread.
+ * @param sleeper The thread to unblock which MUST be currently linked
+ * to the synchronization object's pending queue (i.e. synch->pendq).
  *
  * Environments:
  *
@@ -330,27 +308,22 @@ EXPORT_SYMBOL_GPL(xnsynch_wakeup_many_sleepers);
  * Rescheduling: never.
  */
 
-struct xnpholder *xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct 
xnpholder *holder)
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread 
*sleeper)
 {
-       struct xnthread *thread;
-       struct xnpholder *nholder;
        spl_t s;
 
        XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
 
        xnlock_get_irqsave(&nklock, s);
 
-       nholder = poppq(&synch->pendq, holder);
-       thread = link2thread(holder, plink);
-       thread->wchan = NULL;
+       list_del(&sleeper->plink);
+       sleeper->wchan = NULL;
        trace_mark(xn_nucleus, synch_wakeup_this,
                   "thread %p thread_name %s synch %p",
-                  thread, xnthread_name(thread), synch);
-       xnpod_resume_thread(thread, XNPEND);
+                  sleeper, xnthread_name(sleeper), synch);
+       xnpod_resume_thread(sleeper, XNPEND);
 
        xnlock_put_irqrestore(&nklock, s);
-
-       return nholder;
 }
 EXPORT_SYMBOL_GPL(xnsynch_wakeup_this_sleeper);
 
@@ -488,7 +461,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
        xnsynch_detect_relaxed_owner(synch, thread);
 
        if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
-               appendpq(&synch->pendq, &thread->plink);
+               list_add_tail(&thread->plink, &synch->pendq);
        else if (thread->wprio > owner->wprio) {
                if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == 
synch) {
                        /* Ownership is still pending, steal the resource. */
@@ -498,7 +471,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
                        goto grab_and_exit;
                }
 
-               insertpqf(&synch->pendq, &thread->plink, thread->wprio);
+               list_add_priff(thread, &synch->pendq, wprio, plink);
 
                if (testbits(synch->status, XNSYNCH_PIP)) {
                        if (!xnthread_test_state(owner, XNBOOST)) {
@@ -515,7 +488,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
                        xnsynch_renice_thread(owner, thread);
                }
        } else
-               insertpqf(&synch->pendq, &thread->plink, thread->wprio);
+               list_add_priff(thread, &synch->pendq, wprio, plink);
 
        xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
 
@@ -622,7 +595,6 @@ static void xnsynch_clear_boost(struct xnsynch *synch,
 {
        struct xnthread *target;
        struct xnsynch *hsynch;
-       struct xnpholder *h;
        int wprio;
 
        removepq(&owner->claimq, &synch->link);
@@ -634,10 +606,9 @@ static void xnsynch_clear_boost(struct xnsynch *synch,
                target = owner;
        } else {
                /* Find the highest priority needed to enforce the PIP. */
-               hsynch = link2synch(getheadpq(&owner->claimq));
-               h = getheadpq(&hsynch->pendq);
-               XENO_BUGON(NUCLEUS, h == NULL);
-               target = link2thread(h, plink);
+               hsynch = container_of(getheadpq(&owner->claimq), struct 
xnsynch, link);
+               XENO_BUGON(NUCLEUS, list_empty(&hsynch->pendq));
+               target = list_first_entry(&hsynch->pendq, struct xnthread, 
plink);
                if (target->wprio > wprio)
                        wprio = target->wprio;
                else
@@ -670,8 +641,8 @@ void xnsynch_requeue_sleeper(struct xnthread *thread)
        if (!testbits(synch->status, XNSYNCH_PRIO))
                return;
 
-       removepq(&synch->pendq, &thread->plink);
-       insertpqf(&synch->pendq, &thread->plink, thread->wprio);
+       list_del(&thread->plink);
+       list_add_priff(thread, &synch->pendq, wprio, plink);
        owner = synch->owner;
 
        if (owner != NULL && thread->wprio > owner->wprio) {
@@ -723,41 +694,40 @@ EXPORT_SYMBOL_GPL(__xnsynch_fixup_rescnt);
 struct xnthread *__xnsynch_transfer_ownership(struct xnsynch *synch,
                                              struct xnthread *lastowner)
 {
-       struct xnthread *newowner;
-       struct xnpholder *holder;
+       struct xnthread *nextowner;
+       xnhandle_t nextownerh;
        atomic_long_t *lockp;
-       xnhandle_t newownerh;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
 
        lockp = xnsynch_fastlock(synch);
 
-       if (emptypq_p(&synch->pendq)) {
+       if (list_empty(&synch->pendq)) {
                synch->owner = NULL;
                atomic_long_set(lockp, XN_NO_HANDLE);
                xnlock_put_irqrestore(&nklock, s);
                return NULL;
        }
 
-       holder = getpq(&synch->pendq);
-       newowner = link2thread(holder, plink);
-       newowner->wchan = NULL;
-       newowner->wwake = synch;
-       synch->owner = newowner;
-       xnthread_set_info(newowner, XNWAKEN);
-       xnpod_resume_thread(newowner, XNPEND);
+       nextowner = list_first_entry(&synch->pendq, struct xnthread, plink);
+       list_del(&nextowner->plink);
+       nextowner->wchan = NULL;
+       nextowner->wwake = synch;
+       synch->owner = nextowner;
+       xnthread_set_info(nextowner, XNWAKEN);
+       xnpod_resume_thread(nextowner, XNPEND);
 
        if (testbits(synch->status, XNSYNCH_CLAIMED))
                xnsynch_clear_boost(synch, lastowner);
 
-       newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
-                                            xnsynch_pended_p(synch));
-       atomic_long_set(lockp, newownerh);
+       nextownerh = xnsynch_fast_set_claimed(xnthread_handle(nextowner),
+                                             xnsynch_pended_p(synch));
+       atomic_long_set(lockp, nextownerh);
 
        xnlock_put_irqrestore(&nklock, s);
 
-       return newowner;
+       return nextowner;
 }
 EXPORT_SYMBOL_GPL(__xnsynch_transfer_ownership);
 
@@ -785,16 +755,18 @@ EXPORT_SYMBOL_GPL(__xnsynch_transfer_ownership);
  */
 struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch)
 {
-       struct xnthread *thread = NULL;
-       struct xnpholder *holder;
+       struct xnthread *thread;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
 
-       holder = getheadpq(&synch->pendq);
-       if (holder)
-               thread = link2thread(holder, plink);
+       if (list_empty(&synch->pendq)) {
+               thread = NULL;
+               goto out;
+       }
 
+       thread = list_first_entry(&synch->pendq, struct xnthread, plink);
+out:
        xnlock_put_irqrestore(&nklock, s);
 
        return thread;
@@ -852,8 +824,8 @@ EXPORT_SYMBOL_GPL(xnsynch_peek_pendq);
 
 int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
 {
-       struct xnpholder *holder;
-       int status;
+       struct xnthread *sleeper, *tmp;
+       int ret;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
@@ -861,23 +833,24 @@ int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
        trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
                   synch, reason);
 
-       status = emptypq_p(&synch->pendq) ? XNSYNCH_DONE : XNSYNCH_RESCHED;
-
-       while ((holder = getpq(&synch->pendq)) != NULL) {
-               struct xnthread *sleeper = link2thread(holder, plink);
-               xnthread_set_info(sleeper, reason);
-               sleeper->wchan = NULL;
-               xnpod_resume_thread(sleeper, XNPEND);
-       }
-
-       if (testbits(synch->status, XNSYNCH_CLAIMED)) {
-               xnsynch_clear_boost(synch, synch->owner);
-               status = XNSYNCH_RESCHED;
+       if (list_empty(&synch->pendq)) {
+               XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_CLAIMED));
+               ret = XNSYNCH_DONE;
+       } else {
+               ret = XNSYNCH_RESCHED;
+               list_for_each_entry_safe(sleeper, tmp, &synch->pendq, plink) {
+                       list_del(&sleeper->plink);
+                       xnthread_set_info(sleeper, reason);
+                       sleeper->wchan = NULL;
+                       xnpod_resume_thread(sleeper, XNPEND);
+               }
+               if (testbits(synch->status, XNSYNCH_CLAIMED))
+                       xnsynch_clear_boost(synch, synch->owner);
        }
 
        xnlock_put_irqrestore(&nklock, s);
 
-       return status;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(xnsynch_flush);
 
@@ -912,17 +885,16 @@ void xnsynch_forget_sleeper(struct xnthread *thread)
 
        xnthread_clear_state(thread, XNPEND);
        thread->wchan = NULL;
-       removepq(&synch->pendq, &thread->plink);
+       list_del(&thread->plink);
 
        if (testbits(synch->status, XNSYNCH_CLAIMED)) {
                /* Find the highest priority needed to enforce the PIP. */
                owner = synch->owner;
-
-               if (emptypq_p(&synch->pendq))
+               if (list_empty(&synch->pendq))
                        /* No more sleepers: clear the boost. */
                        xnsynch_clear_boost(synch, owner);
                else {
-                       target = link2thread(getheadpq(&synch->pendq), plink);
+                       target = list_first_entry(&synch->pendq, struct 
xnthread, plink);
                        h = getheadpq(&owner->claimq);
                        if (target->wprio != h->prio) {
                                /*
@@ -969,7 +941,7 @@ void xnsynch_release_all_ownerships(struct xnthread *thread)
                 * Since xnsynch_release() alters the claim queue, we
                 * need to be conservative while scanning it.
                 */
-               synch = link2synch(holder);
+               synch = container_of(holder, struct xnsynch, link);
                nholder = nextpq(&thread->claimq, holder);
                xnsynch_release(synch, thread);
                if (synch->cleanup)
@@ -1006,16 +978,14 @@ void xnsynch_detect_relaxed_owner(struct xnsynch *synch, 
struct xnthread *sleepe
  */
 void xnsynch_detect_claimed_relax(struct xnthread *owner)
 {
-       struct xnpholder *hs, *ht;
        struct xnthread *sleeper;
        struct xnsynch *synch;
+       struct xnpholder *hs;
 
        for (hs = getheadpq(&owner->claimq); hs != NULL;
             hs = nextpq(&owner->claimq, hs)) {
-               synch = link2synch(hs);
-               for (ht = getheadpq(&synch->pendq); ht != NULL;
-                    ht = nextpq(&synch->pendq, ht)) {
-                       sleeper = link2thread(ht, plink);
+               synch = container_of(hs, struct xnsynch, link);
+               list_for_each_entry(sleeper, &synch->pendq, plink) {
                        if (xnthread_test_state(sleeper, XNTRAPSW)) {
                                xnthread_set_info(sleeper, XNSWREP);
                                xnshadow_send_sig(sleeper, SIGDEBUG,
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b13ae76..6b8b3b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -182,7 +182,6 @@ int xnthread_init(struct xnthread *thread,
 
        inith(&thread->glink);
        initph(&thread->rlink);
-       initph(&thread->plink);
        thread->selector = NULL;
        initpq(&thread->claimq);
 


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to