Module: xenomai-forge
Branch: next
Commit: 008de1d6573327f75042c2a3326b845b435faac2
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=008de1d6573327f75042c2a3326b845b435faac2

Author: Philippe Gerum <r...@xenomai.org>
Date:   Mon May 27 16:05:55 2013 +0200

cobalt/nucleus: introduce SCHED_WEAK scheduling class

This code enables support for binding threads from the Linux
SCHED_FIFO/RR scheduling classes to the Xenomai domain as members of
the SCHED_WEAK class, with up to 100 priority levels in the range
[0..99] inclusive.  When enabled, SCHED_WEAK is the low-priority class
of the Xenomai system, providing no real-time guarantee.

Members of the SCHED_WEAK class are weakly scheduled by Xenomai, only
for the purpose of synchronizing with real-time threads from other
scheduling classes.  They cannot compete for CPU resources with
real-time threads, and automatically leave the primary domain upon
return from Xenomai syscalls (*).

This feature extends Xenomai's special handling of SCHED_OTHER
threads to the SCHED_FIFO/RR POSIX classes of a regular Linux
kernel. If disabled, SCHED_WEAK is interpreted by the Xenomai
scheduler as an alias for SCHED_OTHER, restricted to priority 0.
Conversely, when this feature is enabled, SCHED_OTHER threads become
members of Xenomai's SCHED_WEAK class at priority 0.
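
As a quick illustration of the user-visible side, here is a minimal
sketch of re-binding the calling thread to SCHED_WEAK, assuming a
program built against the Cobalt POSIX wrappers; the helper name,
priority value and error handling are illustrative only:

    #include <pthread.h>
    #include <sched.h>      /* the Cobalt headers define SCHED_WEAK */
    #include <stdio.h>
    #include <string.h>

    /*
     * Move the calling thread into the SCHED_WEAK class at priority 10.
     * With CONFIG_XENO_OPT_SCHED_WEAK enabled, any priority in [0..99]
     * is accepted; with it disabled, SCHED_WEAK degrades to SCHED_OTHER
     * and only priority 0 is valid.  Unlike SCHED_FIFO/RR, this does
     * not force an immediate switch to primary mode.
     */
    static int join_weak_class(void)
    {
            struct sched_param param = { .sched_priority = 10 };
            int ret;

            ret = pthread_setschedparam(pthread_self(), SCHED_WEAK, &param);
            if (ret)
                    fprintf(stderr, "SCHED_WEAK rejected: %s\n", strerror(ret));

            return ret;
    }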

(*) As with SCHED_OTHER, Xenomai assumes no real-time requirement
for SCHED_WEAK threads. Therefore, they are automatically moved back
to secondary mode upon return from a Xenomai syscall, unless they hold
a mutex, in which case the transition is deferred until the mutex is
released.
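
As a sketch of this deferred switchback, assuming a SCHED_WEAK thread
and a Cobalt-backed POSIX mutex (the function name is illustrative):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void weak_critical_section(void)
    {
            /* Acquiring the mutex hardens the thread and raises its
             * resource count, so it stays in primary mode while the
             * lock is held. */
            pthread_mutex_lock(&lock);

            /* ... synchronize with real-time threads here ... */

            /* Dropping the last held resource lets the core relax the
             * thread back to secondary mode on the way out of the
             * syscall. */
            pthread_mutex_unlock(&lock);
    }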

---

 include/cobalt/nucleus/sched-idle.h    |    4 +-
 include/cobalt/nucleus/sched-rt.h      |   28 ++--
 include/cobalt/nucleus/sched-weak.h    |   55 +++++++++
 include/cobalt/nucleus/sched.h         |   17 +--
 include/cobalt/nucleus/schedparam.h    |    7 +
 include/cobalt/nucleus/schedqueue.h    |    2 +
 include/cobalt/nucleus/thread.h        |    4 +-
 include/cobalt/sched.h                 |    5 +-
 include/rtdm/rtdm_driver.h             |    4 +-
 kernel/cobalt/internal.h               |    3 -
 kernel/cobalt/monitor.c                |    2 +-
 kernel/cobalt/mutex.c                  |    2 +-
 kernel/cobalt/nucleus/Kconfig          |   54 ++++++++-
 kernel/cobalt/nucleus/Makefile         |    1 +
 kernel/cobalt/nucleus/sched-rt.c       |    2 +-
 kernel/cobalt/nucleus/sched-sporadic.c |    4 +-
 kernel/cobalt/nucleus/sched-tp.c       |    4 +-
 kernel/cobalt/nucleus/sched-weak.c     |  208 ++++++++++++++++++++++++++++++++
 kernel/cobalt/nucleus/sched.c          |    3 +
 kernel/cobalt/nucleus/shadow.c         |   15 +--
 kernel/cobalt/nucleus/synch.c          |    4 +-
 kernel/cobalt/thread.c                 |  101 ++++++++++------
 lib/cobalt/internal.c                  |    6 +-
 lib/cobalt/mutex.c                     |    8 +-
 lib/cobalt/thread.c                    |    6 +-
 testsuite/unit/mutex-torture.c         |   10 +-
 26 files changed, 447 insertions(+), 112 deletions(-)

diff --git a/include/cobalt/nucleus/sched-idle.h b/include/cobalt/nucleus/sched-idle.h
index 25c546e..cbdcd2a 100644
--- a/include/cobalt/nucleus/sched-idle.h
+++ b/include/cobalt/nucleus/sched-idle.h
@@ -34,12 +34,10 @@
 
 extern struct xnsched_class xnsched_class_idle;
 
-#define xnsched_class_default xnsched_class_rt
-
 static inline void __xnsched_idle_setparam(struct xnthread *thread,
                                           const union xnsched_policy_param *p)
 {
-       xnthread_clear_state(thread, XNOTHER);
+       xnthread_clear_state(thread, XNWEAK);
        thread->cprio = p->idle.prio;
 }
 
diff --git a/include/cobalt/nucleus/sched-rt.h b/include/cobalt/nucleus/sched-rt.h
index 1bb9e85..048a2f7 100644
--- a/include/cobalt/nucleus/sched-rt.h
+++ b/include/cobalt/nucleus/sched-rt.h
@@ -27,20 +27,20 @@
 #error "please don't include nucleus/sched-rt.h directly"
 #endif
 
-/* Priority scale for the RT scheduling class. */
+/*
+ * Global priority scale for Xenomai's RT scheduling class, available
+ * to SCHED_COBALT members.
+ */
 #define XNSCHED_RT_MIN_PRIO    0
 #define XNSCHED_RT_MAX_PRIO    257
 #define XNSCHED_RT_NR_PRIO     (XNSCHED_RT_MAX_PRIO - XNSCHED_RT_MIN_PRIO + 1)
 
 /*
- * Builtin priorities shared by SCHED_FIFO over Cobalt and RTDM, only
- * use a sub-range of the available priority levels from the RT
- * scheduling class, in order to exhibit a 1:1 mapping with Linux's
- * SCHED_FIFO ascending priority scale [1..99].
+ * Common POSIX priority range for SCHED_FIFO, and all other classes
+ * except SCHED_COBALT.
  */
-#define XNSCHED_LOW_PRIO       0
-#define XNSCHED_HIGH_PRIO      99
-#define XNSCHED_IRQ_PRIO       XNSCHED_RT_MAX_PRIO /* For IRQ servers. */
+#define XNSCHED_FIFO_MIN_PRIO  1
+#define XNSCHED_FIFO_MAX_PRIO  99
 
 #ifdef __KERNEL__
 
@@ -52,10 +52,6 @@
 
 extern struct xnsched_class xnsched_class_rt;
 
-extern struct xnsched_class xnsched_class_idle;
-
-#define xnsched_class_default xnsched_class_rt
-
 static inline void __xnsched_rt_requeue(struct xnthread *thread)
 {
        sched_insertpql(&thread->sched->rt.runnable,
@@ -84,10 +80,14 @@ static inline void __xnsched_rt_setparam(struct xnthread *thread,
 {
        thread->cprio = p->rt.prio;
        if (!xnthread_test_state(thread, XNBOOST)) {
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+               xnthread_clear_state(thread, XNWEAK);
+#else
                if (thread->cprio)
-                       xnthread_clear_state(thread, XNOTHER);
+                       xnthread_clear_state(thread, XNWEAK);
                else
-                       xnthread_set_state(thread, XNOTHER);
+                       xnthread_set_state(thread, XNWEAK);
+#endif
        }
 }
 
diff --git a/include/cobalt/nucleus/sched-weak.h b/include/cobalt/nucleus/sched-weak.h
new file mode 100644
index 0000000..8d65c58
--- /dev/null
+++ b/include/cobalt/nucleus/sched-weak.h
@@ -0,0 +1,55 @@
+/*!@file sched-weak.h
+ * @brief Definitions for the weak scheduling class.
+ * @author Philippe Gerum
+ *
+ * Copyright (C) 2013 Philippe Gerum <r...@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _XENO_NUCLEUS_SCHED_WEAK_H
+#define _XENO_NUCLEUS_SCHED_WEAK_H
+
+#ifndef _XENO_NUCLEUS_SCHED_H
+#error "please don't include nucleus/sched-weak.h directly"
+#endif
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+
+#define XNSCHED_WEAK_MIN_PRIO  0
+#define XNSCHED_WEAK_MAX_PRIO  99
+#define XNSCHED_WEAK_NR_PRIO   (XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1)
+
+#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_MAX_PRIO ||   \
+       (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&     \
+        XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "WEAK class has too many priority levels"
+#endif
+
+extern struct xnsched_class xnsched_class_weak;
+
+struct xnsched_weak {
+       xnsched_queue_t runnable;       /*!< Runnable thread queue. */
+};
+
+static inline int xnsched_weak_init_thread(struct xnthread *thread)
+{
+       return 0;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_WEAK */
+
+#endif /* !_XENO_NUCLEUS_SCHED_WEAK_H */
diff --git a/include/cobalt/nucleus/sched.h b/include/cobalt/nucleus/sched.h
index ae3cc9e..4c1aa19 100644
--- a/include/cobalt/nucleus/sched.h
+++ b/include/cobalt/nucleus/sched.h
@@ -34,6 +34,7 @@
 
 #include <nucleus/schedqueue.h>
 #include <nucleus/sched-tp.h>
+#include <nucleus/sched-weak.h>
 #include <nucleus/sched-sporadic.h>
 #include <nucleus/vfile.h>
 
@@ -64,36 +65,33 @@ typedef struct xnsched {
        int cpu;
        struct xnthread *curr;          /*!< Current thread. */
 #ifdef CONFIG_SMP
-       cpumask_t resched;      /*!< Mask of CPUs needing rescheduling. */
+       cpumask_t resched;              /*!< Mask of CPUs needing rescheduling. */
 #endif
-
 struct xnsched_rt rt;           /*!< Context of built-in real-time class. */
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+       struct xnsched_weak weak;       /*!< Context of weak scheduling class. */
+#endif
 #ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tp tp;           /*!< Context of TP class. */
 #endif
 #ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
 struct xnsched_sporadic pss;    /*!< Context of sporadic scheduling class. */
 #endif
-
        xntimerq_t timerqueue;          /* !< Core timer queue. */
        volatile unsigned inesting;     /*!< Interrupt nesting level. */
        struct xntimer htimer;          /*!< Host timer. */
        struct xnthread *zombie;
        struct xnthread rootcb;         /*!< Root thread control block. */
-
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        struct xnthread *last;
 #endif
-
 #ifdef CONFIG_XENO_HW_FPU
 struct xnthread *fpuholder;     /*!< Thread owning the current FPU context. */
 #endif
-
 #ifdef CONFIG_XENO_OPT_WATCHDOG
-       struct xntimer wdtimer; /*!< Watchdog timer object. */
-       int wdcount;            /*!< Watchdog tick count. */
+       struct xntimer wdtimer;         /*!< Watchdog timer object. */
+       int wdcount;                    /*!< Watchdog tick count. */
 #endif
-
 #ifdef CONFIG_XENO_OPT_STATS
 xnticks_t last_account_switch;  /*!< Last account switch date (ticks). */
        xnstat_exectime_t *current_account;     /*!< Currently active account */
@@ -134,7 +132,6 @@ struct xnsched_class {
        const char *name;
 };
 
-#define XNSCHED_CLASS_MAX_PRIO         1024
 #define XNSCHED_CLASS_WEIGHT(n)                (n * XNSCHED_CLASS_MAX_PRIO)
 
 /* Placeholder for current thread priority */
diff --git a/include/cobalt/nucleus/schedparam.h b/include/cobalt/nucleus/schedparam.h
index f53352b..93c1c8e 100644
--- a/include/cobalt/nucleus/schedparam.h
+++ b/include/cobalt/nucleus/schedparam.h
@@ -24,6 +24,10 @@ struct xnsched_idle_param {
        int prio;
 };
 
+struct xnsched_weak_param {
+       int prio;
+};
+
 struct xnsched_rt_param {
        int prio;
 };
@@ -45,6 +49,9 @@ struct xnsched_sporadic_param {
 union xnsched_policy_param {
        struct xnsched_idle_param idle;
        struct xnsched_rt_param rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+       struct xnsched_weak_param weak;
+#endif
 #ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tp_param tp;
 #endif
diff --git a/include/cobalt/nucleus/schedqueue.h b/include/cobalt/nucleus/schedqueue.h
index 63f895b..7dacf4c 100644
--- a/include/cobalt/nucleus/schedqueue.h
+++ b/include/cobalt/nucleus/schedqueue.h
@@ -22,6 +22,8 @@
 
 #include <nucleus/queue.h>
 
+#define XNSCHED_CLASS_MAX_PRIO         1024
+
 #ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
 /*
  * Multi-level priority queue, suitable for handling the runnable
diff --git a/include/cobalt/nucleus/thread.h b/include/cobalt/nucleus/thread.h
index fa672a4..c68a914 100644
--- a/include/cobalt/nucleus/thread.h
+++ b/include/cobalt/nucleus/thread.h
@@ -52,7 +52,7 @@
 #define XNTRAPSW  0x00020000 /**< Trap execution mode switches */
 #define XNFPU     0x00040000 /**< Thread uses FPU */
 #define XNROOT    0x00080000 /**< Root thread (that is, Linux/IDLE) */
-#define XNOTHER   0x00100000 /**< Non real-time shadow (prio=0) */
+#define XNWEAK    0x00100000 /**< Non real-time shadow (from the WEAK class) */
 #define XNUSER    0x00200000 /**< Shadow thread running in userland */
 
 /*! @} */ /* Ends doxygen comment group: nucleus_state_flags */
@@ -401,7 +401,7 @@ xnsynch_release(struct xnsynch *synch, struct xnthread *thread)
 
        trace_mark(xn_nucleus, synch_release, "synch %p", synch);
 
-       if (unlikely(xnthread_test_state(thread, XNOTHER)))
+       if (unlikely(xnthread_test_state(thread, XNWEAK)))
                __xnsynch_fixup_rescnt(thread);
 
        lockp = xnsynch_fastlock(synch);
diff --git a/include/cobalt/sched.h b/include/cobalt/sched.h
index b277cc1..b66f42b 100644
--- a/include/cobalt/sched.h
+++ b/include/cobalt/sched.h
@@ -48,6 +48,9 @@ int __sched_cpucount(size_t __setsize, const cpu_set_t *__setp);
 #ifndef __sched_extensions_defined
 #define __sched_extensions_defined
 
+#define SCHED_COBALT           42
+#define SCHED_WEAK             43
+
 #ifndef SCHED_SPORADIC
 #define SCHED_SPORADIC         10
 #define sched_ss_low_priority  sched_u.ss.__sched_low_priority
@@ -56,8 +59,6 @@ int __sched_cpucount(size_t __setsize, const cpu_set_t *__setp);
 #define sched_ss_max_repl      sched_u.ss.__sched_max_repl
 #endif /* !SCHED_SPORADIC */
 
-#define SCHED_COBALT           42
-
 #define sched_rr_quantum       sched_u.rr.__sched_rr_quantum
 
 struct __sched_ss_param {
diff --git a/include/rtdm/rtdm_driver.h b/include/rtdm/rtdm_driver.h
index f60081e..d1fbc2d 100644
--- a/include/rtdm/rtdm_driver.h
+++ b/include/rtdm/rtdm_driver.h
@@ -1063,8 +1063,8 @@ typedef void (*rtdm_task_proc_t)(void *arg);
  * @anchor rtdmtaskprio @name Task Priority Range
  * Maximum and minimum task priorities
  * @{ */
-#define RTDM_TASK_LOWEST_PRIORITY      XNSCHED_LOW_PRIO
-#define RTDM_TASK_HIGHEST_PRIORITY     XNSCHED_HIGH_PRIO
+#define RTDM_TASK_LOWEST_PRIORITY      0
+#define RTDM_TASK_HIGHEST_PRIORITY     XNSCHED_FIFO_MAX_PRIO
 /** @} Task Priority Range */
 
 /**
diff --git a/kernel/cobalt/internal.h b/kernel/cobalt/internal.h
index 71fab21..bb006d4 100644
--- a/kernel/cobalt/internal.h
+++ b/kernel/cobalt/internal.h
@@ -58,9 +58,6 @@
 #define COBALT_EVENT_MAGIC       COBALT_MAGIC(0F)
 #define COBALT_MONITOR_MAGIC     COBALT_MAGIC(10)
 
-#define COBALT_MIN_PRIORITY      XNSCHED_LOW_PRIO
-#define COBALT_MAX_PRIORITY      XNSCHED_HIGH_PRIO
-
 #define ONE_BILLION             1000000000
 
 #define cobalt_obj_active(h,m,t)                       \
diff --git a/kernel/cobalt/monitor.c b/kernel/cobalt/monitor.c
index 3582927..1ccbe98 100644
--- a/kernel/cobalt/monitor.c
+++ b/kernel/cobalt/monitor.c
@@ -125,7 +125,7 @@ static int cobalt_monitor_enter_inner(struct cobalt_monitor *mon)
        ret = xnsynch_fast_acquire(mon->gate.fastlock, xnthread_handle(cur));
        switch(ret) {
        case 0:
-               if (xnthread_test_state(cur, XNOTHER))
+               if (xnthread_test_state(cur, XNWEAK))
                        xnthread_inc_rescnt(cur);
                break;
        default:
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index fa6a89a..59d67e5 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -317,7 +317,7 @@ int cobalt_mutex_trylock(struct __shadow_mutex __user *u_mx)
                                   xnthread_handle(cur));
        switch(err) {
        case 0:
-               if (xnthread_test_state(cur, XNOTHER))
+               if (xnthread_test_state(cur, XNWEAK))
                        xnthread_inc_rescnt(cur);
                break;
 
diff --git a/kernel/cobalt/nucleus/Kconfig b/kernel/cobalt/nucleus/Kconfig
index 400797a..d541c46 100644
--- a/kernel/cobalt/nucleus/Kconfig
+++ b/kernel/cobalt/nucleus/Kconfig
@@ -10,14 +10,60 @@ config XENO_OPT_SCHED_CLASSES
        when and how to select a new thread to run.
 
        The Xenomai core has a built-in real-time class, which
-       supports both preemptive fixed- priority FIFO scheduling, and
-       round-robin scheduling.
+       supports both preemptive fixed-priority FIFO, and round-robin
+       scheduling.
 
-       Enabling this option will allow you to select additional
-       scheduling classes to compile in.
+       When CONFIG_XENO_OPT_SCHED_WEAK is disabled, Xenomai's
+       real-time class also supports threads from the SCHED_OTHER
+       class at FIFO priority level 0. Such threads can access
+       Xenomai resources, wait on Xenomai synchronization objects,
+       without competing for the CPU with members of other scheduling
+       classes.
+
+       NOTE: Xenomai assumes no real-time requirement for SCHED_OTHER
+       threads. Therefore, they are automatically moved back to
+       secondary mode upon return from a Xenomai syscall, unless they
+       hold a mutex, which would defer the transition until the mutex
+       is released.
+
+       Enabling CONFIG_XENO_OPT_SCHED_CLASSES will allow you to
+       select additional scheduling classes to compile in.
+
+       If in doubt, say N.
+
+config XENO_OPT_SCHED_WEAK
+       bool "Weak scheduling class"
+       default n
+       depends on XENO_OPT_SCHED_CLASSES
+       help
+
+       This option enables support for binding threads from the Linux
+       SCHED_FIFO/RR scheduling classes to the Xenomai domain as
+       members of the SCHED_WEAK class, with up to 100 priority
+       levels from [0..99] included.  When enabled, SCHED_WEAK is the
+       low priority class of the Xenomai system, providing no
+       real-time guarantee.
+
+       Members from the SCHED_WEAK class are weakly scheduled by
+       Xenomai, only for the purpose of synchronizing with real-time
+       threads from other scheduling classes.  However, they cannot
+       compete for CPU resources with real-time threads, and leave
+       the primary domain upon return from Xenomai syscalls
+       automatically (*).
+
+       This feature is an extension of Xenomai's special handling of
+       SCHED_OTHER threads, to the SCHED_FIFO/RR POSIX classes from a
+       regular Linux kernel. If disabled, SCHED_WEAK is interpreted
+       as an alias to SCHED_OTHER by the Xenomai scheduler,
+       restricted to priority 0. Conversely, SCHED_OTHER threads are
+       eventually members of Xenomai's SCHED_WEAK class at priority
+       0, when this feature is enabled.
 
        If in doubt, say N.
 
+       (*) With a special exception when a mutex is held. See NOTE
+       from CONFIG_XENO_OPT_SCHED_CLASSES.
+
 config XENO_OPT_SCHED_TP
        bool "Temporal partitioning"
        default n
diff --git a/kernel/cobalt/nucleus/Makefile b/kernel/cobalt/nucleus/Makefile
index 8f526db..fbbefc1 100644
--- a/kernel/cobalt/nucleus/Makefile
+++ b/kernel/cobalt/nucleus/Makefile
@@ -6,6 +6,7 @@ xeno_nucleus-y := \
         registry.o synch.o thread.o clock.o timer.o shadow.o \
        sched.o sched-idle.o sched-rt.o select.o
 
+xeno_nucleus-$(CONFIG_XENO_OPT_SCHED_WEAK) += sched-weak.o
 xeno_nucleus-$(CONFIG_XENO_OPT_SCHED_SPORADIC) += sched-sporadic.o
 xeno_nucleus-$(CONFIG_XENO_OPT_SCHED_TP) += sched-tp.o
 
diff --git a/kernel/cobalt/nucleus/sched-rt.c b/kernel/cobalt/nucleus/sched-rt.c
index 7cd3dce..68bfb1d 100644
--- a/kernel/cobalt/nucleus/sched-rt.c
+++ b/kernel/cobalt/nucleus/sched-rt.c
@@ -253,7 +253,7 @@ struct xnsched_class xnsched_class_rt = {
        .sched_init_vfile       =       xnsched_rt_init_vfile,
        .sched_cleanup_vfile    =       xnsched_rt_cleanup_vfile,
 #endif
-       .weight                 =       XNSCHED_CLASS_WEIGHT(3),
+       .weight                 =       XNSCHED_CLASS_WEIGHT(4),
        .name                   =       "rt"
 };
 EXPORT_SYMBOL_GPL(xnsched_class_rt);
diff --git a/kernel/cobalt/nucleus/sched-sporadic.c b/kernel/cobalt/nucleus/sched-sporadic.c
index b8639f6..375a866 100644
--- a/kernel/cobalt/nucleus/sched-sporadic.c
+++ b/kernel/cobalt/nucleus/sched-sporadic.c
@@ -258,7 +258,7 @@ static void xnsched_sporadic_setparam(struct xnthread *thread,
                }
        }
 
-       xnthread_clear_state(thread, XNOTHER);
+       xnthread_clear_state(thread, XNWEAK);
        thread->cprio = p->pss.current_prio;
 }
 
@@ -532,7 +532,7 @@ struct xnsched_class xnsched_class_sporadic = {
        .sched_init_vfile       =       xnsched_sporadic_init_vfile,
        .sched_cleanup_vfile    =       xnsched_sporadic_cleanup_vfile,
 #endif
-       .weight                 =       XNSCHED_CLASS_WEIGHT(2),
+       .weight                 =       XNSCHED_CLASS_WEIGHT(3),
        .name                   =       "pss"
 };
 EXPORT_SYMBOL_GPL(xnsched_class_sporadic);
diff --git a/kernel/cobalt/nucleus/sched-tp.c b/kernel/cobalt/nucleus/sched-tp.c
index ed0de47..6277354 100644
--- a/kernel/cobalt/nucleus/sched-tp.c
+++ b/kernel/cobalt/nucleus/sched-tp.c
@@ -113,7 +113,7 @@ static void xnsched_tp_setparam(struct xnthread *thread,
 {
        struct xnsched *sched = thread->sched;
 
-       xnthread_clear_state(thread, XNOTHER);
+       xnthread_clear_state(thread, XNWEAK);
        thread->tps = &sched->tp.partitions[p->tp.ptid];
        thread->cprio = p->tp.prio;
 }
@@ -406,7 +406,7 @@ struct xnsched_class xnsched_class_tp = {
        .sched_init_vfile       =       xnsched_tp_init_vfile,
        .sched_cleanup_vfile    =       xnsched_tp_cleanup_vfile,
 #endif
-       .weight                 =       XNSCHED_CLASS_WEIGHT(1),
+       .weight                 =       XNSCHED_CLASS_WEIGHT(2),
        .name                   =       "tp"
 };
 EXPORT_SYMBOL_GPL(xnsched_class_tp);
diff --git a/kernel/cobalt/nucleus/sched-weak.c b/kernel/cobalt/nucleus/sched-weak.c
new file mode 100644
index 0000000..d7f47d2
--- /dev/null
+++ b/kernel/cobalt/nucleus/sched-weak.c
@@ -0,0 +1,208 @@
+/*!@file sched-weak.c
+ * @author Philippe Gerum
+ * @brief WEAK class implementation (non-RT userland shadows)
+ *
+ * Copyright (C) 2013 Philippe Gerum <r...@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * \ingroup sched
+ */
+
+#include <nucleus/pod.h>
+
+static void xnsched_weak_init(struct xnsched *sched)
+{
+       sched_initpq(&sched->weak.runnable,
+                    XNSCHED_WEAK_MIN_PRIO, XNSCHED_WEAK_MAX_PRIO);
+}
+
+static void xnsched_weak_requeue(struct xnthread *thread)
+{
+       sched_insertpql(&thread->sched->weak.runnable,
+                       &thread->rlink, thread->cprio);
+}
+
+static void xnsched_weak_enqueue(struct xnthread *thread)
+{
+       sched_insertpqf(&thread->sched->weak.runnable,
+                       &thread->rlink, thread->cprio);
+}
+
+static void xnsched_weak_dequeue(struct xnthread *thread)
+{
+       sched_removepq(&thread->sched->weak.runnable, &thread->rlink);
+}
+
+static struct xnthread *xnsched_weak_pick(struct xnsched *sched)
+{
+       struct xnpholder *h = sched_getpq(&sched->weak.runnable);
+       return h ? link2thread(h, rlink) : NULL;
+}
+
+void xnsched_weak_setparam(struct xnthread *thread,
+                          const union xnsched_policy_param *p)
+{
+       thread->cprio = p->weak.prio;
+       if (!xnthread_test_state(thread, XNBOOST))
+               xnthread_set_state(thread, XNWEAK);
+}
+
+void xnsched_weak_getparam(struct xnthread *thread,
+                          union xnsched_policy_param *p)
+{
+       p->weak.prio = thread->cprio;
+}
+
+void xnsched_weak_trackprio(struct xnthread *thread,
+                           const union xnsched_policy_param *p)
+{
+       if (p)
+               xnsched_weak_setparam(thread, p);
+       else
+               thread->cprio = thread->bprio;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_weak_vfroot;
+
+struct vfile_sched_weak_priv {
+       struct xnholder *curr;
+};
+
+struct vfile_sched_weak_data {
+       int cpu;
+       pid_t pid;
+       char name[XNOBJECT_NAME_LEN];
+       int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops;
+
+static struct xnvfile_snapshot vfile_sched_weak = {
+       .privsz = sizeof(struct vfile_sched_weak_priv),
+       .datasz = sizeof(struct vfile_sched_weak_data),
+       .tag = &nkpod_struct.threadlist_tag,
+       .ops = &vfile_sched_weak_ops,
+};
+
+static int vfile_sched_weak_rewind(struct xnvfile_snapshot_iterator *it)
+{
+       struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+       int nrthreads = xnsched_class_weak.nthreads;
+
+       if (nrthreads == 0)
+               return -ESRCH;
+
+       priv->curr = getheadq(&nkpod->threadq);
+
+       return nrthreads;
+}
+
+static int vfile_sched_weak_next(struct xnvfile_snapshot_iterator *it,
+                                void *data)
+{
+       struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+       struct vfile_sched_weak_data *p = data;
+       struct xnthread *thread;
+
+       if (priv->curr == NULL)
+               return 0;       /* All done. */
+
+       thread = link2thread(priv->curr, glink);
+       priv->curr = nextq(&nkpod->threadq, priv->curr);
+
+       if (thread->base_class != &xnsched_class_weak)
+               return VFILE_SEQ_SKIP;
+
+       p->cpu = xnsched_cpu(thread->sched);
+       p->pid = xnthread_host_pid(thread);
+       memcpy(p->name, thread->name, sizeof(p->name));
+       p->cprio = thread->cprio;
+
+       return 1;
+}
+
+static int vfile_sched_weak_show(struct xnvfile_snapshot_iterator *it,
+                                void *data)
+{
+       struct vfile_sched_weak_data *p = data;
+       char pribuf[16];
+
+       if (p == NULL)
+               xnvfile_printf(it, "%-3s  %-6s %-4s %s\n",
+                              "CPU", "PID", "PRI", "NAME");
+       else {
+               snprintf(pribuf, sizeof(pribuf), "%3d", p->cprio);
+               xnvfile_printf(it, "%3u  %-6d %-4s %s\n",
+                              p->cpu,
+                              p->pid,
+                              pribuf,
+                              p->name);
+       }
+
+       return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops = {
+       .rewind = vfile_sched_weak_rewind,
+       .next = vfile_sched_weak_next,
+       .show = vfile_sched_weak_show,
+};
+
+static int xnsched_weak_init_vfile(struct xnsched_class *schedclass,
+                                  struct xnvfile_directory *vfroot)
+{
+       int ret;
+
+       ret = xnvfile_init_dir(schedclass->name, &sched_weak_vfroot, vfroot);
+       if (ret)
+               return ret;
+
+       return xnvfile_init_snapshot("threads", &vfile_sched_weak,
+                                    &sched_weak_vfroot);
+}
+
+static void xnsched_weak_cleanup_vfile(struct xnsched_class *schedclass)
+{
+       xnvfile_destroy_snapshot(&vfile_sched_weak);
+       xnvfile_destroy_dir(&sched_weak_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_weak = {
+       .sched_init             =       xnsched_weak_init,
+       .sched_enqueue          =       xnsched_weak_enqueue,
+       .sched_dequeue          =       xnsched_weak_dequeue,
+       .sched_requeue          =       xnsched_weak_requeue,
+       .sched_pick             =       xnsched_weak_pick,
+       .sched_tick             =       NULL,
+       .sched_rotate           =       NULL,
+       .sched_forget           =       NULL,
+       .sched_declare          =       NULL,
+       .sched_setparam         =       xnsched_weak_setparam,
+       .sched_trackprio        =       xnsched_weak_trackprio,
+       .sched_getparam         =       xnsched_weak_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+       .sched_init_vfile       =       xnsched_weak_init_vfile,
+       .sched_cleanup_vfile    =       xnsched_weak_cleanup_vfile,
+#endif
+       .weight                 =       XNSCHED_CLASS_WEIGHT(1),
+       .name                   =       "weak"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_weak);
diff --git a/kernel/cobalt/nucleus/sched.c b/kernel/cobalt/nucleus/sched.c
index 8a085aa..2dc686b 100644
--- a/kernel/cobalt/nucleus/sched.c
+++ b/kernel/cobalt/nucleus/sched.c
@@ -53,6 +53,9 @@ static void xnsched_register_class(struct xnsched_class *sched_class)
 void xnsched_register_classes(void)
 {
        xnsched_register_class(&xnsched_class_idle);
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+       xnsched_register_class(&xnsched_class_weak);
+#endif
 #ifdef CONFIG_XENO_OPT_SCHED_TP
        xnsched_register_class(&xnsched_class_tp);
 #endif
diff --git a/kernel/cobalt/nucleus/shadow.c b/kernel/cobalt/nucleus/shadow.c
index 0a043c6..9beb718 100644
--- a/kernel/cobalt/nucleus/shadow.c
+++ b/kernel/cobalt/nucleus/shadow.c
@@ -857,11 +857,6 @@ int xnshadow_map_user(struct xnthread *thread,
        xnarch_atomic_inc(&sys_ppd->refcnt);
        ipipe_enable_notifier(current);
 
-       if (xnthread_base_priority(thread) == 0 &&
-           current->policy == SCHED_NORMAL)
-               /* Non real-time shadow. */
-               xnthread_set_state(thread, XNOTHER);
-
        attr.mode = 0;
        attr.affinity = affinity;
        attr.entry = NULL;
@@ -988,10 +983,6 @@ int xnshadow_map_kernel(struct xnthread *thread, struct completion *done)
        xndebug_shadow_init(thread);
        ipipe_enable_notifier(p);
 
-       if (xnthread_base_priority(thread) == 0 &&
-           p->policy == SCHED_NORMAL)
-               xnthread_set_state(thread, XNOTHER);
-
        /*
         * CAUTION: Soon after xnpod_init_thread() has returned,
         * xnpod_start_thread() is commonly invoked from the root
@@ -1948,7 +1939,7 @@ done:
                    xnthread_test_info(thread, XNKICKED)) {
                        sigs = 1;
                        request_syscall_restart(thread, regs, sysflags);
-               } else if (xnthread_test_state(thread, XNOTHER) &&
+               } else if (xnthread_test_state(thread, XNWEAK) &&
                           xnthread_get_rescnt(thread) == 0) {
                        if (switched)
                                switched = 0;
@@ -2028,7 +2019,7 @@ static int handle_root_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
 
        /*
         * Catch cancellation requests pending for user shadows
-        * running mostly in secondary mode, i.e. XNOTHER. In that
+        * running mostly in secondary mode, i.e. XNWEAK. In that
         * case, we won't run request_syscall_restart() that
         * frequently, so check for cancellation here.
         */
@@ -2110,7 +2101,7 @@ restart:
                if (signal_pending(current)) {
                        sigs = 1;
                        request_syscall_restart(thread, regs, sysflags);
-               } else if (xnthread_test_state(thread, XNOTHER) &&
+               } else if (xnthread_test_state(thread, XNWEAK) &&
                           xnthread_get_rescnt(thread) == 0)
                        sysflags |= __xn_exec_switchback;
        }
diff --git a/kernel/cobalt/nucleus/synch.c b/kernel/cobalt/nucleus/synch.c
index 21ca9aa..1e142fc 100644
--- a/kernel/cobalt/nucleus/synch.c
+++ b/kernel/cobalt/nucleus/synch.c
@@ -440,7 +440,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
        fastlock = xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);
 
        if (likely(fastlock == XN_NO_HANDLE)) {
-               if (xnthread_test_state(thread, XNOTHER))
+               if (xnthread_test_state(thread, XNWEAK))
                        xnthread_inc_rescnt(thread);
                xnthread_clear_info(thread,
                                    XNRMID | XNTIMEO | XNBREAK);
@@ -541,7 +541,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                xnthread_set_info(thread, XNTIMEO);
        } else {
              grab_and_exit:
-               if (xnthread_test_state(thread, XNOTHER))
+               if (xnthread_test_state(thread, XNWEAK))
                        xnthread_inc_rescnt(thread);
 
                /* We are the new owner, update the fastlock
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index a7a62b2..eb050b7 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -392,6 +392,7 @@ unlock_and_exit:
  */
 static inline int pthread_create(pthread_t *tid, const pthread_attr_t *attr)
 {
+       struct xnsched_class *sched_class;
        union xnsched_policy_param param;
        struct xnthread_init_attr iattr;
        pthread_t thread, cur;
@@ -404,20 +405,18 @@ static inline int pthread_create(pthread_t *tid, const pthread_attr_t *attr)
                return -EINVAL;
 
        thread = (pthread_t)xnmalloc(sizeof(*thread));
-
-       if (!thread)
+       if (thread == NULL)
                return -EAGAIN;
 
-       thread->attr = attr ? *attr : default_thread_attr;
-
        cur = cobalt_current_thread();
-
+       thread->attr = attr ? *attr : default_thread_attr;
        if (thread->attr.inheritsched == PTHREAD_INHERIT_SCHED) {
-               /* cur may be NULL if pthread_create is not called by a cobalt
-                  thread, in which case trying to inherit scheduling
-                  parameters is treated as an error. */
-
-               if (!cur) {
+               /*
+                * cur may be NULL if pthread_create is not called by
+                * a cobalt thread, in which case trying to inherit
+                * scheduling parameters is treated as an error.
+                */
+               if (cur == NULL) {
                        xnfree(thread);
                        return -EINVAL;
                }
@@ -428,19 +427,38 @@ static inline int pthread_create(pthread_t *tid, const pthread_attr_t *attr)
 
        prio = thread->attr.schedparam_ex.sched_priority;
        name = thread->attr.name;
+       flags |= XNUSER;
 
        if (thread->attr.fp)
                flags |= XNFPU;
 
-       flags |= XNUSER;
-
        iattr.name = name;
        iattr.flags = flags;
        iattr.ops = &cobalt_thread_ops;
-       param.rt.prio = prio;
+
+       /*
+        * When the weak scheduling class is compiled in, SCHED_WEAK
+        * and SCHED_OTHER threads are scheduled by
+        * xnsched_class_weak, at their respective priority
+        * levels. Otherwise, SCHED_OTHER is scheduled by
+        * xnsched_class_rt at priority level #0.
+        */
+       switch (thread->attr.policy) {
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+       case SCHED_OTHER:
+       case SCHED_WEAK:
+               param.weak.prio = prio;
+               sched_class = &xnsched_class_weak;
+               break;
+#endif
+       default:
+               param.rt.prio = prio;
+               sched_class = &xnsched_class_rt;
+               break;
+       }
 
        if (xnpod_init_thread(&thread->threadbase,
-                             &iattr, &xnsched_class_rt, &param) != 0) {
+                             &iattr, sched_class, &param) != 0) {
                xnfree(thread);
                return -EAGAIN;
        }
@@ -619,8 +637,8 @@ static inline int pthread_set_mode_np(int clrmask, int setmask, int *mode_r)
  *
  * @param tid target thread;
  *
- * @param pol scheduling policy, one of SCHED_FIFO, SCHED_COBALT,
- * SCHED_RR or SCHED_OTHER;
+ * @param pol scheduling policy, one of SCHED_WEAK, SCHED_FIFO,
+ * SCHED_COBALT, SCHED_RR or SCHED_OTHER;
  *
  * @param par scheduling parameters address.
  *
@@ -662,6 +680,7 @@ static inline int pthread_set_mode_np(int clrmask, int setmask, int *mode_r)
 static inline int
 pthread_setschedparam(pthread_t tid, int pol, const struct sched_param *par)
 {
+       struct xnsched_class *sched_class;
        union xnsched_policy_param param;
        struct xnthread *thread;
        xnticks_t tslice;
@@ -678,11 +697,21 @@ pthread_setschedparam(pthread_t tid, int pol, const struct sched_param *par)
        thread = &tid->threadbase;
        prio = par->sched_priority;
        tslice = XN_INFINITE;
+       sched_class = &xnsched_class_rt;
+       param.rt.prio = prio;
 
        switch (pol) {
        case SCHED_OTHER:
                if (prio)
                        goto fail;
+       case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+               param.weak.prio = prio;
+               sched_class = &xnsched_class_weak;
+#else
+               if (prio)
+                       goto fail;
+#endif
                break;
        case SCHED_RR:
                tslice = xnthread_time_slice(thread);
@@ -690,13 +719,11 @@ pthread_setschedparam(pthread_t tid, int pol, const struct sched_param *par)
                        tslice = cobalt_time_slice;
                /* falldown wanted */
        case SCHED_FIFO:
-       case SCHED_SPORADIC:
-       case SCHED_TP:
-               if (prio < COBALT_MIN_PRIORITY || prio > COBALT_MAX_PRIORITY)
+               if (prio < XNSCHED_FIFO_MIN_PRIO || prio > XNSCHED_FIFO_MAX_PRIO)
                        goto fail;
                break;
        case SCHED_COBALT:
-               if (prio < COBALT_MIN_PRIORITY || prio > XNSCHED_RT_MAX_PRIO)
+               if (prio < XNSCHED_RT_MIN_PRIO || prio > XNSCHED_RT_MAX_PRIO)
                        goto fail;
                break;
        default:
@@ -708,8 +735,7 @@ pthread_setschedparam(pthread_t tid, int pol, const struct sched_param *par)
        xnpod_set_thread_tslice(thread, tslice);
 
        tid->sched_policy = pol;
-       param.rt.prio = prio;
-       xnpod_set_thread_schedparam(thread, &xnsched_class_rt, &param);
+       xnpod_set_thread_schedparam(thread, sched_class, &param);
 
        xnpod_schedule();
 
@@ -761,14 +787,14 @@ static inline int pthread_setschedparam_ex(pthread_t tid, int pol,
        case SCHED_OTHER:
        case SCHED_FIFO:
        case SCHED_COBALT:
+       case SCHED_WEAK:
                xnpod_set_thread_tslice(&tid->threadbase, XN_INFINITE);
                short_param.sched_priority = par->sched_priority;
                return pthread_setschedparam(tid, pol, &short_param);
        default:
-               if (par->sched_priority < COBALT_MIN_PRIORITY ||
-                   par->sched_priority >  COBALT_MAX_PRIORITY) {
-                       return EINVAL;
-               }
+               if (par->sched_priority < XNSCHED_FIFO_MIN_PRIO ||
+                   par->sched_priority > XNSCHED_FIFO_MAX_PRIO)
+                       return -EINVAL;
        }
 
        xnlock_get_irqsave(&nklock, s);
@@ -1292,12 +1318,12 @@ int cobalt_sched_min_prio(int policy)
        case SCHED_RR:
        case SCHED_SPORADIC:
        case SCHED_TP:
+               return XNSCHED_FIFO_MIN_PRIO;
        case SCHED_COBALT:
-               return COBALT_MIN_PRIORITY;
-
+               return XNSCHED_RT_MIN_PRIO;
        case SCHED_OTHER:
+       case SCHED_WEAK:
                return 0;
-
        default:
                return -EINVAL;
        }
@@ -1310,14 +1336,17 @@ int cobalt_sched_max_prio(int policy)
        case SCHED_RR:
        case SCHED_SPORADIC:
        case SCHED_TP:
-               return COBALT_MAX_PRIORITY;
-
+               return XNSCHED_FIFO_MAX_PRIO;
        case SCHED_COBALT:
                return XNSCHED_RT_MAX_PRIO;
-
        case SCHED_OTHER:
                return 0;
-
+       case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+               return XNSCHED_FIFO_MAX_PRIO;
+#else
+               return 0;
+#endif
        default:
                return -EINVAL;
        }
@@ -1393,7 +1422,7 @@ int set_tp_config(int cpu, union sched_config *config, size_t len)
 cleanup_and_fail:
        xnfree(gps);
 fail:
-       return EINVAL;
+       return -EINVAL;
 }
 
 #else /* !CONFIG_XENO_OPT_SCHED_TP */
@@ -1401,7 +1430,7 @@ fail:
 static inline
 int set_tp_config(int cpu, union sched_config *config, size_t len)
 {
-       return EINVAL;
+       return -EINVAL;
 }
 
 #endif /* !CONFIG_XENO_OPT_SCHED_TP */
@@ -1476,7 +1505,7 @@ int cobalt_sched_setconfig_np(int cpu, int policy,
 
        switch (policy) {
        case SCHED_TP:
-               ret = -set_tp_config(cpu, buf, len);
+               ret = set_tp_config(cpu, buf, len);
                break;
        default:
                ret = -EINVAL;
diff --git a/lib/cobalt/internal.c b/lib/cobalt/internal.c
index f144fa5..74cb391 100644
--- a/lib/cobalt/internal.c
+++ b/lib/cobalt/internal.c
@@ -43,7 +43,7 @@ void __cobalt_thread_harden(void)
        unsigned long status = xeno_get_current_mode();
 
        /* non-RT shadows are NOT allowed to force primary mode. */
-       if ((status & (XNRELAX|XNOTHER)) == XNRELAX)
+       if ((status & (XNRELAX|XNWEAK)) == XNRELAX)
                XENOMAI_SYSCALL1(sc_nucleus_migrate, XENOMAI_XENO_DOMAIN);
 }
 
@@ -117,7 +117,7 @@ int cobalt_monitor_enter(cobalt_monitor_t *mon)
         */
 
        status = xeno_get_current_mode();
-       if (status & (XNRELAX|XNOTHER))
+       if (status & (XNRELAX|XNWEAK))
                goto syscall;
 
        datp = get_monitor_data(mon);
@@ -155,7 +155,7 @@ int cobalt_monitor_exit(cobalt_monitor_t *mon)
                goto syscall;
 
        status = xeno_get_current_mode();
-       if (status & XNOTHER)
+       if (status & XNWEAK)
                goto syscall;
 
        cur = xeno_get_current();
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index f89e276..1e6492b 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -141,7 +141,7 @@ COBALT_IMPL(int, pthread_mutex_lock, (pthread_mutex_t *mutex))
         * obtain them via a syscall.
         */
        status = xeno_get_current_mode();
-       if ((status & (XNRELAX|XNOTHER)) == 0) {
+       if ((status & (XNRELAX|XNWEAK)) == 0) {
                err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
                if (err == 0) {
                        _mutex->lockcnt = 1;
@@ -196,7 +196,7 @@ COBALT_IMPL(int, pthread_mutex_timedlock, (pthread_mutex_t *mutex,
 
        /* See __wrap_pthread_mutex_lock() */
        status = xeno_get_current_mode();
-       if ((status & (XNRELAX|XNOTHER)) == 0) {
+       if ((status & (XNRELAX|XNWEAK)) == 0) {
                err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
                if (err == 0) {
                        _mutex->lockcnt = 1;
@@ -250,7 +250,7 @@ COBALT_IMPL(int, pthread_mutex_trylock, (pthread_mutex_t *mutex))
                return EINVAL;
 
        status = xeno_get_current_mode();
-       if ((status & (XNRELAX|XNOTHER)) == 0) {
+       if ((status & (XNRELAX|XNWEAK)) == 0) {
                err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
                if (err == 0) {
                        _mutex->lockcnt = 1;
@@ -314,7 +314,7 @@ COBALT_IMPL(int, pthread_mutex_unlock, (pthread_mutex_t *mutex))
        if ((datp->flags & COBALT_MUTEX_COND_SIGNAL))
                goto do_syscall;
 
-       if (xeno_get_current_mode() & XNOTHER)
+       if (xeno_get_current_mode() & XNWEAK)
                goto do_syscall;
 
        if (xnsynch_fast_release(&datp->owner, cur))
diff --git a/lib/cobalt/thread.c b/lib/cobalt/thread.c
index 6ecf2be..a4724c5 100644
--- a/lib/cobalt/thread.c
+++ b/lib/cobalt/thread.c
@@ -56,7 +56,7 @@ COBALT_IMPL(int, pthread_setschedparam, (pthread_t thread,
                xeno_sigshadow_install_once();
                xeno_set_current();
                xeno_set_current_window(mode_offset);
-               if (policy != SCHED_OTHER)
+               if (policy != SCHED_OTHER && policy != SCHED_WEAK)
                        XENOMAI_SYSCALL1(sc_nucleus_migrate, XENOMAI_XENO_DOMAIN);
        }
 
@@ -87,7 +87,7 @@ int pthread_setschedparam_ex(pthread_t thread,
                xeno_sigshadow_install_once();
                xeno_set_current();
                xeno_set_current_window(mode_offset);
-               if (policy != SCHED_OTHER)
+               if (policy != SCHED_OTHER && policy != SCHED_WEAK)
                        XENOMAI_SYSCALL1(sc_nucleus_migrate, XENOMAI_XENO_DOMAIN);
        }
 
@@ -236,7 +236,7 @@ static void *__pthread_trampoline(void *p)
        if (param_ex.sched_priority == parent_prio)
                __wrap_sched_yield();
 
-       if (policy != SCHED_OTHER)
+       if (policy != SCHED_OTHER && policy != SCHED_WEAK)
                XENOMAI_SYSCALL1(sc_nucleus_migrate, XENOMAI_XENO_DOMAIN);
 
        return start(arg);
diff --git a/testsuite/unit/mutex-torture.c b/testsuite/unit/mutex-torture.c
index a53c734..69edbc6 100644
--- a/testsuite/unit/mutex-torture.c
+++ b/testsuite/unit/mutex-torture.c
@@ -725,15 +725,15 @@ void nrt_lock(void *cookie)
 {
        pthread_mutex_t *mutex = cookie;
 
-       /* Check that XNOTHER flag gets cleared and set back when
+       /* Check that XNWEAK flag gets cleared and set back when
           changing priority */
-       check_current_mode(XNRELAX | XNOTHER, XNRELAX | XNOTHER);
+       check_current_mode(XNRELAX | XNWEAK, XNRELAX | XNWEAK);
        check_current_prio(0);
        dispatch("auto_switchback renice 1", THREAD_RENICE, 1, 0, 1);
-       check_current_mode(XNOTHER, 0);
+       check_current_mode(XNWEAK, 0);
        check_current_prio(1);
        dispatch("auto_switchback renice 2", THREAD_RENICE, 1, 0, 0);
-       check_current_mode(XNRELAX | XNOTHER, XNRELAX | XNOTHER);
+       check_current_mode(XNRELAX | XNWEAK, XNRELAX | XNWEAK);
        check_current_prio(0);
 
        /* Check mode changes for auto-switchback threads while using
@@ -743,7 +743,7 @@ void nrt_lock(void *cookie)
        ms_sleep(11);
        check_current_prio(2);
        dispatch("auto_switchback mutex_unlock 1", MUTEX_UNLOCK, 1, 0, mutex);
-       check_current_mode(XNRELAX | XNOTHER, XNRELAX | XNOTHER);
+       check_current_mode(XNRELAX | XNWEAK, XNRELAX | XNWEAK);
 }
 
 void auto_switchback(void)

