Module: xenomai-forge
Branch: next
Commit: ffdee1e9bb710c911d32f4f410ed9a83abc5266c
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=ffdee1e9bb710c911d32f4f410ed9a83abc5266c

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed May 21 15:15:32 2014 +0200

copperplate: introduce threadobj_set_schedparam()

threadobj_set_priority() is replaced and superseded by
threadobj_set_schedparam() for changing the scheduling parameters of
a thread. Unlike its predecessor, this new service does not pick the
target scheduling class based on the priority level; that choice is
left to the caller instead.
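
For illustration, a caller that used to let threadobj_set_priority()
pick the class now selects it explicitly. A minimal sketch (the helper
name set_prio() is made up here, the prio-0-means-SCHED_OTHER mapping
simply mirrors the skin updates below, and thobj->lock is assumed to
be held on entry, as the new service requires):

    static int set_prio(struct threadobj *thobj, int prio)
    {
            struct sched_param_ex param_ex;
            int policy;

            /* The caller, not the service, picks the scheduling class. */
            policy = prio ? SCHED_FIFO : SCHED_OTHER;
            param_ex.sched_priority = prio;

            return threadobj_set_schedparam(thobj, policy, &param_ex);
    }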

As a notable side-effect, threadobj_set_rr() is dropped, since
enabling round-robin now simply means switching to SCHED_RR via a call
to threadobj_set_schedparam(), and disabling it happens implicitly
whenever a thread leaves the SCHED_RR class.
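
In practice a skin toggles time slicing along these lines (sketch
only; thobj, quantum - a struct timespec - and the enable flag are
assumed to come from the caller, and the fallback class mirrors what
rt_task_slice() and kernelTimeSlice() do below):

    struct sched_param_ex param_ex;
    int policy, ret;

    param_ex.sched_priority = threadobj_get_priority(thobj);
    if (enable) {
            policy = SCHED_RR;
            param_ex.sched_rr_quantum = quantum; /* per-thread time slice */
    } else {
            /* Leaving SCHED_RR implicitly turns round-robin off. */
            policy = param_ex.sched_priority ? SCHED_FIFO : SCHED_OTHER;
    }

    ret = threadobj_set_schedparam(thobj, policy, &param_ex);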

---

 include/copperplate/threadobj.h   |   46 +++---
 lib/alchemy/task.c                |   31 ++--
 lib/copperplate/regd/fs-common.c  |    4 +-
 lib/copperplate/regd/fs-mercury.c |    2 +-
 lib/copperplate/syncobj.c         |    2 +-
 lib/copperplate/threadobj.c       |  313 ++++++++++++-------------------------
 lib/psos/task.c                   |   53 ++++---
 lib/vxworks/kernLib.c             |   36 +++--
 lib/vxworks/taskLib.c             |   25 +--
 9 files changed, 216 insertions(+), 296 deletions(-)

diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index 1746517..58a4f3e 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -81,7 +81,7 @@ void threadobj_save_timeout(struct threadobj_corespec *corespec,
 struct threadobj_corespec {
        pthread_cond_t grant_sync;
        int policy_unlocked;
-       int prio_unlocked;
+       struct sched_param_ex schedparam_unlocked;
        timer_t rr_timer;
        struct timespec wakeup;
        ticks_t period;
@@ -115,14 +115,13 @@ void threadobj_save_timeout(struct threadobj_corespec *corespec,
 /*
  * threadobj->status, updated with ->lock held.
  */
-#define __THREAD_S_RR          (1 << 0)        /* Undergoes round-robin. */
-#define __THREAD_S_STARTED     (1 << 1)        /* threadobj_start() called. */
-#define __THREAD_S_WARMUP      (1 << 2)        /* threadobj_prologue() not called yet. */
-#define __THREAD_S_ABORTED     (1 << 3)        /* Cancelled before start. */
-#define __THREAD_S_LOCKED      (1 << 4)        /* threadobj_lock() granted (debug only). */
-#define __THREAD_S_ACTIVE      (1 << 5)        /* Running user code. */
-#define __THREAD_S_SUSPENDED   (1 << 6)        /* Suspended via threadobj_suspend(). */
-#define __THREAD_S_SAFE                (1 << 7)        /* TCB release deferred. */
+#define __THREAD_S_STARTED     (1 << 0)        /* threadobj_start() called. */
+#define __THREAD_S_WARMUP      (1 << 1)        /* threadobj_prologue() not called yet. */
+#define __THREAD_S_ABORTED     (1 << 2)        /* Cancelled before start. */
+#define __THREAD_S_LOCKED      (1 << 3)        /* threadobj_lock() granted (debug only). */
+#define __THREAD_S_ACTIVE      (1 << 4)        /* Running user code. */
+#define __THREAD_S_SUSPENDED   (1 << 5)        /* Suspended via threadobj_suspend(). */
+#define __THREAD_S_SAFE                (1 << 6)        /* TCB release deferred. */
 #define __THREAD_S_DEBUG       (1 << 31)       /* Debug mode enabled. */
 /*
  * threadobj->run_state, locklessly updated by "current", merged
@@ -163,7 +162,8 @@ struct threadobj {
        int status;
        int run_state;
        int policy;
-       int priority;
+       struct sched_param_ex schedparam;
+       int global_priority;
        pid_t cnode;
        pid_t pid;
        char name[32];
@@ -192,7 +192,8 @@ struct threadobj {
 struct threadobj_init_data {
        unsigned int magic;
        cpu_set_t affinity;
-       int priority;
+       int policy;
+       struct sched_param_ex param_ex;
        void (*finalizer)(struct threadobj *thobj);
 };
 
@@ -322,16 +323,11 @@ int __threadobj_unlock_sched(struct threadobj *current);
 
 int threadobj_unlock_sched(void);
 
-void __threadobj_set_scheduler(struct threadobj *thobj,
-                              int policy, int prio);
-
-int threadobj_set_priority(struct threadobj *thobj, int prio);
+int threadobj_set_schedparam(struct threadobj *thobj, int policy,
+                            const struct sched_param_ex *param_ex);
 
 int threadobj_set_mode(int clrmask, int setmask, int *mode_r);
 
-int threadobj_set_rr(struct threadobj *thobj,
-                    const struct timespec *quantum);
-
 int threadobj_set_periodic(struct threadobj *thobj,
                           const struct timespec *__restrict__ idate,
                           const struct timespec *__restrict__ period);
@@ -377,9 +373,20 @@ int threadobj_pkg_init(void);
                __p;                                                    \
        })
 
+static inline int threadobj_get_policy(struct threadobj *thobj)
+{
+       return thobj->policy;
+}
+
 static inline int threadobj_get_priority(struct threadobj *thobj)
 {
-       return thobj->priority;
+       return thobj->schedparam.sched_priority;
+}
+
+static inline void threadobj_copy_schedparam(struct sched_param_ex *param_ex,
+                                            const struct threadobj *thobj)
+{
+       *param_ex = thobj->schedparam;
 }
 
 static inline int threadobj_lock(struct threadobj *thobj)
@@ -410,6 +417,7 @@ static inline int threadobj_trylock(struct threadobj *thobj)
 
 static inline int threadobj_unlock(struct threadobj *thobj)
 {
+       __threadobj_check_locked(thobj);
        __threadobj_tag_unlocked(thobj);
        return write_unlock_safe(&thobj->lock, thobj->cancel_state);
 }
diff --git a/lib/alchemy/task.c b/lib/alchemy/task.c
index fb5cae8..199d675 100644
--- a/lib/alchemy/task.c
+++ b/lib/alchemy/task.c
@@ -268,7 +268,8 @@ static int create_tcb(struct alchemy_task **tcbp, RT_TASK *task,
 
        idata.magic = task_magic;
        idata.finalizer = task_finalizer;
-       idata.priority = prio;
+       idata.policy = prio ? SCHED_FIFO : SCHED_OTHER;
+       idata.param_ex.sched_priority = prio;
        ret = threadobj_init(&tcb->thobj, &idata);
        if (ret)
                goto fail_threadinit;
@@ -416,8 +417,8 @@ int rt_task_create(RT_TASK *task, const char *name,
 
        cta.detachstate = mode & T_JOINABLE ?
                PTHREAD_CREATE_JOINABLE : PTHREAD_CREATE_DETACHED;
-       cta.policy = prio ? SCHED_RT : SCHED_OTHER;
-       cta.param_ex.sched_priority = prio;
+       cta.policy = threadobj_get_policy(&tcb->thobj);
+       threadobj_copy_schedparam(&cta.param_ex, &tcb->thobj);
        cta.prologue = task_prologue_1;
        cta.run = task_entry;
        cta.arg = tcb;
@@ -1228,9 +1229,10 @@ RT_TASK *rt_task_self(void)
  */
 int rt_task_set_priority(RT_TASK *task, int prio)
 {
+       struct sched_param_ex param_ex;
        struct alchemy_task *tcb;
        struct service svc;
-       int ret;
+       int policy, ret;
 
        ret = check_task_priority(prio);
        if (ret)
@@ -1242,7 +1244,10 @@ int rt_task_set_priority(RT_TASK *task, int prio)
        if (tcb == NULL)
                goto out;
 
-       ret = threadobj_set_priority(&tcb->thobj, prio);
+       policy = prio ? SCHED_FIFO : SCHED_OTHER;
+       param_ex.sched_priority = prio;
+       ret = threadobj_set_schedparam(&tcb->thobj, policy, &param_ex);
+       put_alchemy_task(tcb);
 out:
        CANCEL_RESTORE(svc);
 
@@ -1353,20 +1358,26 @@ out:
  */
 int rt_task_slice(RT_TASK *task, RTIME quantum)
 {
+       struct sched_param_ex param_ex;
        struct alchemy_task *tcb;
-       struct timespec slice;
        struct service svc;
-       int ret;
+       int ret, policy;
 
        CANCEL_DEFER(svc);
 
-       clockobj_ticks_to_timespec(&alchemy_clock, quantum, &slice);
-
        tcb = get_alchemy_task_or_self(task, &ret);
        if (tcb == NULL)
                goto out;
 
-       ret = threadobj_set_rr(&tcb->thobj, &slice);
+       param_ex.sched_priority = threadobj_get_priority(&tcb->thobj);
+       if (quantum) {
+               policy = SCHED_RR;
+               clockobj_ticks_to_timespec(&alchemy_clock, quantum,
+                                          &param_ex.sched_rr_quantum);
+       } else
+               policy = param_ex.sched_priority ? SCHED_FIFO : SCHED_OTHER;
+
+       ret = threadobj_set_schedparam(&tcb->thobj, policy, &param_ex);
        put_alchemy_task(tcb);
 out:
        CANCEL_RESTORE(svc);
diff --git a/lib/copperplate/regd/fs-common.c b/lib/copperplate/regd/fs-common.c
index 36c4b34..eadf309 100644
--- a/lib/copperplate/regd/fs-common.c
+++ b/lib/copperplate/regd/fs-common.c
@@ -115,8 +115,8 @@ int open_threads(struct fsobj *fsobj, void *priv)
                strncpy(p->name, thobj->name, sizeof(p->name) - 1);
                p->name[sizeof(p->name) - 1] = '\0';
                p->pid = thobj->pid;
-               p->priority = thobj->priority;
-               p->policy = thobj->policy;
+               p->priority = threadobj_get_priority(thobj);
+               p->policy = threadobj_get_policy(thobj);
                threadobj_stat(thobj, &statbuf);
                threadobj_unlock(thobj);
                p->status = statbuf.status;
diff --git a/lib/copperplate/regd/fs-mercury.c b/lib/copperplate/regd/fs-mercury.c
index c1fed37..9a17f83 100644
--- a/lib/copperplate/regd/fs-mercury.c
+++ b/lib/copperplate/regd/fs-mercury.c
@@ -75,7 +75,7 @@ char *format_thread_status(const struct thread_data *p, char *buf, size_t len)
        if (p->schedlock > 0)
                *wp++ = 'l';
 
-       if (p->status & __THREAD_S_RR)
+       if (p->policy == SCHED_RR)
                *wp++ = 'r';
 
        *wp = '\0';
diff --git a/lib/copperplate/syncobj.c b/lib/copperplate/syncobj.c
index 56c0025..40b0997 100644
--- a/lib/copperplate/syncobj.c
+++ b/lib/copperplate/syncobj.c
@@ -337,7 +337,7 @@ static inline void enqueue_waiter(struct syncobj *sobj,
 {
        struct threadobj *__thobj;
 
-       thobj->wait_prio = threadobj_get_priority(thobj);
+       thobj->wait_prio = thobj->global_priority;
        if (list_empty(&sobj->grant_list) || (sobj->flags & SYNCOBJ_PRIO) == 0) {
                list_append(&thobj->wait_link, &sobj->grant_list);
                return;
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index f010450..49f73f8 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -48,6 +48,9 @@ union copperplate_wait_union {
 
 static void finalize_thread(void *p);
 
+static void set_global_priority(struct threadobj *thobj, int policy,
+                               const struct sched_param_ex *param_ex);
+
 static int request_setschedparam(struct threadobj *thobj, int policy,
                                 const struct sched_param_ex *param_ex);
 
@@ -389,54 +392,6 @@ int threadobj_unlock_sched(void)
        return __bt(__threadobj_unlock_sched(current));
 }
 
-void __threadobj_set_scheduler(struct threadobj *thobj,
-                              int policy, int prio) /* thobj->lock held */
-{
-       __threadobj_check_locked(thobj);
-
-       /*
-        * XXX: Internal call which bypasses the normal scheduling
-        * policy tracking: use with care.
-        */
-       thobj->priority = prio;
-       thobj->policy = policy;
-}
-
-int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock held, dropped */
-{
-       struct sched_param_ex param_ex;
-       int policy, ret;
-
-       __threadobj_check_locked(thobj);
-
-       policy = SCHED_RT;
-       if (prio == 0) {
-               thobj->status &= ~__THREAD_S_RR;
-               policy = SCHED_OTHER;
-       } else if (thobj->status & __THREAD_S_RR) {
-               param_ex.sched_rr_quantum = thobj->tslice;
-               policy = SCHED_RR;
-       }
-
-       /*
-        * As a side effect, resetting SCHED_RR will refill the time
-        * credit for the target thread with the last quantum set.
-        */
-       param_ex.sched_priority = prio;
-       thobj->priority = prio;
-       thobj->policy = policy;
-
-       if (thobj == threadobj_current()) {
-               threadobj_unlock(thobj);
-               ret = request_setschedparam(thobj, policy, &param_ex);
-       } else {
-               ret = request_setschedparam(thobj, policy, &param_ex);
-               threadobj_unlock(thobj);
-       }
-
-       return __bt(ret);
-}
-
 int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock held */
 {
        struct threadobj *current = threadobj_current();
@@ -465,31 +420,16 @@ int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock h
        return 0;
 }
 
-static int set_rr(struct threadobj *thobj, const struct timespec *quantum)
+static inline int enable_rr_corespec(struct threadobj *thobj,
+                                    int *policy,
+                                    const struct sched_param_ex *param_ex) /* thobj->lock held */
 {
-       struct sched_param_ex xparam;
-       pthread_t tid = thobj->tid;
-       int ret, policy;
-
-       if (quantum && (quantum->tv_sec || quantum->tv_nsec)) {
-               policy = SCHED_RR;
-               xparam.sched_rr_quantum = *quantum;
-               thobj->status |= __THREAD_S_RR;
-               thobj->tslice = *quantum;
-               xparam.sched_priority = thobj->priority ?: 1;
-       } else {
-               policy = thobj->policy;
-               thobj->status &= ~__THREAD_S_RR;
-               xparam.sched_rr_quantum.tv_sec = 0;
-               xparam.sched_rr_quantum.tv_nsec = 0;
-               xparam.sched_priority = thobj->priority;
-       }
-
-       threadobj_unlock(thobj);
-       ret = pthread_setschedparam_ex(tid, policy, &xparam);
-       threadobj_lock(thobj);
+       return 0;
+}
 
-       return __bt(-ret);
+static inline void disable_rr_corespec(struct threadobj *thobj)
+{
+       /* nop */
 }
 
 int threadobj_set_periodic(struct threadobj *thobj,
@@ -759,21 +699,24 @@ static inline int threadobj_unblocked_corespec(struct threadobj *current)
 
 int __threadobj_lock_sched(struct threadobj *current) /* current->lock held */
 {
-       pthread_t tid = current->tid;
-       struct sched_param param;
+       struct sched_param_ex param_ex;
+       int ret;
 
        __threadobj_check_locked(current);
 
-       if (current->schedlock_depth++ > 0)
-               return 0;
+       if (current->schedlock_depth > 0)
+               goto done;
 
-       current->core.prio_unlocked = current->priority;
+       current->core.schedparam_unlocked = current->schedparam;
        current->core.policy_unlocked = current->policy;
-       current->priority = threadobj_lock_prio;
-       current->policy = SCHED_RT;
-       param.sched_priority = threadobj_lock_prio;
+       param_ex.sched_priority = threadobj_lock_prio;
+       ret = __bt(threadobj_set_schedparam(current, SCHED_FIFO, &param_ex));
+       if (ret)
+               return __bt(ret);
+done:
+       current->schedlock_depth++;
 
-       return __bt(-pthread_setschedparam(tid, SCHED_RT, &param));
+       return 0;
 }
 
 int threadobj_lock_sched(void)
@@ -790,10 +733,6 @@ int threadobj_lock_sched(void)
 
 int __threadobj_unlock_sched(struct threadobj *current) /* current->lock held */
 {
-       pthread_t tid = current->tid;
-       struct sched_param param;
-       int policy, ret;
-
        __threadobj_check_locked(current);
 
        if (current->schedlock_depth == 0)
@@ -802,14 +741,9 @@ int __threadobj_unlock_sched(struct threadobj *current) /* current->lock held */
        if (--current->schedlock_depth > 0)
                return 0;
 
-       current->priority = current->core.prio_unlocked;
-       param.sched_priority = current->core.prio_unlocked;
-       policy = current->core.policy_unlocked;
-       threadobj_unlock(current);
-       ret = pthread_setschedparam(tid, policy, &param);
-       threadobj_lock(current);
-
-       return __bt(-ret);
+       return __bt(threadobj_set_schedparam(current,
+                                            current->core.policy_unlocked,
+                                            &current->core.schedparam_unlocked));
 }
 
 int threadobj_unlock_sched(void)
@@ -824,65 +758,6 @@ int threadobj_unlock_sched(void)
        return __bt(ret);
 }
 
-void __threadobj_set_scheduler(struct threadobj *thobj,
-                              int policy, int prio) /* thobj->lock held */
-{
-       __threadobj_check_locked(thobj);
-
-       /*
-        * XXX: Internal call which bypasses the normal scheduling
-        * policy tracking: use with care.
-        */
-       if (thobj->schedlock_depth > 0) {
-               thobj->core.prio_unlocked = prio;
-               thobj->core.policy_unlocked = policy;
-       } else {
-               thobj->priority = prio;
-               thobj->policy = policy;
-       }
-}
-
-int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock held, dropped */
-{
-       struct sched_param_ex param_ex;
-       int policy, ret;
-
-       __threadobj_check_locked(thobj);
-
-       /*
-        * We don't actually change the scheduling priority in case
-        * the target thread holds the scheduler lock, but only record
-        * the level to set when unlocking.
-        */
-       if (thobj->schedlock_depth > 0) {
-               thobj->core.prio_unlocked = prio;
-               thobj->core.policy_unlocked = prio ? SCHED_RT : SCHED_OTHER;
-               threadobj_unlock(thobj);
-               return 0;
-       }
-
-       policy = SCHED_RT;
-       if (prio == 0) {
-               thobj->status &= ~__THREAD_S_RR;
-               policy = SCHED_OTHER;
-       } else if (thobj->status & __THREAD_S_RR)
-               policy = SCHED_RR;
-
-       param_ex.sched_priority = prio;
-       thobj->priority = prio;
-       thobj->policy = policy;
-
-       if (thobj == threadobj_current()) {
-               threadobj_unlock(thobj);
-               ret = request_setschedparam(thobj, policy, &param_ex);
-       } else {
-               ret = request_setschedparam(thobj, policy, &param_ex);
-               threadobj_unlock(thobj);
-       }
-
-       return __bt(ret);
-}
-
 int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock held */
 {
        struct threadobj *current = threadobj_current();
@@ -906,56 +781,37 @@ int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock h
        return __bt(ret);
 }
 
-static int set_rr(struct threadobj *thobj, const struct timespec *quantum)
+static int enable_rr_corespec(struct threadobj *thobj,
+                             int *policy,
+                             const struct sched_param_ex *param_ex) /* thobj->lock held */
 {
-       pthread_t tid = thobj->tid;
-       struct sched_param param;
        struct itimerspec value;
-       int policy, ret;
+       int ret;
 
-       if (quantum && (quantum->tv_sec || quantum->tv_nsec)) {
-               value.it_interval = *quantum;
-               value.it_value = *quantum;
-               thobj->tslice = *quantum;
+       /*
+        * Switch to SCHED_FIFO policy instead of SCHED_RR, and use a
+        * per-thread timer in order to implement round-robin manually
+        * since we want a per-thread time quantum.
+        */
+       value.it_interval = param_ex->sched_rr_quantum;
+       value.it_value = value.it_interval;
+       ret = timer_settime(thobj->core.rr_timer, 0, &value, NULL);
+       if (ret)
+               return __bt(-errno);
 
-               if (thobj->status & __THREAD_S_RR) {
-                       /* Changing quantum of ongoing RR. */
-                       ret = timer_settime(thobj->core.rr_timer, 0, &value, NULL);
-                       return ret ? __bt(-errno) : 0;
-               }
+       *policy = SCHED_FIFO;
 
-               thobj->status |= __THREAD_S_RR;
-               /*
-                * Switch to SCHED_FIFO policy, assign default prio=1
-                * if coming from SCHED_OTHER. We use a per-thread
-                * timer to implement manual round-robin.
-                */
-               policy = SCHED_FIFO;
-               param.sched_priority = thobj->priority ?: 1;
-               ret = timer_settime(thobj->core.rr_timer, 0, &value, NULL);
-               if (ret)
-                       return __bt(-errno);
-       } else {
-               if ((thobj->status & __THREAD_S_RR) == 0)
-                       return 0;
-               thobj->status &= ~__THREAD_S_RR;
-               /*
-                * Disarm timer and reset scheduling parameters to
-                * former policy.
-                */
-               value.it_value.tv_sec = 0;
-               value.it_value.tv_nsec = 0;
-               value.it_interval = value.it_value;
-               timer_settime(thobj->core.rr_timer, 0, &value, NULL);
-               param.sched_priority = thobj->priority;
-               policy = thobj->policy;
-       }
+       return 0;
+}
 
-       threadobj_unlock(thobj);
-       ret = pthread_setschedparam(tid, policy, &param);
-       threadobj_lock(thobj);
+static void disable_rr_corespec(struct threadobj *thobj)
+{
+       struct itimerspec value;
 
-       return __bt(-ret);
+       value.it_value.tv_sec = 0;
+       value.it_value.tv_nsec = 0;
+       value.it_interval = value.it_value;
+       timer_settime(thobj->core.rr_timer, 0, &value, NULL);
 }
 
 int threadobj_set_periodic(struct threadobj *thobj,
@@ -1133,6 +989,14 @@ void *__threadobj_alloc(size_t tcb_struct_size,
        return p;
 }
 
+static void set_global_priority(struct threadobj *thobj, int policy,
+                               const struct sched_param_ex *param_ex)
+{
+       thobj->schedparam = *param_ex;
+       thobj->policy = policy;
+       thobj->global_priority = param_ex->sched_priority;
+}
+
 int threadobj_init(struct threadobj *thobj,
                   struct threadobj_init_data *idata)
 {
@@ -1148,9 +1012,8 @@ int threadobj_init(struct threadobj *thobj,
        thobj->schedlock_depth = 0;
        thobj->status = __THREAD_S_WARMUP;
        thobj->run_state = __THREAD_S_DORMANT;
-       thobj->priority = idata->priority;
-       thobj->policy = idata->priority ? SCHED_RT : SCHED_OTHER;
-       holder_init(&thobj->wait_link);
+       set_global_priority(thobj, idata->policy, &idata->param_ex);
+       holder_init(&thobj->wait_link); /* mandatory */
        thobj->cnode = __node_id;
        thobj->pid = 0;
        thobj->cancel_sem = NULL;
@@ -1250,7 +1113,7 @@ int threadobj_start(struct threadobj *thobj)      /* thobj->lock held. */
        thobj->status |= __THREAD_S_STARTED;
        __RT(pthread_cond_signal(&thobj->barrier));
 
-       if (current && thobj->priority <= current->priority)
+       if (current && thobj->global_priority <= current->global_priority)
                return 0;
 
        /*
@@ -1610,30 +1473,45 @@ void threadobj_spin(ticks_t ns)
                cpu_relax();
 }
 
-int threadobj_set_rr(struct threadobj *thobj, const struct timespec *quantum)
-{                              /* thobj->lock held */
+int threadobj_set_schedparam(struct threadobj *thobj, int policy,
+                            const struct sched_param_ex *param_ex) /* thobj->lock held */
+{
+       int ret;
+
        __threadobj_check_locked(thobj);
 
-       /*
-        * XXX: we enforce locality since both Cobalt and Mercury need
-        * this for set_rr(). This seems an acceptable limitation
-        * compared to introducing a significantly more complex
-        * implementation only for supporting a somewhat weird feature
-        * (i.e. controlling the round-robin state of remote threads).
-        */
-       if (!threadobj_local_p(thobj))
-               return -EINVAL;
+       if (thobj->schedlock_depth > 0)
+               return __bt(-EPERM);
 
        /*
-        * It makes no sense to enable/disable round-robin while
-        * holding the scheduler lock. Prevent this, which makes our
-        * logic simpler in the Mercury case with respect to tracking
-        * the current scheduling parameters.
+        * XXX: only local threads may switch to SCHED_RR since both
+        * Cobalt and Mercury need this for different reasons.
+        *
+        * This seems an acceptable limitation compared to introducing
+        * a significantly more complex implementation only for
+        * supporting a somewhat weird feature (i.e. controlling the
+        * round-robin state of threads running in remote processes).
         */
-       if (thobj->schedlock_depth > 0)
-               return -EINVAL;
+       if (policy == SCHED_RR) {
+               if (!threadobj_local_p(thobj))
+                       return -EINVAL;
+               thobj->tslice = param_ex->sched_rr_quantum;
+               ret = enable_rr_corespec(thobj, &policy, param_ex);
+               if (ret)
+                       return __bt(ret);
+       } else if (thobj->policy == SCHED_RR) /* Switching off round-robin. */
+               disable_rr_corespec(thobj);
 
-       return __bt(set_rr(thobj, quantum));
+       set_global_priority(thobj, policy, param_ex);
+
+       if (thobj == threadobj_current()) {
+               threadobj_unlock(thobj);
+               ret = request_setschedparam(thobj, policy, param_ex);
+               threadobj_lock(thobj);
+       } else
+               ret = request_setschedparam(thobj, policy, param_ex);
+
+       return __bt(ret);
 }
 
 static inline int main_overlay(void)
@@ -1655,7 +1533,8 @@ static inline int main_overlay(void)
 
        idata.magic = 0x0;
        idata.finalizer = NULL;
-       idata.priority = 0;
+       idata.policy = SCHED_OTHER;
+       idata.param_ex.sched_priority = 0;
        ret = threadobj_init(tcb, &idata);
        if (ret) {
                __threadobj_free(tcb);
diff --git a/lib/psos/task.c b/lib/psos/task.c
index 5f92367..ac6f8ae 100644
--- a/lib/psos/task.c
+++ b/lib/psos/task.c
@@ -182,8 +182,8 @@ static void *task_trampoline(void *arg)
 {
        struct psos_task *task = arg;
        struct psos_task_args *args = &task->args;
+       struct sched_param_ex param_ex;
        struct service svc;
-       int ret;
 
        CANCEL_DEFER(svc);
 
@@ -192,17 +192,9 @@ static void *task_trampoline(void *arg)
        threadobj_lock(&task->thobj);
 
        if (task->mode & T_TSLICE) {
-               ret = threadobj_set_rr(&task->thobj, &psos_rrperiod);
-               if (ret) {
-                       warning("task %s failed to enter round-robin scheduling, %s",
-                               threadobj_get_name(&task->thobj),
-                               symerror(ret));
-                       threadobj_set_magic(&task->thobj, ~task_magic);
-                       threadobj_unlock(&task->thobj);
-                       CANCEL_RESTORE(svc);
-                       threadobj_notify_entry();
-                       return (void *)(long)ret;
-               }
+               param_ex.sched_priority = threadobj_get_priority(&task->thobj);
+               param_ex.sched_rr_quantum = psos_rrperiod;
+               threadobj_set_schedparam(&task->thobj, SCHED_RR, &param_ex);
        }
 
        if (task->mode & T_NOPREEMPT)
@@ -335,7 +327,8 @@ u_long t_create(const char *name, u_long prio,
 
        idata.magic = task_magic;
        idata.finalizer = task_finalizer;
-       idata.priority = cprio;
+       idata.policy = cprio ? SCHED_RT : SCHED_OTHER;
+       idata.param_ex.sched_priority = cprio;
        ret = threadobj_init(&task->thobj, &idata);
        if (ret)
                goto fail_threadinit;
@@ -346,7 +339,7 @@ u_long t_create(const char *name, u_long prio,
                goto fail_register;
        }
 
-       cta.policy = SCHED_RT;
+       cta.policy = idata.policy;
        cta.param_ex.sched_priority = cprio;
        cta.prologue = task_prologue;
        cta.run = task_trampoline;
@@ -447,8 +440,9 @@ u_long t_resume(u_long tid)
 
 u_long t_setpri(u_long tid, u_long newprio, u_long *oldprio_r)
 {
+       struct sched_param_ex param_ex;
+       int policy, ret, cprio = 1;
        struct psos_task *task;
-       int ret, cprio = 1;
 
        task = get_psos_task_or_self(tid, &ret);
        if (task == NULL)
@@ -467,7 +461,10 @@ u_long t_setpri(u_long tid, u_long newprio, u_long *oldprio_r)
                return ERR_SETPRI;
        }
 
-       ret = threadobj_set_priority(&task->thobj, cprio);
+       policy = cprio ? SCHED_RT : SCHED_OTHER;
+       param_ex.sched_priority = cprio;
+       ret = threadobj_set_schedparam(&task->thobj, policy, &param_ex);
+       put_psos_task(task);
        if (ret)
                return ERR_OBJDEL;
 
@@ -573,8 +570,9 @@ u_long t_setreg(u_long tid, u_long regnum, u_long regvalue)
 
 u_long t_mode(u_long mask, u_long newmask, u_long *oldmode_r)
 {
+       struct sched_param_ex param_ex;
        struct psos_task *task;
-       int ret;
+       int policy, ret;
 
        task = get_psos_task_or_self(0, &ret);
        if (task == NULL)
@@ -593,15 +591,18 @@ u_long t_mode(u_long mask, u_long newmask, u_long *oldmode_r)
        else if (*oldmode_r & T_NOPREEMPT)
                __threadobj_unlock_sched(&task->thobj);
 
-       /*
-        * Copperplate won't accept to turn round-robin on/off when
-        * preemption is disabled, so we leave user a chance to do the
-        * right thing first.
-        */
-       if (task->mode & T_TSLICE)
-               threadobj_set_rr(&task->thobj, &psos_rrperiod);
-       else if (*oldmode_r & T_TSLICE)
-               threadobj_set_rr(&task->thobj, NULL);
+       param_ex.sched_priority = threadobj_get_priority(&task->thobj);
+
+       if (((task->mode ^ *oldmode_r) & T_TSLICE) == 0)
+               goto done;      /* rr status not changed. */
+
+       if (task->mode & T_TSLICE) {
+               policy = SCHED_RR;
+               param_ex.sched_rr_quantum = psos_rrperiod;
+       } else
+               policy = param_ex.sched_priority ? SCHED_FIFO : SCHED_OTHER;
+
+       threadobj_set_schedparam(&task->thobj, policy, &param_ex);
 done:
        put_psos_task(task);
 
diff --git a/lib/vxworks/kernLib.c b/lib/vxworks/kernLib.c
index 193e90d..80580fb 100644
--- a/lib/vxworks/kernLib.c
+++ b/lib/vxworks/kernLib.c
@@ -21,25 +21,39 @@
 #include "tickLib.h"
 #include "taskLib.h"
 
+static int switch_slicing(struct threadobj *thobj, struct timespec *quantum)
+{
+       struct sched_param_ex param_ex;
+       int policy;
+
+       param_ex.sched_priority = threadobj_get_priority(thobj);
+
+       if (quantum) {
+               policy = SCHED_RR;
+               param_ex.sched_rr_quantum = *quantum;
+       } else
+               policy = param_ex.sched_priority ? SCHED_FIFO : SCHED_OTHER;
+
+       return __bt(threadobj_set_schedparam(thobj, policy, &param_ex));
+}
+
 STATUS kernelTimeSlice(int ticks)
 {
-       struct timespec quantum;
+       struct timespec quantum, *p = NULL;
        struct wind_task *task;
 
-       /* Convert VxWorks ticks to timespec. */
-       clockobj_ticks_to_timespec(&wind_clock, ticks, &quantum);
+       if (ticks) {
+               /* Convert VxWorks ticks to timespec. */
+               clockobj_ticks_to_timespec(&wind_clock, ticks, &quantum);
+               p = &quantum;
+       }
 
        /*
-        * XXX: Enable/disable round-robin for all threads known by
-        * the current process. Round-robin is most commonly about
-        * having multiple threads getting an equal share of time for
-        * running the same bulk of code, so applying this policy
-        * session-wide to multiple Xenomai processes would not make
-        * much sense. I.e. one is better off having all those threads
-        * running within a single process.
+        * Enable/disable round-robin for all threads known by the
+        * current process.
         */
        wind_time_slice = ticks;
-       do_each_wind_task(task, threadobj_set_rr(&task->thobj, &quantum));
+       do_each_wind_task(task, switch_slicing(&task->thobj, p));
 
        return OK;
 }
diff --git a/lib/vxworks/taskLib.c b/lib/vxworks/taskLib.c
index ad64679..cf7ebde 100644
--- a/lib/vxworks/taskLib.c
+++ b/lib/vxworks/taskLib.c
@@ -173,7 +173,7 @@ static inline char *task_decode_status(struct wind_task *task, char *buf)
        if (threadobj_get_lockdepth(&task->thobj) > 0)
                strcat(buf, "+sched_lock");
        status = threadobj_get_status(&task->thobj);
-       if (status & __THREAD_S_RR)
+       if (threadobj_get_policy(&task->thobj) == SCHED_RR)
                strcat(buf, "+sched_rr");
        if (status & __THREAD_S_SUSPENDED)
                strcat(buf, "+suspended");
@@ -226,7 +226,7 @@ static void *task_trampoline(void *arg)
 {
        struct wind_task *task = arg;
        struct wind_task_args *args = &task->args;
-       struct timespec quantum;
+       struct sched_param_ex param_ex;
        struct service svc;
        int ret;
 
@@ -247,10 +247,11 @@ static void *task_trampoline(void *arg)
 
        /* Turn on time slicing if RR globally enabled. */
        if (wind_time_slice) {
-               clockobj_ticks_to_timespec(&wind_clock,
-                                          wind_time_slice, &quantum);
+               clockobj_ticks_to_timespec(&wind_clock, wind_time_slice,
+                                          &param_ex.sched_rr_quantum);
                threadobj_lock(&task->thobj);
-               threadobj_set_rr(&task->thobj, &quantum);
+               param_ex.sched_priority = threadobj_get_priority(&task->thobj);
+               threadobj_set_schedparam(&task->thobj, SCHED_RR, &param_ex);
                threadobj_unlock(&task->thobj);
        }
 
@@ -352,7 +353,8 @@ static STATUS __taskInit(struct wind_task *task,
 
        idata.magic = task_magic;
        idata.finalizer = task_finalizer;
-       idata.priority = cprio;
+       idata.policy = cprio ? SCHED_RT : SCHED_OTHER;
+       idata.param_ex.sched_priority = cprio;
        ret = threadobj_init(&task->thobj, &idata);
        if (ret) {
                errno = S_memLib_NOT_ENOUGH_MEMORY;
@@ -377,7 +379,7 @@ static STATUS __taskInit(struct wind_task *task,
 
        registry_init_file(&task->fsobj, &registry_ops, 0);
 
-       cta.policy = SCHED_RT;
+       cta.policy = idata.policy;
        cta.param_ex.sched_priority = cprio;
        cta.prologue = task_prologue;
        cta.run = task_trampoline;
@@ -730,9 +732,10 @@ void taskExit(int code)
 
 STATUS taskPrioritySet(TASK_ID tid, int prio)
 {
+       struct sched_param_ex param_ex;
        struct wind_task *task;
+       int ret, policy, cprio;
        struct service svc;
-       int ret, cprio;
 
        task = get_wind_task(tid);
        if (task == NULL)
@@ -746,9 +749,13 @@ STATUS taskPrioritySet(TASK_ID tid, int prio)
        }
 
        CANCEL_DEFER(svc);
-       ret = threadobj_set_priority(&task->thobj, cprio);
+       policy = cprio ? SCHED_RT : SCHED_OTHER;
+       param_ex.sched_priority = cprio;
+       ret = threadobj_set_schedparam(&task->thobj, policy, &param_ex);
        CANCEL_RESTORE(svc);
 
+       put_wind_task(task);
+
        if (ret) {
        objid_error:
                errno = S_objLib_OBJ_ID_ERROR;

