Module: xenomai-forge
Branch: master
Commit: 943768a4fbcd4cb0d3234d440cf3814d18277f75
URL: http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=943768a4fbcd4cb0d3234d440cf3814d18277f75

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu May 31 16:42:05 2012 +0200

copperplate/threadobj: fix round-robin API

The former implementation of the time-slicing API suffered from
several issues:

- Decoupling round-robin configuration and system activation did not
  make sense. In practice, RTOS emulators want to turn it on/off
  for a given thread with a single call.

- Handling global RR activation in copperplate was wrong. A single
  process may host several emulators, and enabling RR globally is a
  decision taken by a particular emulator for its own threads only. So
  it makes no sense to activate RR for all known copperplate thread
  objects.

- Using setitimer() to create per-thread RR timers over Mercury does
  not work as intended with NPTL. For POSIX conformance, such timers
  are shared by all threads of the process (unlike with LinuxThreads),
  which does not fit well with applying manual RR only to the threads
  which undergo this scheduling policy (see the standalone sketch
  after this list).
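
For reference, here is a minimal standalone sketch (not taken from the
patch) of the NPTL behaviour described in the last item: the interval
timer armed below is process-wide, so the resulting SIGVTALRM may land
on any CPU-consuming thread rather than the one which armed it. The
spinner() and on_vtalrm() names are purely illustrative. Build with
gcc -pthread.

#include <signal.h>
#include <sys/time.h>
#include <pthread.h>
#include <unistd.h>

static void on_vtalrm(int sig)
{
        (void)sig;      /* no-op; a real handler would yield the CPU */
}

static void *spinner(void *arg)
{
        (void)arg;
        for (;;)        /* burn CPU time so ITIMER_VIRTUAL keeps ticking */
                ;
        return NULL;
}

int main(void)
{
        struct itimerval value = {
                .it_interval = { .tv_sec = 0, .tv_usec = 10000 },
                .it_value    = { .tv_sec = 0, .tv_usec = 10000 },
        };
        pthread_t tid;

        signal(SIGVTALRM, on_vtalrm);
        pthread_create(&tid, NULL, spinner, NULL);

        /* Armed from the main thread, but the timer is process-wide... */
        setitimer(ITIMER_VIRTUAL, &value, NULL);

        /*
         * ...so SIGVTALRM will typically be delivered to the CPU-bound
         * spinner thread, not to the thread which armed the timer.
         */
        sleep(2);

        return 0;
}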

The new implementation simplifies the API to a single call,
i.e. threadobj_set_rr(), which turns RR on/off for a single, valid
thread. Emulators should handle global enabling locally, by calling
this service for each of their own threads.
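
As an illustration of the intended calling convention, a hypothetical
emulator-side wrapper might look as follows (emulator_set_thread_rr()
is not part of the patch); threadobj_set_rr() is called with
thobj->lock held, a non-zero quantum arms round-robin, a NULL or zero
quantum disarms it:

#include <time.h>
#include <copperplate/threadobj.h>

static int emulator_set_thread_rr(struct threadobj *thobj,
                                  struct timespec *quantum)
{
        int ret;

        /* threadobj_set_rr() expects the thread object to be locked. */
        threadobj_lock(thobj);
        ret = threadobj_set_rr(thobj, quantum);
        threadobj_unlock(thobj);

        return ret;
}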

In addition, timer_create() is used to obtain a per-thread timer over
Mercury, via the Linux-specific SIGEV_THREAD_ID notification mode.
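
The following standalone sketch shows that mechanism outside of
copperplate, assuming a Linux/glibc target (build with -lrt): a timer
measuring the calling thread's CPU time which delivers SIGVTALRM to
that thread only. make_thread_rr_timer() is an illustrative name, not
part of the patch; the sigev_notify_thread_id define mirrors the one
added by the patch for older glibc versions.

#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef sigev_notify_thread_id
#define sigev_notify_thread_id  _sigev_un._tid
#endif

static int make_thread_rr_timer(const struct timespec *quantum,
                                timer_t *timerid)
{
        struct itimerspec value;
        struct sigevent sev;

        memset(&sev, 0, sizeof(sev));
        sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;
        sev.sigev_signo = SIGVTALRM;
        sev.sigev_notify_thread_id = syscall(SYS_gettid);

        /* Thread CPU-time clock: the quantum elapses as this thread runs. */
        if (timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, timerid))
                return -errno;

        value.it_value = *quantum;
        value.it_interval = *quantum;
        if (timer_settime(*timerid, 0, &value, NULL)) {
                timer_delete(*timerid);
                return -errno;
        }

        return 0;
}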

---

 include/copperplate/threadobj.h |    8 +-
 lib/copperplate/internal.h      |    1 +
 lib/copperplate/threadobj.c     |  336 ++++++++++++++++-----------------------
 3 files changed, 141 insertions(+), 204 deletions(-)

diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index 56f8199..d5fc7d8 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -61,7 +61,9 @@ struct threadobj_stat {
 
 struct threadobj_corespec {
        pthread_cond_t grant_sync;
+       int policy_unlocked;
        int prio_unlocked;
+       timer_t rr_timer;
        struct notifier notifier;
        struct timespec wakeup;
        ticks_t period;
@@ -125,6 +127,7 @@ struct threadobj {
        int schedlock_depth;
        int cancel_state;
        int status;
+       int policy;
        int priority;
        pid_t cnode;
        const char *name;
@@ -142,7 +145,6 @@ struct threadobj {
        struct timespec tslice;
        pthread_cond_t barrier;
        struct traceobj *tracer;
-       struct pvholder thread_link;
        struct backtrace_data btd;
 };
 
@@ -273,10 +275,6 @@ int threadobj_set_mode(struct threadobj *thobj,
 
 int threadobj_set_rr(struct threadobj *thobj, struct timespec *quantum);
 
-int threadobj_start_rr(struct timespec *quantum);
-
-void threadobj_stop_rr(void);
-
 int threadobj_set_periodic(struct threadobj *thobj,
                           struct timespec *idate, struct timespec *period);
 
diff --git a/lib/copperplate/internal.h b/lib/copperplate/internal.h
index 4c5b1a6..4120ff7 100644
--- a/lib/copperplate/internal.h
+++ b/lib/copperplate/internal.h
@@ -21,6 +21,7 @@
 
 #include <sys/types.h>
 #include <stdarg.h>
+#include <time.h>
 #include <pthread.h>
 #include <sched.h>
 #include <xeno_config.h>
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index 70f4435..ce5b641 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -18,13 +18,13 @@
  * Thread object abstraction.
  */
 
-#include <sys/time.h>
 #include <signal.h>
 #include <memory.h>
 #include <errno.h>
 #include <string.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <time.h>
 #include <fcntl.h>
 #include <assert.h>
 #include <limits.h>
@@ -63,14 +63,6 @@ int threadobj_high_prio;
 
 int threadobj_irq_prio;
 
-static DEFINE_PRIVATE_LIST(thread_list);
-
-static pthread_mutex_t list_lock;
-
-static int global_rr;
-
-static struct timespec global_quantum;
-
 static void cancel_sync(struct threadobj *thobj);
 
 #ifdef HAVE_TLS
@@ -104,11 +96,13 @@ static inline void threadobj_init_corespec(struct threadobj *thobj)
 {
 }
 
-static inline void threadobj_setup_corespec(struct threadobj *thobj)
+static inline int threadobj_setup_corespec(struct threadobj *thobj)
 {
        pthread_set_name_np(pthread_self(), thobj->name);
        thobj->core.handle = xeno_get_current();
        thobj->core.u_window = xeno_get_current_window();
+
+       return 0;
 }
 
 static inline void threadobj_cleanup_corespec(struct threadobj *thobj)
@@ -282,7 +276,6 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
 
        __threadobj_check_locked(thobj);
 
-       thobj->priority = prio;
        policy = SCHED_RT;
        if (prio == 0) {
                thobj->status &= ~THREADOBJ_ROUNDROBIN;
@@ -292,10 +285,12 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
                policy = SCHED_RR;
        }
 
+       thobj->priority = prio;
+       thobj->policy = policy;
        threadobj_unlock(thobj);
        /*
         * XXX: as a side effect, resetting SCHED_RR will refill the
-        * time credit for the target thread with the last rrperiod
+        * time credit for the target thread with the last quantum
         * set.
         */
        xparam.sched_priority = prio;
@@ -338,23 +333,20 @@ static int set_rr(struct threadobj *thobj, struct timespec *quantum)
        pthread_t tid = thobj->tid;
        int ret, policy;
 
-       policy = SCHED_RT;
-       if (quantum == NULL) {
+       if (quantum && (quantum->tv_sec || quantum->tv_nsec)) {
+               policy = SCHED_RR;
+               xparam.sched_rr_quantum = *quantum;
+               thobj->status |= THREADOBJ_ROUNDROBIN;
+               thobj->tslice = *quantum;
+               xparam.sched_priority = thobj->priority ?: 1;
+       } else {
+               policy = thobj->policy;
+               thobj->status &= ~THREADOBJ_ROUNDROBIN;
                xparam.sched_rr_quantum.tv_sec = 0;
                xparam.sched_rr_quantum.tv_nsec = 0;
-               thobj->status &= ~THREADOBJ_ROUNDROBIN;
-       } else {
-               thobj->tslice = *quantum;
-               xparam.sched_rr_quantum = *quantum;
-               if (quantum->tv_sec == 0 && quantum->tv_nsec == 0)
-                       thobj->status &= ~THREADOBJ_ROUNDROBIN;
-               else {
-                       thobj->status |= THREADOBJ_ROUNDROBIN;
-                       policy = SCHED_RR;
-               }
+               xparam.sched_priority = thobj->priority;
        }
 
-       xparam.sched_priority = thobj->priority;
        threadobj_unlock(thobj);
        ret = pthread_setschedparam_ex(tid, policy, &xparam);
        threadobj_lock(thobj);
@@ -362,58 +354,6 @@ static int set_rr(struct threadobj *thobj, struct timespec *quantum)
        return __bt(-ret);
 }
 
-int threadobj_set_rr(struct threadobj *thobj, struct timespec *quantum)
-{                              /* thobj->lock held if valid */
-       int ret;
-
-       if (thobj) {
-               __threadobj_check_locked(thobj);
-               return __bt(set_rr(thobj, quantum));
-       }
-
-       global_rr = (quantum != NULL);
-       if (global_rr)
-               global_quantum = *quantum;
-
-       /*
-        * XXX: Enable round-robin for all threads locally known by
-        * the current process. Round-robin is most commonly about
-        * having multiple threads getting an equal share of time for
-        * running the same bulk of code, so applying this policy
-        * session-wide to multiple Xenomai processes would not make
-        * much sense. I.e. one is better off having all those threads
-        * running within a single process.
-        */
-       ret = 0;
-       push_cleanup_lock(&list_lock);
-       read_lock(&list_lock);
-
-       if (!pvlist_empty(&thread_list)) {
-               pvlist_for_each_entry(thobj, &thread_list, thread_link) {
-                       threadobj_lock(thobj);
-                       ret = set_rr(thobj, quantum);
-                       threadobj_unlock(thobj);
-                       if (ret)
-                               break;
-               }
-       }
-
-       read_unlock(&list_lock);
-       pop_cleanup_lock(&list_lock);
-
-       return __bt(ret);
-}
-
-int threadobj_start_rr(struct timespec *quantum)
-{
-       return __bt(threadobj_set_rr(NULL, quantum));
-}
-
-void threadobj_stop_rr(void)
-{
-       threadobj_set_rr(NULL, NULL);
-}
-
 int threadobj_set_periodic(struct threadobj *thobj,
                           struct timespec *idate, struct timespec *period)
 {
@@ -454,6 +394,8 @@ int threadobj_stat(struct threadobj *thobj, struct threadobj_stat *p) /* thobj->
 #include <sys/prctl.h>
 #include "copperplate/notifier.h"
 
+#define sigev_notify_thread_id  _sigev_un._tid
+
 static int threadobj_lock_prio;
 
 static void unblock_sighandler(int sig)
@@ -464,6 +406,19 @@ static void unblock_sighandler(int sig)
         */
 }
 
+static void roundrobin_handler(int sig)
+{
+       struct threadobj *current = threadobj_current();
+
+       /*
+        * We do manual round-robin over SCHED_FIFO(RT) to allow for
+        * multiple arbitrary time slices (i.e. vs the kernel
+        * pre-defined and fixed one).
+        */
+       if (current && (current->status & THREADOBJ_ROUNDROBIN) != 0)
+               sched_yield();
+}
+
 static inline void pkg_init_corespec(void)
 {
        struct sigaction sa;
@@ -479,6 +434,8 @@ static inline void pkg_init_corespec(void)
        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = unblock_sighandler;
        sigaction(SIGRELS, &sa, NULL);
+       sa.sa_handler = roundrobin_handler;
+       sigaction(SIGVTALRM, &sa, NULL);
 
        notifier_pkg_init();
 }
@@ -514,19 +471,44 @@ static inline void threadobj_init_corespec(struct threadobj *thobj)
        pthread_condattr_setclock(&cattr, CLOCK_COPPERPLATE);
        pthread_cond_init(&thobj->core.grant_sync, &cattr);
        pthread_condattr_destroy(&cattr);
+       thobj->core.rr_timer = NULL;
 }
 
-static inline void threadobj_setup_corespec(struct threadobj *thobj)
+static inline int threadobj_setup_corespec(struct threadobj *thobj)
 {
+       struct sigevent sev;
+       int ret;
+
        prctl(PR_SET_NAME, (unsigned long)thobj->name, 0, 0, 0);
        notifier_init(&thobj->core.notifier, notifier_callback, 1);
        thobj->core.period = 0;
+
+       /*
+        * Create the per-thread round-robin timer.
+        *
+        * XXX: It is a bit overkill doing this here instead of on
+        * demand, but we must get the internal ID from the running
+        * thread, and unlike with set_rr(), threadobj_current() ==
+        * thobj is guaranteed in threadobj_setup_corespec().
+        */
+       sev.sigev_notify = SIGEV_SIGNAL|SIGEV_THREAD_ID;
+       sev.sigev_signo = SIGVTALRM;
+       sev.sigev_notify_thread_id = copperplate_get_tid();
+
+       ret = timer_create(CLOCK_THREAD_CPUTIME_ID, &sev,
+                          &thobj->core.rr_timer);
+       if (ret)
+               return __bt(-errno);
+
+       return 0;
 }
 
 static inline void threadobj_cleanup_corespec(struct threadobj *thobj)
 {
        notifier_destroy(&thobj->core.notifier);
        pthread_cond_destroy(&thobj->core.grant_sync);
+       if (thobj->core.rr_timer)
+               timer_delete(thobj->core.rr_timer);
 }
 
 static inline void threadobj_run_corespec(struct threadobj *thobj)
@@ -587,7 +569,6 @@ int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
 {
        pthread_t tid = thobj->tid;
        struct sched_param param;
-       int policy, ret;
 
        __threadobj_check_locked(thobj);
 
@@ -596,13 +577,11 @@ int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
        if (thobj->schedlock_depth++ > 0)
                return 0;
 
-       ret = pthread_getschedparam(tid, &policy, &param);
-       if (ret)
-               return __bt(-ret);
-
-       thobj->core.prio_unlocked = param.sched_priority;
+       thobj->core.prio_unlocked = thobj->priority;
+       thobj->core.policy_unlocked = thobj->policy;
        thobj->status |= THREADOBJ_SCHEDLOCK;
        thobj->priority = threadobj_lock_prio;
+       thobj->policy = SCHED_RT;
        param.sched_priority = threadobj_lock_prio;
 
        return __bt(-pthread_setschedparam(tid, SCHED_RT, &param));
@@ -627,7 +606,7 @@ int threadobj_unlock_sched(struct threadobj *thobj) /* thobj->lock held */
        thobj->status &= ~THREADOBJ_SCHEDLOCK;
        thobj->priority = thobj->core.prio_unlocked;
        param.sched_priority = thobj->core.prio_unlocked;
-       policy = param.sched_priority ? SCHED_RT : SCHED_OTHER;
+       policy = thobj->core.policy_unlocked;
        threadobj_unlock(thobj);
        ret = pthread_setschedparam(tid, policy, &param);
        threadobj_lock(thobj);
@@ -650,17 +629,25 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
         */
        if (thobj->status & THREADOBJ_SCHEDLOCK) {
                thobj->core.prio_unlocked = prio;
+               thobj->core.policy_unlocked = prio ? SCHED_RT : SCHED_OTHER;
                return 0;
        }
 
        thobj->priority = prio;
+       policy = SCHED_RT;
+       if (prio == 0) {
+               thobj->status &= ~THREADOBJ_ROUNDROBIN;
+               policy = SCHED_OTHER;
+       }
+
+       thobj->priority = prio;
+       thobj->policy = policy;
        threadobj_unlock(thobj);
        /*
         * Since we released the thread container lock, we now rely on
         * the pthread interface to recheck the tid for existence.
         */
        param.sched_priority = prio;
-       policy = prio ? SCHED_RT : SCHED_OTHER;
 
        return pthread_setschedparam(tid, policy, &param);
 }
@@ -686,112 +673,56 @@ int threadobj_set_mode(struct threadobj *thobj,
        return ret;
 }
 
-static void roundrobin_handler(int sig)
+static inline int set_rr(struct threadobj *thobj, struct timespec *quantum)
 {
-       struct threadobj *current = threadobj_current();
-
-       /*
-        * We do manual round-robin within SCHED_FIFO(RT) to allow for
-        * multiple time slices system-wide.
-        */
-       if (current && (current->status & THREADOBJ_ROUNDROBIN))
-               sched_yield();
-}
+       pthread_t tid = thobj->tid;
+       struct sched_param param;
+       struct itimerspec value;
+       int policy, ret;
 
-static inline void set_rr(struct threadobj *thobj, struct timespec *quantum)
-{
-       if (quantum) {
-               thobj->status |= THREADOBJ_ROUNDROBIN;
+       if (quantum && (quantum->tv_sec || quantum->tv_nsec)) {
+               value.it_interval = *quantum;
+               value.it_value = *quantum;
                thobj->tslice = *quantum;
-       } else
-               thobj->status &= ~THREADOBJ_ROUNDROBIN;
-}
-
-int threadobj_set_rr(struct threadobj *thobj, struct timespec *quantum)
-{                              /* thobj->lock held if valid */
-       if (thobj) {
-               __threadobj_check_locked(thobj);
-               set_rr(thobj, quantum);
-               return 0;
-       }
 
-       global_rr = (quantum != NULL);
-       if (global_rr)
-               global_quantum = *quantum;
-
-       /*
-        * XXX: Enable round-robin for all threads locally known by
-        * the current process. Round-robin is most commonly about
-        * having multiple threads getting an equal share of time for
-        * running the same bulk of code, so applying this policy
-        * session-wide to multiple Xenomai processes would not make
-        * much sense. I.e. one is better off having all those threads
-        * running within a single process.
-        */
-       push_cleanup_lock(&list_lock);
-       read_lock(&list_lock);
-
-       if (!pvlist_empty(&thread_list)) {
-               pvlist_for_each_entry(thobj, &thread_list, thread_link) {
-                       threadobj_lock(thobj);
-                       set_rr(thobj, quantum);
-                       threadobj_unlock(thobj);
+               if (thobj->status & THREADOBJ_ROUNDROBIN) {
+                       /* Changing quantum of ongoing RR. */
+                       ret = timer_settime(thobj->core.rr_timer, 0, &value, NULL);
+                       return ret ? __bt(-errno) : 0;
                }
-       }
-
-       read_unlock(&list_lock);
-       pop_cleanup_lock(&list_lock);
-
-       return 0;
-}
-
-int threadobj_start_rr(struct timespec *quantum)
-{
-       struct itimerval value, ovalue;
-       struct sigaction sa;
-       int ret;
-
-       ret = threadobj_set_rr(NULL, quantum);
-       if (ret)
-               return __bt(ret);
-
-       value.it_interval.tv_sec = quantum->tv_sec;
-       value.it_interval.tv_usec = quantum->tv_nsec / 1000;
-
-       ret = getitimer(ITIMER_VIRTUAL, &ovalue);
-       if (ret == 0 &&
-           value.it_interval.tv_sec == ovalue.it_interval.tv_sec &&
-           value.it_interval.tv_usec == ovalue.it_interval.tv_usec)
-               return 0;       /* Already enabled. */
-
-       memset(&sa, 0, sizeof(sa));
-       sa.sa_handler = roundrobin_handler;
-       sigaction(SIGVTALRM, &sa, NULL);
-
-       value.it_value = value.it_interval;
-       ret = setitimer(ITIMER_VIRTUAL, &value, NULL);
-       if (ret)
-               return __bt(-errno);
 
-       return 0;
-}
-
-void threadobj_stop_rr(void)
-{
-       struct itimerval value;
-       struct sigaction sa;
-
-       threadobj_set_rr(NULL, NULL);
-
-       value.it_value.tv_sec = 0;
-       value.it_value.tv_usec = 0;
-       value.it_interval = value.it_value;
+               thobj->status |= THREADOBJ_ROUNDROBIN;
+               /*
+                * Switch to SCHED_FIFO policy, assign default prio=1
+                * if coming from SCHED_OTHER. We use a per-thread
+                * timer to implement manual round-robin.
+                */
+               policy = SCHED_FIFO;
+               param.sched_priority = thobj->priority ?: 1;
+               ret = timer_settime(thobj->core.rr_timer, 0, &value, NULL);
+               if (ret)
+                       return __bt(-errno);
+       } else {
+               if ((thobj->status & THREADOBJ_ROUNDROBIN) == 0)
+                       return 0;
+               thobj->status &= ~THREADOBJ_ROUNDROBIN;
+               /*
+                * Disarm timer and reset scheduling parameters to
+                * former policy.
+                */
+               value.it_value.tv_sec = 0;
+               value.it_value.tv_nsec = 0;
+               value.it_interval = value.it_value;
+               timer_settime(thobj->core.rr_timer, 0, &value, NULL);
+               param.sched_priority = thobj->priority;
+               policy = thobj->policy;
+       }
 
-       setitimer(ITIMER_VIRTUAL, &value, NULL);
+       threadobj_unlock(thobj);
+       ret = pthread_setschedparam(tid, policy, &param);
+       threadobj_lock(thobj);
 
-       memset(&sa, 0, sizeof(sa));
-       sa.sa_handler = SIG_DFL;
-       sigaction(SIGVTALRM, &sa, NULL);
+       return __bt(-ret);
 }
 
 int threadobj_set_periodic(struct threadobj *thobj,
@@ -899,6 +830,7 @@ void threadobj_init(struct threadobj *thobj,
        thobj->schedlock_depth = 0;
        thobj->status = THREADOBJ_WARMUP;
        thobj->priority = idata->priority;
+       thobj->policy = idata->priority ? SCHED_RT : SCHED_OTHER;
        holder_init(&thobj->wait_link);
        thobj->suspend_hook = idata->suspend_hook;
        thobj->cnode = __node_id;
@@ -1021,6 +953,7 @@ void threadobj_notify_entry(void) /* current->lock free. */
 int threadobj_prologue(struct threadobj *thobj, const char *name)
 {
        struct threadobj *current = threadobj_current();
+       int ret;
 
        /*
         * Check whether we overlay the default main TCB we set in
@@ -1039,19 +972,14 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
                pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
 
        thobj->name = name;
+       thobj->errno_pointer = &errno;
        backtrace_init_context(&thobj->btd, name);
-       threadobj_setup_corespec(thobj);
-
-       write_lock_nocancel(&list_lock);
-       pvlist_append(&thobj->thread_link, &thread_list);
-       write_unlock(&list_lock);
+       ret = threadobj_setup_corespec(thobj);
+       if (ret)
+               return __bt(ret);
 
-       thobj->errno_pointer = &errno;
        threadobj_set_current(thobj);
 
-       if (global_rr)
-               threadobj_set_rr(thobj, &global_quantum);
-
        threadobj_lock(thobj);
        thobj->status &= ~THREADOBJ_WARMUP;
        __RT(pthread_cond_signal(&thobj->barrier));
@@ -1096,10 +1024,6 @@ static void threadobj_finalize(void *p) /* thobj->lock free */
        if (thobj->wait_sobj)
                __syncobj_cleanup_wait(thobj->wait_sobj, thobj);
 
-       write_lock_nocancel(&list_lock);
-       pvlist_remove(&thobj->thread_link);
-       write_unlock(&list_lock);
-
        if (thobj->tracer)
                traceobj_unwind(thobj->tracer);
 
@@ -1147,6 +1071,22 @@ void threadobj_spin(ticks_t ns)
                cpu_relax();
 }
 
+int threadobj_set_rr(struct threadobj *thobj, struct timespec *quantum)
+{                              /* thobj->lock held */
+       __threadobj_check_locked(thobj);
+
+       /*
+        * It makes no sense to enable/disable round-robin while
+        * holding the scheduler lock. Prevent this, which makes our
+        * logic simpler in the Mercury case with respect to tracking
+        * the current scheduling parameters.
+        */
+       if (thobj->status & THREADOBJ_SCHEDLOCK)
+               return -EINVAL;
+
+       return __bt(set_rr(thobj, quantum));
+}
+
 #ifdef __XENO_DEBUG__
 
 int __check_cancel_type(const char *locktype)
@@ -1195,8 +1135,6 @@ void threadobj_pkg_init(void)
        threadobj_irq_prio = __RT(sched_get_priority_max(SCHED_RT));
        threadobj_high_prio = threadobj_irq_prio - 1;
 
-       __RT(pthread_mutex_init(&list_lock, NULL));
-
        threadobj_init_key();
 
        pkg_init_corespec();

