Module: xenomai-forge
Branch: next
Commit: 98e0378927e957ce89939a6bf945e0f22feac454
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=98e0378927e957ce89939a6bf945e0f22feac454

Author: Philippe Gerum <r...@xenomai.org>
Date:   Tue Sep  9 12:04:24 2014 +0200

cobalt/thread: sanitize header, drop pedantic accessors
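
The gist of the change, roughly summarized (the hunks below are the
authoritative list): the one-line accessor macros wrapping struct
xnthread fields are either promoted to static inline helpers
(xnthread_get_state(), xnthread_base_priority(), xnthread_host_pid(),
...) or dropped in favor of direct field access at the call sites; the
registry sub-struct is flattened into the thread struct (handle,
waitkey); schedlck/hrescnt are renamed to lock_count/res_count; the
initial mode/class/schedparam bookkeeping fields go away; and the state
word is consistently printed as an int (%x instead of %lx).

As an illustrative sketch of how a caller migrates -- "curr" here is
just an arbitrary struct xnthread pointer, not a specific call site
from this patch:

        struct xnthread *curr = xnthread_current();

        /* was: xnhandle_t h = xnthread_handle(curr);  i.e. curr->registry.handle */
        xnhandle_t h = curr->handle;            /* registry sub-struct flattened */

        /* was: int s = xnthread_state_flags(curr); */
        int s = xnthread_get_state(curr);       /* now a static inline */

        /* was: xnthread_inc_rescnt(curr); */
        curr->res_count++;                      /* hrescnt renamed to res_count */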

---

 include/cobalt/kernel/synch.h      |    2 +-
 include/cobalt/kernel/thread.h     |   74 ++++++++++++++++--------------------
 kernel/cobalt/assert.c             |    4 +-
 kernel/cobalt/posix/monitor.c      |    4 +-
 kernel/cobalt/posix/mutex.c        |    7 ++--
 kernel/cobalt/posix/process.c      |    8 ++--
 kernel/cobalt/posix/syscall.c      |    6 +--
 kernel/cobalt/posix/thread.c       |   23 +++++------
 kernel/cobalt/posix/timer.c        |    4 +-
 kernel/cobalt/posix/timerfd.c      |    2 +-
 kernel/cobalt/registry.c           |    8 ++--
 kernel/cobalt/sched-sporadic.c     |    4 +-
 kernel/cobalt/sched.c              |   36 ++++++++++--------
 kernel/cobalt/synch.c              |   14 +++----
 kernel/cobalt/thread.c             |   28 ++++++--------
 kernel/cobalt/trace/cobalt-core.h  |   32 ++++++++--------
 kernel/cobalt/trace/cobalt-posix.h |    4 +-
 kernel/cobalt/trace/cobalt-rtdm.h  |   16 ++++----
 18 files changed, 135 insertions(+), 141 deletions(-)

diff --git a/include/cobalt/kernel/synch.h b/include/cobalt/kernel/synch.h
index 7509909..d4df940 100644
--- a/include/cobalt/kernel/synch.h
+++ b/include/cobalt/kernel/synch.h
@@ -101,7 +101,7 @@ static inline struct xnthread *xnsynch_owner(struct xnsynch *synch)
 #define xnsynch_fastlock(synch)                ((synch)->fastlock)
 #define xnsynch_fastlock_p(synch)      ((synch)->fastlock != NULL)
 #define xnsynch_owner_check(synch, thread) \
-       xnsynch_fast_owner_check((synch)->fastlock, xnthread_handle(thread))
+       xnsynch_fast_owner_check((synch)->fastlock, thread->handle)
 
 #define xnsynch_fast_is_claimed(fastlock) \
        xnhandle_test_spare(fastlock, XNSYNCH_FLCLAIM)
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index a455399..0801cbc 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -119,7 +119,7 @@ struct xnthread {
         */
        int wprio;
 
-       unsigned long schedlck; /** Scheduler lock count. */
+       int lock_count; /** Scheduler lock count. */
 
        /**
         * Thread holder in xnsched runnable queue. Prioritized by
@@ -146,7 +146,7 @@ struct xnthread {
 
        struct xnsynch *wwake;          /* Wait channel the thread was resumed from */
 
-       int hrescnt;                    /* Held resources count */
+       int res_count;                  /* Held resources count */
 
        struct xntimer rtimer;          /* Resource timer */
 
@@ -167,16 +167,9 @@ struct xnthread {
 
        struct xnselector *selector;    /* For select. */
 
-       int imode;                      /* Initial mode */
+       xnhandle_t handle;      /* Handle in registry */
 
-       struct xnsched_class *init_class; /* Initial scheduling class */
-
-       union xnsched_policy_param init_schedparam; /* Initial scheduling parameters */
-
-       struct {
-               xnhandle_t handle;      /* Handle in registry */
-               const char *waitkey;    /* Pended key */
-       } registry;
+       const char *waitkey;    /* Pended key */
 
        char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
 
@@ -195,11 +188,10 @@ struct xnthread {
        struct xnsynch join_synch;
 };
 
-#define xnthread_name(thread)               ((thread)->name)
-#define xnthread_clear_name(thread)        do { *(thread)->name = 0; } while(0)
-#define xnthread_sched(thread)             ((thread)->sched)
-#define xnthread_start_time(thread)        ((thread)->stime)
-#define xnthread_state_flags(thread)       ((thread)->state)
+static inline int xnthread_get_state(const struct xnthread *thread)
+{
+       return thread->state;
+}
 
 static inline int xnthread_test_state(struct xnthread *thread, int bits)
 {
@@ -236,28 +228,28 @@ static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
        return &thread->tcb;
 }
 
-#define xnthread_lock_count(thread)        ((thread)->schedlck)
-#define xnthread_init_schedparam(thread)   ((thread)->init_schedparam)
-#define xnthread_base_priority(thread)     ((thread)->bprio)
-#define xnthread_current_priority(thread)  ((thread)->cprio)
-#define xnthread_init_class(thread)        ((thread)->init_class)
-#define xnthread_base_class(thread)        ((thread)->base_class)
-#define xnthread_sched_class(thread)       ((thread)->sched_class)
-#define xnthread_time_slice(thread)        ((thread)->rrperiod)
-#define xnthread_timeout(thread)           xntimer_get_timeout(&(thread)->rtimer)
-#define xnthread_handle(thread)            ((thread)->registry.handle)
-#define xnthread_host_task(thread)         (xnthread_archtcb(thread)->core.host_task)
-#define xnthread_host_pid(thread)          (xnthread_test_state((thread),XNROOT) ? 0 : \
-                                           xnthread_archtcb(thread)->core.host_task->pid)
-#define xnthread_host_mm(thread)           (xnthread_host_task(thread)->mm)
-#define xnthread_affinity(thread)          ((thread)->affinity)
-#define xnthread_affine_p(thread, cpu)     cpu_isset(cpu, (thread)->affinity)
-#define xnthread_get_exectime(thread)      xnstat_exectime_get_total(&(thread)->stat.account)
-#define xnthread_get_lastswitch(thread)    xnstat_exectime_get_last_switch((thread)->sched)
-#define xnthread_inc_rescnt(thread)        ({ (thread)->hrescnt++; })
-#define xnthread_dec_rescnt(thread)        ({ --(thread)->hrescnt; })
-#define xnthread_get_rescnt(thread)        ((thread)->hrescnt)
-#define xnthread_personality(thread)       ((thread)->personality)
+static inline int xnthread_base_priority(const struct xnthread *thread)
+{
+       return thread->bprio;
+}
+
+static inline int xnthread_current_priority(const struct xnthread *thread)
+{
+       return thread->cprio;
+}
+
+static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
+{
+       return xnthread_archtcb(thread)->core.host_task;
+}
+
+static inline pid_t xnthread_host_pid(struct xnthread *thread)
+{
+       if (xnthread_test_state(thread, XNROOT))
+               return 0;
+
+       return xnthread_host_task(thread)->pid;
+}
 
 #define xnthread_for_each_claimed(__pos, __thread)             \
        list_for_each_entry(__pos, &(__thread)->claimq, link)
@@ -291,14 +283,14 @@ struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
 static inline
 int xnthread_register(struct xnthread *thread, const char *name)
 {
-       return xnregistry_enter(name, thread, &xnthread_handle(thread), NULL);
+       return xnregistry_enter(name, thread, &thread->handle, NULL);
 }
 
 static inline
 struct xnthread *xnthread_lookup(xnhandle_t threadh)
 {
        struct xnthread *thread = xnregistry_lookup(threadh, NULL);
-       return thread && xnthread_handle(thread) == threadh ? thread : NULL;
+       return thread && thread->handle == threadh ? thread : NULL;
 }
 
 static inline void xnthread_sync_window(struct xnthread *thread)
@@ -338,7 +330,7 @@ static inline int xnthread_try_grab(struct xnthread *thread,
        xnsynch_set_owner(synch, thread);
 
        if (xnthread_test_state(thread, XNWEAK))
-               xnthread_inc_rescnt(thread);
+               thread->res_count++;
 
        return 1;
 }
diff --git a/kernel/cobalt/assert.c b/kernel/cobalt/assert.c
index e9cc06c..09a94d2 100644
--- a/kernel/cobalt/assert.c
+++ b/kernel/cobalt/assert.c
@@ -73,8 +73,8 @@ void __xnsys_fatal(const char *format, ...)
                               xnthread_host_pid(thread),
                               pbuf,
                               xnthread_get_timeout(thread, now),
-                              xnthread_state_flags(thread),
-                              xnthread_name(thread));
+                              xnthread_get_state(thread),
+                              thread->name);
                }
        }
 
diff --git a/kernel/cobalt/posix/monitor.c b/kernel/cobalt/posix/monitor.c
index d7cff94..9572c3b 100644
--- a/kernel/cobalt/posix/monitor.c
+++ b/kernel/cobalt/posix/monitor.c
@@ -126,11 +126,11 @@ static int monitor_enter(xnhandle_t handle, struct xnthread *curr)
         *
         * NOTE: monitors do not support recursive entries.
         */
-       ret = xnsynch_fast_acquire(mon->gate.fastlock, xnthread_handle(curr));
+       ret = xnsynch_fast_acquire(mon->gate.fastlock, curr->handle);
        switch(ret) {
        case 0:
                if (xnthread_test_state(curr, XNWEAK))
-                       xnthread_inc_rescnt(curr);
+                       curr->res_count++;
                break;
        default:
                /* Nah, we really have to wait. */
diff --git a/kernel/cobalt/posix/mutex.c b/kernel/cobalt/posix/mutex.c
index deccf05..0f54b1b 100644
--- a/kernel/cobalt/posix/mutex.c
+++ b/kernel/cobalt/posix/mutex.c
@@ -182,7 +182,7 @@ int cobalt_mutex_timedlock_break(struct cobalt_mutex *mutex,
        int ret;
 
        /* We need a valid thread handle for the fast lock. */
-       if (xnthread_handle(curr) == XN_NO_HANDLE)
+       if (curr->handle == XN_NO_HANDLE)
                return -EPERM;
 
        ret = cobalt_mutex_acquire(curr, mutex, timed, u_ts);
@@ -333,12 +333,11 @@ COBALT_SYSCALL(mutex_trylock, primary,
                goto err_unlock;
        }
 
-       err = xnsynch_fast_acquire(mutex->synchbase.fastlock,
-                                  xnthread_handle(curr));
+       err = xnsynch_fast_acquire(mutex->synchbase.fastlock, curr->handle);
        switch(err) {
        case 0:
                if (xnthread_test_state(curr, XNWEAK))
-                       xnthread_inc_rescnt(curr);
+                       curr->res_count++;
                break;
 
 /* This should not happen, as recursive mutexes are handled in
diff --git a/kernel/cobalt/posix/process.c b/kernel/cobalt/posix/process.c
index b1f0d72..d0b7826 100644
--- a/kernel/cobalt/posix/process.c
+++ b/kernel/cobalt/posix/process.c
@@ -1183,8 +1183,8 @@ no_ptrace:
                        show_stack(xnthread_host_task(next), NULL);
                        xnsys_fatal
                                ("hardened thread %s[%d] running in Linux 
domain?! "
-                                "(status=0x%lx, sig=%d, prev=%s[%d])",
-                                next->name, next_task->pid, 
xnthread_state_flags(next),
+                                "(status=0x%x, sig=%d, prev=%s[%d])",
+                                next->name, next_task->pid, 
xnthread_get_state(next),
                                 signal_pending(next_task), prev_task->comm, 
prev_task->pid);
                } else if (!(next_task->ptrace & PT_PTRACED) &&
                           /*
@@ -1197,8 +1197,8 @@ no_ptrace:
                        show_stack(xnthread_host_task(next), NULL);
                        xnsys_fatal
                                ("blocked thread %s[%d] rescheduled?! "
-                                "(status=0x%lx, sig=%d, prev=%s[%d])",
-                                next->name, next_task->pid, xnthread_state_flags(next),
+                                "(status=0x%x, sig=%d, prev=%s[%d])",
+                                next->name, next_task->pid, xnthread_get_state(next),
                                 signal_pending(next_task), prev_task->comm, prev_task->pid);
                }
        }
diff --git a/kernel/cobalt/posix/syscall.c b/kernel/cobalt/posix/syscall.c
index e3b594e..7e8db89 100644
--- a/kernel/cobalt/posix/syscall.c
+++ b/kernel/cobalt/posix/syscall.c
@@ -231,7 +231,7 @@ done:
                        sigs = 1;
                        prepare_for_signal(p, thread, regs, sysflags);
                } else if (xnthread_test_state(thread, XNWEAK) &&
-                          xnthread_get_rescnt(thread) == 0) {
+                          thread->res_count == 0) {
                        if (switched)
                                switched = 0;
                        else
@@ -365,7 +365,7 @@ restart:
                        sigs = 1;
                        prepare_for_signal(p, thread, regs, sysflags);
                } else if (xnthread_test_state(thread, XNWEAK) &&
-                          xnthread_get_rescnt(thread) == 0)
+                          thread->res_count == 0)
                        sysflags |= __xn_exec_switchback;
        }
        if (!sigs && (sysflags & __xn_exec_switchback) != 0
@@ -502,7 +502,7 @@ static COBALT_SYSCALL(get_current, current,
        if (cur == NULL)
                return -EPERM;
 
-       return __xn_safe_copy_to_user(u_handle, &xnthread_handle(cur),
+       return __xn_safe_copy_to_user(u_handle, &cur->handle,
                                      sizeof(*u_handle));
 }
 
diff --git a/kernel/cobalt/posix/thread.c b/kernel/cobalt/posix/thread.c
index 6a0f86b..8271afd 100644
--- a/kernel/cobalt/posix/thread.c
+++ b/kernel/cobalt/posix/thread.c
@@ -257,7 +257,7 @@ pthread_setschedparam_ex(struct cobalt_thread *thread,
                goto out;
        }
 
-       tslice = xnthread_time_slice(&thread->threadbase);
+       tslice = thread->threadbase.rrperiod;
        sched_class = cobalt_sched_policy_param(&param, policy,
                                                param_ex, &tslice);
        if (sched_class == NULL) {
@@ -295,14 +295,14 @@ pthread_getschedparam_ex(struct cobalt_thread *thread,
        }
 
        base_thread = &thread->threadbase;
-       base_class = xnthread_base_class(base_thread);
+       base_class = base_thread->base_class;
        *policy_r = thread->sched_u_policy;
        prio = xnthread_base_priority(base_thread);
        param_ex->sched_priority = prio;
 
        if (base_class == &xnsched_class_rt) {
                if (xnthread_test_state(base_thread, XNRRB))
-                       ns2ts(&param_ex->sched_rr_quantum, xnthread_time_slice(base_thread));
+                       ns2ts(&param_ex->sched_rr_quantum, base_thread->rrperiod);
                goto unlock_and_exit;
        }
 
@@ -648,7 +648,7 @@ COBALT_SYSCALL(thread_setname, current,
                return -ESRCH;
        }
 
-       ksformat(xnthread_name(&thread->threadbase),
+       ksformat(thread->threadbase.name,
                 XNOBJECT_NAME_LEN - 1, "%s", name);
        p = xnthread_host_task(&thread->threadbase);
        get_task_struct(p);
@@ -762,19 +762,20 @@ COBALT_SYSCALL(thread_getstat, current,
        /* We have to hold the nklock to keep most values consistent. */
        stat.cpu = xnsched_cpu(thread->sched);
        stat.cprio = xnthread_current_priority(thread);
-       xtime = xnthread_get_exectime(thread);
-       if (xnthread_sched(thread)->curr == thread)
-               xtime += xnstat_exectime_now() - xnthread_get_lastswitch(thread);
+       xtime = xnstat_exectime_get_total(&thread->stat.account);
+       if (thread->sched->curr == thread)
+               xtime += xnstat_exectime_now() -
+                       xnstat_exectime_get_last_switch(thread->sched);
        stat.xtime = xnclock_ticks_to_ns(&nkclock, xtime);
        stat.msw = xnstat_counter_get(&thread->stat.ssw);
        stat.csw = xnstat_counter_get(&thread->stat.csw);
        stat.xsc = xnstat_counter_get(&thread->stat.xsc);
        stat.pf = xnstat_counter_get(&thread->stat.pf);
-       stat.status = xnthread_state_flags(thread);
+       stat.status = xnthread_get_state(thread);
        stat.timeout = xnthread_get_timeout(thread,
                                            xnclock_read_monotonic(&nkclock));
-       strcpy(stat.name, xnthread_name(thread));
-       strcpy(stat.personality, xnthread_personality(thread)->name);
+       strcpy(stat.name, thread->name);
+       strcpy(stat.personality, thread->personality->name);
        xnlock_put_irqrestore(&nklock, s);
 
        return __xn_safe_copy_to_user(u_stat, &stat, sizeof(stat));
@@ -805,7 +806,7 @@ void cobalt_thread_restrict(void)
        struct cobalt_thread *thread = cobalt_current_thread();
 
        trace_cobalt_pthread_restrict(thread->hkey.u_pth,
-                     xnthread_personality(&thread->threadbase)->name);
+                     thread->threadbase.personality->name);
        cobalt_pop_personality(&cobalt_personality);
        cobalt_set_extref(&thread->extref, NULL, NULL);
 }
diff --git a/kernel/cobalt/posix/timer.c b/kernel/cobalt/posix/timer.c
index 741ccf2..819db8e 100644
--- a/kernel/cobalt/posix/timer.c
+++ b/kernel/cobalt/posix/timer.c
@@ -86,7 +86,7 @@ timer_init(struct cobalt_timer *timer,
         * want to deliver a signal when a timer elapses.
         */
        xntimer_init(&timer->timerbase, &nkclock, cobalt_timer_handler,
-                    xnthread_sched(&target->threadbase), XNTIMER_UGRAVITY);
+                    target->threadbase.sched, XNTIMER_UGRAVITY);
 
        return target;
 }
@@ -347,7 +347,7 @@ static inline int timer_set(struct cobalt_timer *timer, int flags,
         * Make the timer affine to the CPU running the thread to be
         * signaled.
         */
-       xntimer_set_sched(&timer->timerbase, xnthread_sched(&thread->threadbase));
+       xntimer_set_sched(&timer->timerbase, thread->threadbase.sched);
 
        return cobalt_xntimer_settime(&timer->timerbase,
                                clock_flag(flags, timer->clockid), value);
diff --git a/kernel/cobalt/posix/timerfd.c b/kernel/cobalt/posix/timerfd.c
index cb33994..5502bf1 100644
--- a/kernel/cobalt/posix/timerfd.c
+++ b/kernel/cobalt/posix/timerfd.c
@@ -186,7 +186,7 @@ COBALT_SYSCALL(timerfd_create, lostage,
        tfd->clockid = clockid;
        curr = xnthread_current();
        xntimer_init(&tfd->timer, &nkclock, timerfd_handler,
-                    curr ? xnthread_sched(curr) : NULL, XNTIMER_UGRAVITY);
+                    curr ? curr->sched : NULL, XNTIMER_UGRAVITY);
        xnsynch_init(&tfd->readers, XNSYNCH_PRIO | XNSYNCH_NOPIP, NULL);
        xnselect_init(&tfd->read_select);
        tfd->target = NULL;
diff --git a/kernel/cobalt/registry.c b/kernel/cobalt/registry.c
index 9fb93fa..561c8d9 100644
--- a/kernel/cobalt/registry.c
+++ b/kernel/cobalt/registry.c
@@ -565,9 +565,9 @@ static inline int registry_wakeup_sleepers(const char *key)
        int cnt = 0;
 
        xnsynch_for_each_sleeper_safe(sleeper, tmp, &register_synch) {
-               if (*key == *sleeper->registry.waitkey &&
-                   strcmp(key, sleeper->registry.waitkey) == 0) {
-                       sleeper->registry.waitkey = NULL;
+               if (*key == *sleeper->waitkey &&
+                   strcmp(key, sleeper->waitkey) == 0) {
+                       sleeper->waitkey = NULL;
                        xnsynch_wakeup_this_sleeper(&register_synch, sleeper);
                        ++cnt;
                }
@@ -761,7 +761,7 @@ int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
                }
 
                thread = xnthread_current();
-               thread->registry.waitkey = key;
+               thread->waitkey = key;
                info = xnsynch_sleep_on(&register_synch, timeout, timeout_mode);
                if (info & XNTIMEO) {
                        ret = -ETIMEDOUT;
diff --git a/kernel/cobalt/sched-sporadic.c b/kernel/cobalt/sched-sporadic.c
index e1cb141..77f1822 100644
--- a/kernel/cobalt/sched-sporadic.c
+++ b/kernel/cobalt/sched-sporadic.c
@@ -315,10 +315,10 @@ static int xnsched_sporadic_declare(struct xnthread *thread,
                return -ENOMEM;
 
        xntimer_init(&pss->repl_timer, &nkclock, sporadic_replenish_handler,
-                    xnthread_sched(thread), XNTIMER_IGRAVITY);
+                    thread->sched, XNTIMER_IGRAVITY);
        xntimer_set_name(&pss->repl_timer, "pss-replenish");
        xntimer_init(&pss->drop_timer, &nkclock, sporadic_drop_handler,
-                    xnthread_sched(thread), XNTIMER_IGRAVITY);
+                    thread->sched, XNTIMER_IGRAVITY);
        xntimer_set_name(&pss->drop_timer, "pss-drop");
 
        thread->pss = pss;
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 73e8f12..19e3f71 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -120,11 +120,11 @@ static void watchdog_handler(struct xntimer *timer)
 
        if (xnthread_test_state(curr, XNUSER)) {
                printk(XENO_WARN "watchdog triggered on CPU #%d -- runaway thread "
-                      "'%s' signaled\n", xnsched_cpu(sched), xnthread_name(curr));
+                      "'%s' signaled\n", xnsched_cpu(sched), curr->name);
                xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
        } else {
                printk(XENO_WARN "watchdog triggered on CPU #%d -- runaway thread "
-                      "'%s' canceled\n", xnsched_cpu(sched), xnthread_name(curr));
+                      "'%s' canceled\n", xnsched_cpu(sched), curr->name);
                /*
                 * On behalf on an IRQ handler, xnthread_cancel()
                 * would go half way cancelling the preempted
@@ -324,7 +324,7 @@ void ___xnsched_lock(struct xnsched *sched)
 {
        struct xnthread *curr = sched->curr;
 
-       if (xnthread_lock_count(curr)++ == 0) {
+       if (curr->lock_count++ == 0) {
                sched->lflags |= XNINLOCK;
                xnthread_set_state(curr, XNLOCK);
        }
@@ -335,10 +335,10 @@ void ___xnsched_unlock(struct xnsched *sched)
 {
        struct xnthread *curr = sched->curr;
 
-       if (!XENO_ASSERT(COBALT, xnthread_lock_count(curr) > 0))
+       if (!XENO_ASSERT(COBALT, curr->lock_count > 0))
                return;
 
-       if (--xnthread_lock_count(curr) == 0) {
+       if (--curr->lock_count == 0) {
                xnthread_clear_state(curr, XNLOCK);
                xnthread_clear_info(curr, XNLBALERT);
                sched->lflags &= ~XNINLOCK;
@@ -351,7 +351,7 @@ void ___xnsched_unlock_fully(struct xnsched *sched)
 {
        struct xnthread *curr = sched->curr;
 
-       xnthread_lock_count(curr) = 0;
+       curr->lock_count = 0;
        xnthread_clear_state(curr, XNLOCK);
        xnthread_clear_info(curr, XNLBALERT);
        sched->lflags &= ~XNINLOCK;
@@ -808,7 +808,13 @@ int __xnsched_run(struct xnsched *sched)
        xnlock_get_irqsave(&nklock, s);
 
        curr = sched->curr;
-       xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
+       /*
+        * CAUTION: xnthread_host_task(curr) may be unsynced and even
+        * stale if curr = &rootcb, since the task logged by
+        * leave_root() may not still be the current one. Use
+        * "current" for disambiguating.
+        */
+       xntrace_pid(current->pid, xnthread_current_priority(curr));
 reschedule:
        switched = 0;
        if (!test_resched(sched))
@@ -873,14 +879,14 @@ reschedule:
         */
        curr = sched->curr;
        xnthread_switch_fpu(sched);
-       xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
+       xntrace_pid(current->pid, xnthread_current_priority(curr));
 
 out:
        if (switched &&
            xnsched_maybe_resched_after_unlocked_switch(sched))
                goto reschedule;
 
-       if (xnthread_lock_count(curr))
+       if (curr->lock_count)
                sched->lflags |= XNINLOCK;
 
        xnlock_put_irqrestore(&nklock, s);
@@ -932,7 +938,7 @@ struct vfile_schedlist_data {
        char personality[XNOBJECT_NAME_LEN];
        int cprio;
        xnticks_t timeout;
-       unsigned long state;
+       int state;
 };
 
 static struct xnvfile_snapshot_ops vfile_schedlist_ops;
@@ -976,7 +982,7 @@ static int vfile_schedlist_next(struct xnvfile_snapshot_iterator *it,
        p->pid = xnthread_host_pid(thread);
        memcpy(p->name, thread->name, sizeof(p->name));
        p->cprio = thread->cprio;
-       p->state = xnthread_state_flags(thread);
+       p->state = xnthread_get_state(thread);
        knamecpy(p->sched_class, thread->sched_class->name);
        knamecpy(p->personality, thread->personality->name);
        period = xnthread_get_period(thread);
@@ -1073,7 +1079,7 @@ struct vfile_schedstat_priv {
 struct vfile_schedstat_data {
        int cpu;
        pid_t pid;
-       unsigned long state;
+       int state;
        char name[XNOBJECT_NAME_LEN];
        unsigned long ssw;
        unsigned long csw;
@@ -1140,7 +1146,7 @@ static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it,
        p->cpu = xnsched_cpu(sched);
        p->pid = xnthread_host_pid(thread);
        memcpy(p->name, thread->name, sizeof(p->name));
-       p->state = xnthread_state_flags(thread);
+       p->state = xnthread_get_state(thread);
        p->ssw = xnstat_counter_get(&thread->stat.ssw);
        p->csw = xnstat_counter_get(&thread->stat.csw);
        p->xsc = xnstat_counter_get(&thread->stat.xsc);
@@ -1219,7 +1225,7 @@ static int vfile_schedstat_show(struct xnvfile_snapshot_iterator *it,
                                              p->account_period, NULL);
                }
                xnvfile_printf(it,
-                              "%3u  %-6d %-10lu %-10lu %-10lu %-4lu  %.8lx  %3u.%u"
+                              "%3u  %-6d %-10lu %-10lu %-10lu %-4lu  %.8x  %3u.%u"
                               "  %s\n",
                               p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
                               usage / 10, usage % 10, p->name);
@@ -1236,7 +1242,7 @@ static int vfile_schedacct_show(struct xnvfile_snapshot_iterator *it,
        if (p == NULL)
                return 0;
 
-       xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8lx %Lu %Lu %Lu %s %s %d %Lu\n",
+       xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8x %Lu %Lu %Lu %s %s %d %Lu\n",
                       p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
                       xnclock_ticks_to_ns(&nkclock, p->account_period),
                       xnclock_ticks_to_ns(&nkclock, p->exectime_period),
diff --git a/kernel/cobalt/synch.c b/kernel/cobalt/synch.c
index 5b835c0..89b9757 100644
--- a/kernel/cobalt/synch.c
+++ b/kernel/cobalt/synch.c
@@ -346,7 +346,7 @@ int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
        XENO_BUGON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
 
        thread = xnthread_current();
-       threadh = xnthread_handle(thread);
+       threadh = thread->handle;
        lockp = xnsynch_fastlock(synch);
        trace_cobalt_synch_acquire(synch, thread);
 redo:
@@ -354,7 +354,7 @@ redo:
 
        if (likely(fastlock == XN_NO_HANDLE)) {
                if (xnthread_test_state(thread, XNWEAK))
-                       xnthread_inc_rescnt(thread);
+                       thread->res_count++;
                xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
                return 0;
        }
@@ -461,7 +461,7 @@ block:
        }
  grab:
        if (xnthread_test_state(thread, XNWEAK))
-               xnthread_inc_rescnt(thread);
+               thread->res_count++;
 
        if (xnsynch_pended_p(synch))
                threadh = xnsynch_fast_set_claimed(threadh, 1);
@@ -534,7 +534,7 @@ static struct xnthread *transfer_ownership(struct xnsynch *synch,
        if (synch->status & XNSYNCH_CLAIMED)
                clear_boost(synch, lastowner);
 
-       nextownerh = xnsynch_fast_set_claimed(xnthread_handle(nextowner),
+       nextownerh = xnsynch_fast_set_claimed(nextowner->handle,
                                              xnsynch_pended_p(synch));
        atomic_set(lockp, nextownerh);
 
@@ -584,15 +584,15 @@ struct xnthread *xnsynch_release(struct xnsynch *synch,
        trace_cobalt_synch_release(synch);
 
        if (unlikely(xnthread_test_state(thread, XNWEAK))) {
-               if (xnthread_get_rescnt(thread) == 0)
+               if (thread->res_count == 0)
                        xnthread_signal(thread, SIGDEBUG,
                                          SIGDEBUG_RESCNT_IMBALANCE);
                else
-                       xnthread_dec_rescnt(thread);
+                       thread->res_count--;
        }
 
        lockp = xnsynch_fastlock(synch);
-       threadh = xnthread_handle(thread);
+       threadh = thread->handle;
        if (likely(xnsynch_fast_release(lockp, threadh)))
                return NULL;
 
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 91baafc..5fb2de1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -173,20 +173,19 @@ int __xnthread_init(struct xnthread *thread,
        thread->sched = sched;
        thread->state = flags;
        thread->info = 0;
-       thread->schedlck = 0;
+       thread->lock_count = 0;
        thread->rrperiod = XN_INFINITE;
        thread->wchan = NULL;
        thread->wwake = NULL;
        thread->wcontext = NULL;
-       thread->hrescnt = 0;
-       thread->registry.handle = XN_NO_HANDLE;
-       thread->registry.waitkey = NULL;
+       thread->res_count = 0;
+       thread->handle = XN_NO_HANDLE;
+       thread->waitkey = NULL;
        memset(&thread->stat, 0, sizeof(thread->stat));
        thread->selector = NULL;
        INIT_LIST_HEAD(&thread->claimq);
        xnsynch_init(&thread->join_synch, XNSYNCH_FIFO, NULL);
        /* These will be filled by xnthread_start() */
-       thread->imode = 0;
        thread->entry = NULL;
        thread->cookie = NULL;
 
@@ -201,9 +200,7 @@ int __xnthread_init(struct xnthread *thread,
        xntimer_set_name(&thread->ptimer, thread->name);
        xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);
 
-       thread->init_class = sched_class;
        thread->base_class = NULL; /* xnsched_set_policy() will set it. */
-       thread->init_schedparam = *sched_param;
        ret = xnsched_init_thread(thread);
        if (ret)
                goto err_out;
@@ -261,10 +258,10 @@ void xnthread_init_root_tcb(struct xnthread *thread)
 
 void xnthread_deregister(struct xnthread *thread)
 {
-       if (thread->registry.handle != XN_NO_HANDLE)
-               xnregistry_remove(thread->registry.handle);
+       if (thread->handle != XN_NO_HANDLE)
+               xnregistry_remove(thread->handle);
 
-       thread->registry.handle = XN_NO_HANDLE;
+       thread->handle = XN_NO_HANDLE;
 }
 
 char *xnthread_format_status(unsigned long status, char *buf, int size)
@@ -349,7 +346,7 @@ xnticks_t xnthread_get_period(struct xnthread *thread)
        if (xntimer_running_p(&thread->ptimer))
                period = xntimer_interval(&thread->ptimer);
        else if (xnthread_test_state(thread,XNRRB))
-               period = xnthread_time_slice(thread);
+               period = thread->rrperiod;
 
        return period;
 }
@@ -644,7 +641,6 @@ int xnthread_start(struct xnthread *thread,
        }
 
        xnthread_set_state(thread, attr->mode & (XNTHREAD_MODE_BITS | XNSUSP));
-       thread->imode = (attr->mode & XNTHREAD_MODE_BITS);
        thread->entry = attr->entry;
        thread->cookie = attr->cookie;
 
@@ -718,7 +714,7 @@ int xnthread_set_mode(struct xnthread *thread, int clrmask, int setmask)
        xnlock_get_irqsave(&nklock, s);
 
        curr = xnsched_current_thread();
-       oldmode = xnthread_state_flags(thread) & XNTHREAD_MODE_BITS;
+       oldmode = xnthread_get_state(thread) & XNTHREAD_MODE_BITS;
        xnthread_clear_state(thread, clrmask & XNTHREAD_MODE_BITS);
        xnthread_set_state(thread, setmask & XNTHREAD_MODE_BITS);
        trace_cobalt_thread_set_mode(thread);
@@ -732,13 +728,13 @@ int xnthread_set_mode(struct xnthread *thread, int clrmask, int setmask)
                        if (thread == curr)
                                __xnsched_lock();
                        else
-                               xnthread_lock_count(curr) = 1;
+                               curr->lock_count = 1;
                }
        } else if (oldmode & XNLOCK) {
                if (thread == curr)
                        __xnsched_unlock_fully(); /* Will resched. */
                else
-                       xnthread_lock_count(curr) = 0;
+                       curr->lock_count = 0;
        }
 
        xnlock_put_irqrestore(&nklock, s);
@@ -1617,7 +1613,7 @@ int xnthread_migrate(int cpu)
        }
 
        sched = xnsched_struct(cpu);
-       if (sched == xnthread_sched(thread))
+       if (sched == thread->sched)
                goto unlock_and_exit;
 
        trace_cobalt_thread_migrate(thread, cpu);
diff --git a/kernel/cobalt/trace/cobalt-core.h b/kernel/cobalt/trace/cobalt-core.h
index ec2adef..34c93c3 100644
--- a/kernel/cobalt/trace/cobalt-core.h
+++ b/kernel/cobalt/trace/cobalt-core.h
@@ -12,7 +12,7 @@ DECLARE_EVENT_CLASS(thread_event,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, thread)
-               __string(name, xnthread_name(thread))
+               __string(name, thread->name)
                __field(pid_t, pid)
                __field(unsigned long, state)
                __field(unsigned long, info)
@@ -20,7 +20,7 @@ DECLARE_EVENT_CLASS(thread_event,
 
        TP_fast_assign(
                __entry->thread = thread;
-               __assign_str(name, xnthread_name(thread));
+               __assign_str(name, thread->name);
                __entry->state = thread->state;
                __entry->info = thread->info;
                __entry->pid = xnthread_host_pid(thread);
@@ -37,13 +37,13 @@ DECLARE_EVENT_CLASS(synch_wait_event,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, thread)
-               __string(name, xnthread_name(thread))
+               __string(name, thread->name)
                __field(struct xnsynch *, synch)
        ),
 
        TP_fast_assign(
                __entry->thread = thread;
-               __assign_str(name, xnthread_name(thread));
+               __assign_str(name, thread->name);
                __entry->synch = synch;
        ),
 
@@ -102,13 +102,13 @@ DECLARE_EVENT_CLASS(thread_migrate,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, thread)
-               __string(name, xnthread_name(thread))
+               __string(name, thread->name)
                __field(unsigned int, cpu)
        ),
 
        TP_fast_assign(
                __entry->thread = thread;
-               __assign_str(name, xnthread_name(thread));
+               __assign_str(name, thread->name);
                __entry->cpu = cpu;
        ),
 
@@ -168,15 +168,15 @@ TRACE_EVENT(cobalt_switch_context,
        TP_STRUCT__entry(
                __field(struct xnthread *, prev)
                __field(struct xnthread *, next)
-               __string(prev_name, xnthread_name(prev))
-               __string(next_name, xnthread_name(next))
+               __string(prev_name, prev->name)
+               __string(next_name, next->name)
        ),
 
        TP_fast_assign(
                __entry->prev = prev;
                __entry->next = next;
-               __assign_str(prev_name, xnthread_name(prev));
-               __assign_str(next_name, xnthread_name(next));
+               __assign_str(prev_name, prev->name);
+               __assign_str(next_name, next->name);
        ),
 
        TP_printk("prev=%p(%s) next=%p(%s)",
@@ -192,7 +192,7 @@ TRACE_EVENT(cobalt_thread_init,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, thread)
-               __string(thread_name, xnthread_name(thread))
+               __string(thread_name, thread->name)
                __string(class_name, sched_class->name)
                __field(unsigned long, flags)
                __field(int, cprio)
@@ -200,7 +200,7 @@ TRACE_EVENT(cobalt_thread_init,
 
        TP_fast_assign(
                __entry->thread = thread;
-               __assign_str(thread_name, xnthread_name(thread));
+               __assign_str(thread_name, thread->name);
                __entry->flags = attr->flags;
                __assign_str(class_name, sched_class->name);
                __entry->cprio = thread->cprio;
@@ -261,14 +261,14 @@ TRACE_EVENT(cobalt_thread_fault,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, thread)
-               __string(name, xnthread_name(thread))
+               __string(name, thread->name)
                __field(void *, ip)
                __field(unsigned int, type)
        ),
 
        TP_fast_assign(
                __entry->thread = thread;
-               __assign_str(name, xnthread_name(thread));
+               __assign_str(name, thread->name);
                __entry->ip = (void *)xnarch_fault_pc(td);
                __entry->type = xnarch_fault_trap(td);
        ),
@@ -359,13 +359,13 @@ TRACE_EVENT(cobalt_shadow_map,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, thread)
-               __string(name, xnthread_name(thread))
+               __string(name, thread->name)
                __field(int, prio)
        ),
 
        TP_fast_assign(
                __entry->thread = thread;
-               __assign_str(name, xnthread_name(thread));
+               __assign_str(name, thread->name);
                __entry->prio = xnthread_base_priority(thread);
        ),
 
diff --git a/kernel/cobalt/trace/cobalt-posix.h b/kernel/cobalt/trace/cobalt-posix.h
index 6cf26a5..c2dfe34 100644
--- a/kernel/cobalt/trace/cobalt-posix.h
+++ b/kernel/cobalt/trace/cobalt-posix.h
@@ -28,13 +28,13 @@ DECLARE_EVENT_CLASS(syscall_entry,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, thread)
-               __string(name, thread ? xnthread_name(thread) : "(anon)")
+               __string(name, thread ? thread->name : "(anon)")
                __field(int, nr)
        ),
 
        TP_fast_assign(
                __entry->thread = thread;
-               __assign_str(name, thread ? xnthread_name(thread) : "(anon)");
+               __assign_str(name, thread ? thread->name : "(anon)");
                __entry->nr = nr;
        ),
 
diff --git a/kernel/cobalt/trace/cobalt-rtdm.h b/kernel/cobalt/trace/cobalt-rtdm.h
index bd69607..2754a67 100644
--- a/kernel/cobalt/trace/cobalt-rtdm.h
+++ b/kernel/cobalt/trace/cobalt-rtdm.h
@@ -89,12 +89,12 @@ DECLARE_EVENT_CLASS(task_op,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, task)
-               __string(task_name, xnthread_name(task))
+               __string(task_name, task->name)
        ),
 
        TP_fast_assign(
                __entry->task = task;
-               __assign_str(task_name, xnthread_name(task));
+               __assign_str(task_name, task->name);
        ),
 
        TP_printk("task %p(%s)", __entry->task, __get_str(task_name))
@@ -383,13 +383,13 @@ TRACE_EVENT(cobalt_driver_event_wait,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, task)
-               __string(task_name, xnthread_name(task))
+               __string(task_name, task->name)
                __field(struct rtdm_event *, ev)
        ),
 
        TP_fast_assign(
                __entry->task = task;
-               __assign_str(task_name, xnthread_name(task));
+               __assign_str(task_name, task->name);
                __entry->ev = ev;
        ),
 
@@ -441,13 +441,13 @@ TRACE_EVENT(cobalt_driver_sem_wait,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, task)
-               __string(task_name, xnthread_name(task))
+               __string(task_name, task->name)
                __field(struct rtdm_sem *, sem)
        ),
 
        TP_fast_assign(
                __entry->task = task;
-               __assign_str(task_name, xnthread_name(task));
+               __assign_str(task_name, task->name);
                __entry->sem = sem;
        ),
 
@@ -486,13 +486,13 @@ TRACE_EVENT(cobalt_driver_mutex_wait,
 
        TP_STRUCT__entry(
                __field(struct xnthread *, task)
-               __string(task_name, xnthread_name(task))
+               __string(task_name, task->name)
                __field(struct rtdm_mutex *, mutex)
        ),
 
        TP_fast_assign(
                __entry->task = task;
-               __assign_str(task_name, xnthread_name(task));
+               __assign_str(task_name, task->name);
                __entry->mutex = mutex;
        ),
 

