Module: xenomai-2.6
Branch: master
Commit: 66c1f8c67e4f4f46e0603bd1f70eaf25c63ef83e
URL:    http://git.xenomai.org/?p=xenomai-2.6.git;a=commit;h=66c1f8c67e4f4f46e0603bd1f70eaf25c63ef83e

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Sat Sep  6 16:57:34 2014 +0200

ksrc: use curr as name for current thread variable
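
The rename is mechanical: local variables of type xnthread_t * (or
struct xnthread *) which always designate the running thread,
typically obtained from xnpod_current_thread() or
xnshadow_thread(current), are now named "curr" instead of "thread"
or "cur". Variables which may refer to an arbitrary thread keep the
name "thread". A condensed sketch of the pattern, adapted from the
xnpipe_recv() hunk below:

        xnthread_t *curr = xnpod_current_thread();

        xnsynch_sleep_on(&state->synchbase, timeout, XN_RELATIVE);

        if (xnthread_test_info(curr, XNTIMEO)) /* wait timed out? */
                ret = -ETIMEDOUT;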

---

 include/nucleus/sched.h      |    2 +-
 include/nucleus/shadow.h     |    2 +-
 include/nucleus/thread.h     |    8 +--
 ksrc/nucleus/pipe.c          |   12 ++--
 ksrc/nucleus/pod.c           |  112 ++++++++++++++---------------
 ksrc/nucleus/registry.c      |   12 ++--
 ksrc/nucleus/sched.c         |   44 ++++++------
 ksrc/nucleus/select.c        |    8 +--
 ksrc/nucleus/shadow.c        |  160 +++++++++++++++++++++---------------------
 ksrc/nucleus/synch.c         |   70 +++++++++---------
 ksrc/skins/native/buffer.c   |   12 ++--
 ksrc/skins/native/cond.c     |    6 +-
 ksrc/skins/native/heap.c     |   10 +--
 ksrc/skins/native/mutex.c    |   20 +++---
 ksrc/skins/native/queue.c    |    8 +--
 ksrc/skins/native/syscall.c  |   18 ++---
 ksrc/skins/posix/clock.c     |   14 ++--
 ksrc/skins/posix/cond.c      |   32 ++++-----
 ksrc/skins/posix/cond.h      |    4 +-
 ksrc/skins/posix/mq.c        |   24 +++----
 ksrc/skins/posix/mutex.c     |   26 +++----
 ksrc/skins/posix/sem.c       |   14 ++--
 ksrc/skins/posix/syscall.c   |   42 +++++------
 ksrc/skins/posix/thread.c    |   16 ++---
 ksrc/skins/rtdm/drvlib.c     |   46 ++++++------
 ksrc/skins/vrtx/mx.c         |   18 ++---
 ksrc/skins/vxworks/semLib.c  |   28 ++++----
 ksrc/skins/vxworks/syscall.c |    8 +--
 28 files changed, 388 insertions(+), 388 deletions(-)

diff --git a/include/nucleus/sched.h b/include/nucleus/sched.h
index 8db23cc..509312d 100644
--- a/include/nucleus/sched.h
+++ b/include/nucleus/sched.h
@@ -276,7 +276,7 @@ int xnsched_set_policy(struct xnthread *thread,
 void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);
 
-void xnsched_migrate(struct xnthread *thread,
+void xnsched_migrate(struct xnthread *curr,
                     struct xnsched *sched);
 
 void xnsched_migrate_passive(struct xnthread *thread,
diff --git a/include/nucleus/shadow.h b/include/nucleus/shadow.h
index bca3c48..6ad4e5e 100644
--- a/include/nucleus/shadow.h
+++ b/include/nucleus/shadow.h
@@ -61,7 +61,7 @@ void xnshadow_grab_events(void);
 
 void xnshadow_release_events(void);
 
-int xnshadow_map(struct xnthread *thread,
+int xnshadow_map(struct xnthread *curr,
                 xncompletion_t __user *u_completion,
                 unsigned long __user *u_mode_offset);
 
diff --git a/include/nucleus/thread.h b/include/nucleus/thread.h
index aca5f90..39ee394 100644
--- a/include/nucleus/thread.h
+++ b/include/nucleus/thread.h
@@ -436,7 +436,7 @@ struct xnthread *xnthread_lookup(xnhandle_t threadh)
        return (thread && xnthread_handle(thread) == threadh) ? thread : NULL;
 }
 
-static inline int xnthread_try_grab(struct xnthread *thread,
+static inline int xnthread_try_grab(struct xnthread *curr,
                                    struct xnsynch *synch)
 {
        XENO_BUGON(NUCLEUS, xnsynch_fastlock_p(synch));
@@ -444,10 +444,10 @@ static inline int xnthread_try_grab(struct xnthread *thread,
        if (xnsynch_owner(synch) != NULL)
                return 0;
 
-       xnsynch_set_owner(synch, thread);
+       xnsynch_set_owner(synch, curr);
 
-       if (xnthread_test_state(thread, XNOTHER))
-               xnthread_inc_rescnt(thread);
+       if (xnthread_test_state(curr, XNOTHER))
+               xnthread_inc_rescnt(curr);
 
        return 1;
 }
diff --git a/ksrc/nucleus/pipe.c b/ksrc/nucleus/pipe.c
index f6c80f1..3c4dccf 100644
--- a/ksrc/nucleus/pipe.c
+++ b/ksrc/nucleus/pipe.c
@@ -509,7 +509,7 @@ ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
 {
        struct xnpipe_state *state;
        struct xnholder *h;
-       xnthread_t *thread;
+       xnthread_t *curr;
        ssize_t ret;
        spl_t s;
 
@@ -528,7 +528,7 @@ ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
                goto unlock_and_exit;
        }
 
-       thread = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
        while ((h = getq(&state->inq)) == NULL) {
                if (timeout == XN_NONBLOCK) {
@@ -538,21 +538,21 @@ ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
 
                xnsynch_sleep_on(&state->synchbase, timeout, XN_RELATIVE);
 
-               if (xnthread_test_info(thread, XNTIMEO)) {
+               if (xnthread_test_info(curr, XNTIMEO)) {
                        ret = -ETIMEDOUT;
                        goto unlock_and_exit;
                }
-               if (xnthread_test_info(thread, XNBREAK)) {
+               if (xnthread_test_info(curr, XNBREAK)) {
                        ret = -EINTR;
                        goto unlock_and_exit;
                }
-               if (xnthread_test_info(thread, XNRMID)) {
+               if (xnthread_test_info(curr, XNRMID)) {
                        ret = -EIDRM;
                        goto unlock_and_exit;
                }
 
                /* remaining timeout */
-               timeout = xnthread_timeout(thread);
+               timeout = xnthread_timeout(curr);
        }
 
        *pmh = link2mh(h);
diff --git a/ksrc/nucleus/pod.c b/ksrc/nucleus/pod.c
index 22fa91d..27b98b7 100644
--- a/ksrc/nucleus/pod.c
+++ b/ksrc/nucleus/pod.c
@@ -98,17 +98,17 @@ static inline void __xnpod_giveup_fpu(struct xnsched *sched,
                sched->fpuholder = NULL;
 }
 
-static inline void __xnpod_release_fpu(struct xnthread *thread)
+static inline void __xnpod_release_fpu(struct xnthread *curr)
 {
-       if (xnthread_test_state(thread, XNFPU)) {
+       if (xnthread_test_state(curr, XNFPU)) {
                /*
                 * Force the FPU save, and nullify the
                 * sched->fpuholder pointer, to avoid leaving
                 * fpuholder pointing on the backup area of the
                 * migrated thread.
                 */
-               xnarch_save_fpu(xnthread_archtcb(thread));
-               thread->sched->fpuholder = NULL;
+               xnarch_save_fpu(xnthread_archtcb(curr));
+               curr->sched->fpuholder = NULL;
        }
 }
 
@@ -1917,7 +1917,7 @@ unlock_and_exit:
 
 int xnpod_migrate_thread(int cpu)
 {
-       struct xnthread *thread;
+       struct xnthread *curr;
        struct xnsched *sched;
        int ret = 0;
        spl_t s;
@@ -1930,9 +1930,9 @@ int xnpod_migrate_thread(int cpu)
 
        xnlock_get_irqsave(&nklock, s);
 
-       thread = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
-       if (!xnarch_cpu_isset(cpu, thread->affinity)) {
+       if (!xnarch_cpu_isset(cpu, curr->affinity)) {
                ret = -EPERM;
                goto unlock_and_exit;
        }
@@ -1944,15 +1944,15 @@ int xnpod_migrate_thread(int cpu)
 
        trace_mark(xn_nucleus, thread_migrate,
                   "thread %p thread_name %s cpu %d",
-                  thread, xnthread_name(thread), cpu);
+                  curr, xnthread_name(curr), cpu);
 
-       __xnpod_release_fpu(thread);
+       __xnpod_release_fpu(curr);
 
        /* Move to remote scheduler. */
-       xnsched_migrate(thread, sched);
+       xnsched_migrate(curr, sched);
 
        /* Migrate the thread periodic timer. */
-       xntimer_set_sched(&thread->ptimer, sched);
+       xntimer_set_sched(&curr->ptimer, sched);
 
        xnpod_schedule();
 
@@ -1960,7 +1960,7 @@ int xnpod_migrate_thread(int cpu)
         * Reset execution time measurement period so that we don't
         * mess up per-CPU statistics.
         */
-       xnstat_exectime_reset_stats(&thread->stat.lastperiod);
+       xnstat_exectime_reset_stats(&curr->stat.lastperiod);
 
       unlock_and_exit:
 
@@ -1983,7 +1983,7 @@ EXPORT_SYMBOL_GPL(xnpod_migrate_thread);
 
 void xnpod_dispatch_signals(void)
 {
-       xnthread_t *thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        int asrimask, savedmask;
        xnflags_t oldmode;
        xnsigmask_t sigs;
@@ -1992,26 +1992,26 @@ void xnpod_dispatch_signals(void)
        /* Process user-defined signals if the ASR is enabled for this
           thread. */
 
-       if (thread->signals == 0 || xnthread_test_state(thread, XNASDI)
-           || thread->asr == XNTHREAD_INVALID_ASR)
+       if (curr->signals == 0 || xnthread_test_state(curr, XNASDI)
+           || curr->asr == XNTHREAD_INVALID_ASR)
                return;
 
        trace_mark(xn_nucleus, sched_sigdispatch, "signals %lu",
-                  thread->signals);
+                  curr->signals);
 
        /* Start the asynchronous service routine */
-       oldmode = xnthread_test_state(thread, XNTHREAD_MODE_BITS);
-       sigs = thread->signals;
-       asrimask = thread->asrimask;
-       asr = thread->asr;
+       oldmode = xnthread_test_state(curr, XNTHREAD_MODE_BITS);
+       sigs = curr->signals;
+       asrimask = curr->asrimask;
+       asr = curr->asr;
 
        /* Clear pending signals mask since an ASR can be reentrant */
-       thread->signals = 0;
+       curr->signals = 0;
 
        /* Reset ASR mode bits */
-       xnthread_clear_state(thread, XNTHREAD_MODE_BITS);
-       xnthread_set_state(thread, thread->asrmode);
-       thread->asrlevel++;
+       xnthread_clear_state(curr, XNTHREAD_MODE_BITS);
+       xnthread_set_state(curr, curr->asrmode);
+       curr->asrlevel++;
 
        /* Setup ASR interrupt mask then fire it. */
        savedmask = xnarch_setimask(asrimask);
@@ -2019,9 +2019,9 @@ void xnpod_dispatch_signals(void)
        xnarch_setimask(savedmask);
 
        /* Reset the thread mode bits */
-       thread->asrlevel--;
-       xnthread_clear_state(thread, XNTHREAD_MODE_BITS);
-       xnthread_set_state(thread, oldmode);
+       curr->asrlevel--;
+       xnthread_clear_state(curr, XNTHREAD_MODE_BITS);
+       xnthread_set_state(curr, oldmode);
 }
 
 /*!
@@ -2036,26 +2036,26 @@ void xnpod_dispatch_signals(void)
  * Entered with nklock locked, irqs off.
  */
 
-void xnpod_welcome_thread(xnthread_t *thread, int imask)
+void xnpod_welcome_thread(xnthread_t *curr, int imask)
 {
-       xnsched_t *sched = xnsched_finish_unlocked_switch(thread->sched);
+       xnsched_t *sched = xnsched_finish_unlocked_switch(curr->sched);
 
        xnsched_finalize_zombie(sched);
 
        trace_mark(xn_nucleus, thread_boot, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+                  curr, xnthread_name(curr));
 
-       xnarch_trace_pid(-1, xnthread_current_priority(thread));
+       xnarch_trace_pid(-1, xnthread_current_priority(curr));
 
-       if (xnthread_test_state(thread, XNLOCK))
+       if (xnthread_test_state(curr, XNLOCK))
                /* Actually grab the scheduler lock. */
                xnpod_lock_sched();
 
-       __xnpod_init_fpu(sched, thread);
+       __xnpod_init_fpu(sched, curr);
 
-       xnthread_clear_state(thread, XNRESTART);
+       xnthread_clear_state(curr, XNRESTART);
 
-       if (xnthread_signaled_p(thread))
+       if (xnthread_signaled_p(curr))
                xnpod_dispatch_signals();
 
        xnlock_clear_irqoff(&nklock);
@@ -2535,23 +2535,23 @@ EXPORT_SYMBOL_GPL(xnpod_remove_hook);
 
 int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo)
 {
-       xnthread_t *thread;
+       xnthread_t *curr;
 
        if (!xnpod_active_p() ||
            (!xnpod_interrupt_p() && xnpod_idle_p()))
                return 0;
 
-       thread = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
        trace_mark(xn_nucleus, thread_fault,
                   "thread %p thread_name %s ip %p type 0x%x",
-                  thread, xnthread_name(thread),
+                  curr, xnthread_name(curr),
                   (void *)xnarch_fault_pc(fltinfo),
                   xnarch_fault_trap(fltinfo));
 
 #ifdef __KERNEL__
        if (xnarch_fault_fpu_p(fltinfo)) {
-               if (__xnpod_fault_init_fpu(thread))
+               if (__xnpod_fault_init_fpu(curr))
                        return 1;
                print_symbol("invalid use of FPU in Xenomai context at %s\n",
                             xnarch_fault_pc(fltinfo));
@@ -2560,10 +2560,10 @@ int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo)
        if (!xnpod_userspace_p()) {
                xnprintf
                    ("suspending kernel thread %p ('%s') at 0x%lx after 
exception #0x%x\n",
-                    thread, thread->name, xnarch_fault_pc(fltinfo),
+                    curr, curr->name, xnarch_fault_pc(fltinfo),
                     xnarch_fault_trap(fltinfo));
 
-               xnpod_suspend_thread(thread, XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
+               xnpod_suspend_thread(curr, XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
                return 1;
        }
 
@@ -2580,24 +2580,24 @@ int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo)
                        xnarch_trace_panic_freeze();
                        xnprintf
                            ("Switching %s to secondary mode after exception 
#%u in "
-                            "kernel-space at 0x%lx (pid %d)\n", thread->name,
+                            "kernel-space at 0x%lx (pid %d)\n", curr->name,
                             xnarch_fault_trap(fltinfo),
                             xnarch_fault_pc(fltinfo),
-                            xnthread_user_pid(thread));
+                            xnthread_user_pid(curr));
                        xnarch_trace_panic_dump();
                } else if (xnarch_fault_notify(fltinfo))        /* Don't report debug traps */
                        xnprintf
                            ("Switching %s to secondary mode after exception 
#%u from "
-                            "user-space at 0x%lx (pid %d)\n", thread->name,
+                            "user-space at 0x%lx (pid %d)\n", curr->name,
                             xnarch_fault_trap(fltinfo),
                             xnarch_fault_pc(fltinfo),
-                            xnthread_user_pid(thread));
+                            xnthread_user_pid(curr));
 #endif /* XENO_DEBUG(NUCLEUS) */
                if (xnarch_fault_pf_p(fltinfo))
                        /* The page fault counter is not SMP-safe, but it's a
                           simple indicator that something went wrong wrt memory
                           locking anyway. */
-                       xnstat_counter_inc(&thread->stat.pf);
+                       xnstat_counter_inc(&curr->stat.pf);
 
                xnshadow_relax(xnarch_fault_notify(fltinfo),
                               SIGDEBUG_MIGRATE_FAULT);
@@ -2957,31 +2957,31 @@ int xnpod_wait_thread_period(unsigned long *overruns_r)
 {
        xnticks_t now;
        unsigned long overruns = 0;
-       xnthread_t *thread;
+       xnthread_t *curr;
        xntbase_t *tbase;
        int err = 0;
        spl_t s;
 
-       thread = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (unlikely(!xntimer_running_p(&thread->ptimer))) {
+       if (unlikely(!xntimer_running_p(&curr->ptimer))) {
                err = -EWOULDBLOCK;
                goto unlock_and_exit;
        }
 
        trace_mark(xn_nucleus, thread_waitperiod, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+                  curr, xnthread_name(curr));
 
        /* Work with either TSC or periodic ticks. */
-       tbase = xnthread_time_base(thread);
+       tbase = xnthread_time_base(curr);
        now = xntbase_get_rawclock(tbase);
 
-       if (likely((xnsticks_t)(now - xntimer_pexpect(&thread->ptimer)) < 0)) {
-               xnpod_suspend_thread(thread, XNDELAY, XN_INFINITE, XN_RELATIVE, 
NULL);
+       if (likely((xnsticks_t)(now - xntimer_pexpect(&curr->ptimer)) < 0)) {
+               xnpod_suspend_thread(curr, XNDELAY, XN_INFINITE, XN_RELATIVE, 
NULL);
 
-               if (unlikely(xnthread_test_info(thread, XNBREAK))) {
+               if (unlikely(xnthread_test_info(curr, XNBREAK))) {
                        err = -EINTR;
                        goto unlock_and_exit;
                }
@@ -2989,13 +2989,13 @@ int xnpod_wait_thread_period(unsigned long *overruns_r)
                now = xntbase_get_rawclock(tbase);
        }
 
-       overruns = xntimer_get_overruns(&thread->ptimer, now);
+       overruns = xntimer_get_overruns(&curr->ptimer, now);
        if (overruns) {
                err = -ETIMEDOUT;
 
                trace_mark(xn_nucleus, thread_missedperiod,
                           "thread %p thread_name %s overruns %lu",
-                          thread, xnthread_name(thread), overruns);
+                          curr, xnthread_name(curr), overruns);
        }
 
        if (likely(overruns_r != NULL))
diff --git a/ksrc/nucleus/registry.c b/ksrc/nucleus/registry.c
index eceadad..0a5efe9 100644
--- a/ksrc/nucleus/registry.c
+++ b/ksrc/nucleus/registry.c
@@ -798,7 +798,7 @@ int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
                    xnhandle_t *phandle)
 {
        struct xnobject *object;
-       xnthread_t *thread;
+       xnthread_t *curr;
        xntbase_t *tbase;
        int err = 0;
        spl_t s;
@@ -806,8 +806,8 @@ int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
        if (!key)
                return -EINVAL;
 
-       thread = xnpod_current_thread();
-       tbase = xnthread_time_base(thread);
+       curr = xnpod_current_thread();
+       tbase = xnthread_time_base(curr);
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -831,15 +831,15 @@ int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
                        goto unlock_and_exit;
                }
 
-               thread->registry.waitkey = key;
+               curr->registry.waitkey = key;
                xnsynch_sleep_on(&registry_hash_synch, timeout, timeout_mode);
 
-               if (xnthread_test_info(thread, XNTIMEO)) {
+               if (xnthread_test_info(curr, XNTIMEO)) {
                        err = -ETIMEDOUT;
                        goto unlock_and_exit;
                }
 
-               if (xnthread_test_info(thread, XNBREAK)) {
+               if (xnthread_test_info(curr, XNBREAK)) {
                        err = -EINTR;
                        goto unlock_and_exit;
                }
diff --git a/ksrc/nucleus/sched.c b/ksrc/nucleus/sched.c
index 3391f4a..e59f48e 100644
--- a/ksrc/nucleus/sched.c
+++ b/ksrc/nucleus/sched.c
@@ -80,9 +80,9 @@ MODULE_PARM_DESC(watchdog_timeout, "Watchdog timeout (s)");
 static void xnsched_watchdog_handler(struct xntimer *timer)
 {
        struct xnsched *sched = xnpod_current_sched();
-       struct xnthread *thread = sched->curr;
+       struct xnthread *curr = sched->curr;
 
-       if (likely(xnthread_test_state(thread, XNROOT))) {
+       if (likely(xnthread_test_state(curr, XNROOT))) {
                xnsched_reset_watchdog(sched);
                return;
        }
@@ -91,24 +91,24 @@ static void xnsched_watchdog_handler(struct xntimer *timer)
                return;
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
-       if (xnthread_test_state(thread, XNSHADOW) &&
-           !xnthread_amok_p(thread)) {
+       if (xnthread_test_state(curr, XNSHADOW) &&
+           !xnthread_amok_p(curr)) {
                trace_mark(xn_nucleus, watchdog_signal,
                           "thread %p thread_name %s",
-                          thread, xnthread_name(thread));
+                          curr, xnthread_name(curr));
                xnprintf("watchdog triggered -- signaling runaway thread "
-                        "'%s'\n", xnthread_name(thread));
-               xnthread_set_info(thread, XNAMOK);
-               xnshadow_send_sig(thread, SIGDEBUG, SIGDEBUG_WATCHDOG, 1);
-               xnshadow_call_mayday(thread);
+                        "'%s'\n", xnthread_name(curr));
+               xnthread_set_info(curr, XNAMOK);
+               xnshadow_send_sig(curr, SIGDEBUG, SIGDEBUG_WATCHDOG, 1);
+               xnshadow_call_mayday(curr);
        } else
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
        {
                trace_mark(xn_nucleus, watchdog, "thread %p thread_name %s",
-                          thread, xnthread_name(thread));
+                          curr, xnthread_name(curr));
                xnprintf("watchdog triggered -- killing runaway thread '%s'\n",
-                        xnthread_name(thread));
-               xnpod_delete_thread(thread);
+                        xnthread_name(curr));
+               xnpod_delete_thread(curr);
        }
        xnsched_reset_watchdog(sched);
 }
@@ -478,33 +478,33 @@ void xnsched_track_policy(struct xnthread *thread,
 
 /* Must be called with nklock locked, interrupts off. thread must be
  * runnable. */
-void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
+void xnsched_migrate(struct xnthread *curr, struct xnsched *sched)
 {
-       struct xnsched_class *sched_class = thread->sched_class;
+       struct xnsched_class *sched_class = curr->sched_class;
 
-       if (xnthread_test_state(thread, XNREADY)) {
-               xnsched_dequeue(thread);
-               xnthread_clear_state(thread, XNREADY);
+       if (xnthread_test_state(curr, XNREADY)) {
+               xnsched_dequeue(curr);
+               xnthread_clear_state(curr, XNREADY);
        }
 
        if (sched_class->sched_migrate)
-               sched_class->sched_migrate(thread, sched);
+               sched_class->sched_migrate(curr, sched);
        /*
         * WARNING: the scheduling class may have just changed as a
         * result of calling the per-class migration hook.
         */
-       xnsched_set_resched(thread->sched);
-       thread->sched = sched;
+       xnsched_set_resched(curr->sched);
+       curr->sched = sched;
 
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        /*
         * Mark the thread in flight, xnsched_finish_unlocked_switch()
         * will put the thread on the remote runqueue.
         */
-       xnthread_set_state(thread, XNMIGRATE);
+       xnthread_set_state(curr, XNMIGRATE);
 #else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
        /* Move thread to the remote runnable queue. */
-       xnsched_putback(thread);
+       xnsched_putback(curr);
 #endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
 }
 
diff --git a/ksrc/nucleus/select.c b/ksrc/nucleus/select.c
index 987ef30..a44f784 100644
--- a/ksrc/nucleus/select.c
+++ b/ksrc/nucleus/select.c
@@ -326,13 +326,13 @@ int xnselect(struct xnselector *selector,
             xnticks_t timeout, xntmode_t timeout_mode)
 {
        unsigned i, not_empty = 0;
-       xnthread_t *thread;
+       xnthread_t *curr;
        spl_t s;
 
        if ((unsigned) nfds > __FD_SETSIZE)
                return -EINVAL;
 
-       thread = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
        for (i = 0; i < XNSELECT_MAX_TYPES; i++)
                if (out_fds[i])
@@ -365,7 +365,7 @@ int xnselect(struct xnselector *selector,
                                          &selector->fds[i].pending, nfds))
                                not_empty = 1;
 
-               if (xnthread_test_info(thread, XNBREAK | XNTIMEO))
+               if (xnthread_test_info(curr, XNBREAK | XNTIMEO))
                        break;
        }
        xnlock_put_irqrestore(&nklock, s);
@@ -380,7 +380,7 @@ int xnselect(struct xnselector *selector,
                return count;
        }
 
-       if (xnthread_test_info(thread, XNBREAK))
+       if (xnthread_test_info(curr, XNBREAK))
                return -EINTR;
 
        return 0; /* Timeout */
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index 38c1423..f2b6703 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -939,13 +939,13 @@ static int gatekeeper_thread(void *data)
 int xnshadow_harden(void)
 {
        struct task_struct *this_task = current;
-       struct xnthread *thread;
+       struct xnthread *curr;
        struct xnsched *sched;
        int cpu, err;
 
 redo:
-       thread = xnshadow_thread(this_task);
-       if (!thread)
+       curr = xnshadow_thread(this_task);
+       if (!curr)
                return -EPERM;
 
        cpu = task_cpu(this_task);
@@ -957,8 +957,8 @@ redo:
                goto failed;
        }
 
-       if (thread->u_mode)
-               *(thread->u_mode) = thread->state & ~XNRELAX;
+       if (curr->u_mode)
+               *(curr->u_mode) = curr->state & ~XNRELAX;
 
        preempt_disable();
 
@@ -986,16 +986,16 @@ redo:
 
        trace_mark(xn_nucleus, shadow_gohard,
                   "thread %p thread_name %s comm %s",
-                  thread, xnthread_name(thread), this_task->comm);
+                  curr, xnthread_name(curr), this_task->comm);
 
-       sched->gktarget = thread;
-       xnthread_set_info(thread, XNATOMIC);
+       sched->gktarget = curr;
+       xnthread_set_info(curr, XNATOMIC);
        set_current_state(TASK_INTERRUPTIBLE | TASK_ATOMICSWITCH);
 
        wake_up_process(sched->gatekeeper);
 
        schedule();
-       xnthread_clear_info(thread, XNATOMIC);
+       xnthread_clear_info(curr, XNATOMIC);
 
        /*
         * Rare case: we might have received a signal before entering
@@ -1009,7 +1009,7 @@ redo:
                    || this_task->state != TASK_RUNNING))
                        xnpod_fatal
                            ("xnshadow_harden() failed for thread %s[%d]",
-                            thread->name, xnthread_user_pid(thread));
+                            curr->name, xnthread_user_pid(curr));
 
                /*
                 * Synchronize with the chosen gatekeeper so that it no longer
@@ -1024,7 +1024,7 @@ redo:
        }
 
        /* "current" is now running into the Xenomai domain. */
-       sched = xnsched_finish_unlocked_switch(thread->sched);
+       sched = xnsched_finish_unlocked_switch(curr->sched);
 
        xnsched_finalize_zombie(sched);
 
@@ -1034,7 +1034,7 @@ redo:
 
        xnarch_schedule_tail(this_task);
 
-       if (xnthread_signaled_p(thread))
+       if (xnthread_signaled_p(curr))
                xnpod_dispatch_signals();
 
        xnlock_clear_irqon(&nklock);
@@ -1046,11 +1046,11 @@ redo:
         * gatekeeper; in such a case, we must unlink from the remote
         * CPU's RPI list now.
         */
-       if (rpi_p(thread))
-               rpi_clear_remote(thread);
+       if (rpi_p(curr))
+               rpi_clear_remote(curr);
 
        trace_mark(xn_nucleus, shadow_hardened, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+                  curr, xnthread_name(curr));
 
        /*
         * Recheck pending signals once again. As we block task wakeups during
@@ -1060,7 +1060,7 @@ redo:
         * to here.
         */
        if (signal_pending(this_task)) {
-               xnshadow_relax(!xnthread_test_state(thread, XNDEBUG),
+               xnshadow_relax(!xnthread_test_state(curr, XNDEBUG),
                               SIGDEBUG_MIGRATE_SIGNAL);
                return -ERESTARTSYS;
        }
@@ -1070,8 +1070,8 @@ redo:
        return 0;
 
       failed:
-       if (thread->u_mode)
-               *(thread->u_mode) = thread->state;
+       if (curr->u_mode)
+               *(curr->u_mode) = curr->state;
        return err;
 }
 EXPORT_SYMBOL_GPL(xnshadow_harden);
@@ -1108,11 +1108,11 @@ EXPORT_SYMBOL_GPL(xnshadow_harden);
 
 void xnshadow_relax(int notify, int reason)
 {
-       xnthread_t *thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        siginfo_t si;
        int prio;
 
-       XENO_BUGON(NUCLEUS, xnthread_test_state(thread, XNROOT));
+       XENO_BUGON(NUCLEUS, xnthread_test_state(curr, XNROOT));
 
        /*
         * Enqueue the request to move the running shadow from the Xenomai
@@ -1120,7 +1120,7 @@ void xnshadow_relax(int notify, int reason)
         * to resume using the register state of the shadow thread.
         */
        trace_mark(xn_nucleus, shadow_gorelax, "thread %p thread_name %s",
-                 thread, xnthread_name(thread));
+                 curr, xnthread_name(curr));
 
        /*
         * If you intend to change the following interrupt-free
@@ -1136,7 +1136,7 @@ void xnshadow_relax(int notify, int reason)
         * xnpod_suspend_thread() has an interrupts-on section built in.
         */
        splmax();
-       rpi_push(thread->sched, thread);
+       rpi_push(curr->sched, curr);
        schedule_linux_call(LO_WAKEUP_REQ, current, 0);
 
        /*
@@ -1145,21 +1145,21 @@ void xnshadow_relax(int notify, int reason)
         */
        xnlock_get(&nklock);
        clear_task_nowakeup(current);
-       xnpod_suspend_thread(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+       xnpod_suspend_thread(curr, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
 
        splnone();
        if (XENO_DEBUG(NUCLEUS) && rthal_current_domain != rthal_root_domain)
                xnpod_fatal("xnshadow_relax() failed for thread %s[%d]",
-                           thread->name, xnthread_user_pid(thread));
+                           curr->name, xnthread_user_pid(curr));
 
-       prio = normalize_priority(xnthread_current_priority(thread));
+       prio = normalize_priority(xnthread_current_priority(curr));
        rthal_reenter_root(get_switch_lock_owner(),
                           prio ? SCHED_FIFO : SCHED_NORMAL, prio);
 
-       xnstat_counter_inc(&thread->stat.ssw);  /* Account for secondary mode switch. */
+       xnstat_counter_inc(&curr->stat.ssw);    /* Account for secondary mode switch. */
 
        if (notify) {
-               if (xnthread_test_state(thread, XNTRAPSW)) {
+               if (xnthread_test_state(curr, XNTRAPSW)) {
                        /* Help debugging spurious relaxes. */
                        memset(&si, 0, sizeof(si));
                        si.si_signo = SIGDEBUG;
@@ -1167,12 +1167,12 @@ void xnshadow_relax(int notify, int reason)
                        si.si_int = reason;
                        send_sig_info(SIGDEBUG, &si, current);
                }
-               xnsynch_detect_claimed_relax(thread);
+               xnsynch_detect_claimed_relax(curr);
        }
 
-       if (xnthread_test_info(thread, XNPRIOSET)) {
-               xnthread_clear_info(thread, XNPRIOSET);
-               xnshadow_send_sig(thread, SIGSHADOW,
+       if (xnthread_test_info(curr, XNPRIOSET)) {
+               xnthread_clear_info(curr, XNPRIOSET);
+               xnshadow_send_sig(curr, SIGSHADOW,
                                  sigshadow_int(SIGSHADOW_ACTION_RENICE, prio),
                                  1);
        }
@@ -1183,21 +1183,21 @@ void xnshadow_relax(int notify, int reason)
           counter-part when returning to secondary mode. [Actually,
           there is no service changing the CPU affinity from primary
           mode available from the nucleus --rpm]. */
-       if (xnthread_test_info(thread, XNAFFSET)) {
-               xnthread_clear_info(thread, XNAFFSET);
-               set_cpus_allowed(current, xnthread_affinity(thread));
+       if (xnthread_test_info(curr, XNAFFSET)) {
+               xnthread_clear_info(curr, XNAFFSET);
+               set_cpus_allowed(current, xnthread_affinity(curr));
        }
 #endif /* CONFIG_SMP */
 
        /* "current" is now running into the Linux domain on behalf of the
           root thread. */
 
-       if (thread->u_mode)
-               *(thread->u_mode) = thread->state;
+       if (curr->u_mode)
+               *(curr->u_mode) = curr->state;
 
        trace_mark(xn_nucleus, shadow_relaxed,
                  "thread %p thread_name %s comm %s",
-                 thread, xnthread_name(thread), current->comm);
+                 curr, xnthread_name(curr), current->comm);
 }
 EXPORT_SYMBOL_GPL(xnshadow_relax);
 
@@ -1210,7 +1210,7 @@ void xnshadow_exit(void)
 }
 
 /*!
- * \fn int xnshadow_map(xnthread_t *thread,
+ * \fn int xnshadow_map(xnthread_t *curr,
  *                      xncompletion_t __user *u_completion,
  *                      unsigned long __user *u_mode_offset)
  * @internal
@@ -1271,7 +1271,7 @@ void xnshadow_exit(void)
  *
  */
 
-int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
+int xnshadow_map(xnthread_t *curr, xncompletion_t __user *u_completion,
                 unsigned long __user *u_mode_offset)
 {
        struct xnthread_start_attr attr;
@@ -1283,10 +1283,10 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
        spl_t s;
        int ret;
 
-       if (!xnthread_test_state(thread, XNSHADOW))
+       if (!xnthread_test_state(curr, XNSHADOW))
                return -EINVAL;
 
-       if (xnshadow_thread(current) || xnthread_test_state(thread, XNMAPPED))
+       if (xnshadow_thread(current) || xnthread_test_state(curr, XNMAPPED))
                return -EBUSY;
 
        if (!access_wok(u_mode_offset, sizeof(*u_mode_offset)))
@@ -1307,7 +1307,7 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
 #endif /* CONFIG_MMU */
 
        /* Increment the interface reference count. */
-       magic = xnthread_get_magic(thread);
+       magic = xnthread_get_magic(curr);
 
        for (muxid = 0; muxid < XENOMAI_MUX_NR; muxid++) {
                if (muxtable[muxid].props && muxtable[muxid].props->magic == magic) {
@@ -1335,17 +1335,17 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
 
        trace_mark(xn_nucleus, shadow_map,
                   "thread %p thread_name %s pid %d priority %d",
-                  thread, xnthread_name(thread), current->pid,
-                  xnthread_base_priority(thread));
+                  curr, xnthread_name(curr), current->pid,
+                  xnthread_base_priority(curr));
 
-       xnarch_init_shadow_tcb(xnthread_archtcb(thread), thread,
-                              xnthread_name(thread));
+       xnarch_init_shadow_tcb(xnthread_archtcb(curr), curr,
+                              xnthread_name(curr));
 
-       thread->u_mode = u_mode;
+       curr->u_mode = u_mode;
        __xn_put_user(xnheap_mapped_offset(sem_heap, u_mode), u_mode_offset);
 
-       xnthread_set_state(thread, XNMAPPED);
-       xnpod_suspend_thread(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+       xnthread_set_state(curr, XNMAPPED);
+       xnpod_suspend_thread(curr, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
 
        /*
         * Switch on propagation of normal kernel events for the bound
@@ -1360,23 +1360,23 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
         * positive in debug code from do_schedule_event() and
         * friends.
         */
-       xnshadow_thrptd(current) = thread;
+       xnshadow_thrptd(current) = curr;
        xnshadow_mmptd(current) = current->mm;
        xnarch_atomic_inc(&sys_ppd->refcnt);
 
        rthal_enable_notifier(current);
 
-       if (xnthread_base_priority(thread) == 0 &&
+       if (xnthread_base_priority(curr) == 0 &&
            current->policy == SCHED_NORMAL)
                /* Non real-time shadow. */
-               xnthread_set_state(thread, XNOTHER);
+               xnthread_set_state(curr, XNOTHER);
 
        if (u_completion) {
                /*
                 * Send the renice signal if we are not migrating so that user
                 * space will immediately align Linux sched policy and prio.
                 */
-               xnshadow_renice(thread);
+               xnshadow_renice(curr);
 
                /*
                 * We still have the XNDORMANT bit set, so we can't
@@ -1393,12 +1393,12 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
        attr.affinity = affinity;
        attr.entry = NULL;
        attr.cookie = NULL;
-       ret = xnpod_start_thread(thread, &attr);
+       ret = xnpod_start_thread(curr, &attr);
        if (ret)
                return ret;
 
-       if (thread->u_mode)
-               *(thread->u_mode) = thread->state;
+       if (curr->u_mode)
+               *(curr->u_mode) = curr->state;
 
        ret = xnshadow_harden();
 
@@ -1406,10 +1406,10 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
         * Ensure that user space will receive the proper Linux task policy
         * and prio on next switch to secondary mode.
         */
-       xnthread_set_info(thread, XNPRIOSET);
+       xnthread_set_info(curr, XNPRIOSET);
 
-       xnarch_trace_pid(xnthread_user_pid(thread),
-                        xnthread_current_priority(thread));
+       xnarch_trace_pid(xnthread_user_pid(curr),
+                        xnthread_current_priority(curr));
 
        return ret;
 }
@@ -1653,16 +1653,16 @@ EXPORT_SYMBOL_GPL(xnshadow_call_mayday);
 
 static int xnshadow_sys_mayday(struct pt_regs *regs)
 {
-       struct xnthread *cur;
+       struct xnthread *curr;
 
-       cur = xnshadow_thread(current);
-       if (likely(cur != NULL)) {
+       curr = xnshadow_thread(current);
+       if (likely(curr != NULL)) {
                /*
                 * If the thread is amok in primary mode, this syscall
                 * we have just forced on it will cause it to
                 * relax. See do_hisyscall_event().
                 */
-               xnarch_fixup_mayday(xnthread_archtcb(cur), regs);
+               xnarch_fixup_mayday(xnthread_archtcb(curr), regs);
 
                /* returning 0 here would clobber the register holding
                   the return value. Instead, return whatever value
@@ -2097,15 +2097,15 @@ static int xnshadow_sys_heap_info(struct pt_regs *regs)
 
 static int xnshadow_sys_current(struct pt_regs *regs)
 {
-       xnthread_t *cur = xnshadow_thread(current);
+       xnthread_t *curr = xnshadow_thread(current);
        xnhandle_t __user *us_handle;
 
-       if (!cur)
+       if (!curr)
                return -EPERM;
 
        us_handle = (xnhandle_t __user *) __xn_reg_arg1(regs);
 
-       return __xn_safe_copy_to_user(us_handle, &xnthread_handle(cur),
+       return __xn_safe_copy_to_user(us_handle, &xnthread_handle(curr),
                                      sizeof(*us_handle));
 }
 
@@ -2113,28 +2113,28 @@ static int xnshadow_sys_current_info(struct pt_regs *regs)
 {
        xnthread_info_t __user *us_info;
        xnthread_info_t info;
-       xnthread_t *cur = xnshadow_thread(current);
+       xnthread_t *curr = xnshadow_thread(current);
        xnticks_t raw_exectime;
        int i;
 
-       if (!cur)
+       if (!curr)
                return -EPERM;
 
-       info.state = xnthread_state_flags(cur);
-       info.bprio = xnthread_base_priority(cur);
-       info.cprio = xnthread_current_priority(cur);
-       info.cpu = xnsched_cpu(xnthread_sched(cur));
+       info.state = xnthread_state_flags(curr);
+       info.bprio = xnthread_base_priority(curr);
+       info.cprio = xnthread_current_priority(curr);
+       info.cpu = xnsched_cpu(xnthread_sched(curr));
        for (i = 0, info.affinity = 0; i < BITS_PER_LONG; i++)
-               if (xnthread_affine_p(cur, i))
+               if (xnthread_affine_p(curr, i))
                        info.affinity |= 1UL << i;
-       info.relpoint = xntimer_get_date(&cur->ptimer);
-       raw_exectime = xnthread_get_exectime(cur) +
-               xnstat_exectime_now() - xnthread_get_lastswitch(cur);
+       info.relpoint = xntimer_get_date(&curr->ptimer);
+       raw_exectime = xnthread_get_exectime(curr) +
+               xnstat_exectime_now() - xnthread_get_lastswitch(curr);
        info.exectime = xnarch_tsc_to_ns(raw_exectime);
-       info.modeswitches = xnstat_counter_get(&cur->stat.ssw);
-       info.ctxswitches = xnstat_counter_get(&cur->stat.csw);
-       info.pagefaults = xnstat_counter_get(&cur->stat.pf);
-       strcpy(info.name, xnthread_name(cur));
+       info.modeswitches = xnstat_counter_get(&curr->stat.ssw);
+       info.ctxswitches = xnstat_counter_get(&curr->stat.csw);
+       info.pagefaults = xnstat_counter_get(&curr->stat.pf);
+       strcpy(info.name, xnthread_name(curr));
 
        us_info = (xnthread_info_t __user *) __xn_reg_arg1(regs);
 
diff --git a/ksrc/nucleus/synch.c b/ksrc/nucleus/synch.c
index 0926045..020705f 100644
--- a/ksrc/nucleus/synch.c
+++ b/ksrc/nucleus/synch.c
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(xnsynch_init);
 xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
                           xntmode_t timeout_mode)
 {
-       struct xnthread *thread = xnpod_current_thread();
+       struct xnthread *curr = xnpod_current_thread();
        spl_t s;
 
        XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
@@ -180,18 +180,18 @@ xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
 
        trace_mark(xn_nucleus, synch_sleepon,
                   "thread %p thread_name %s synch %p",
-                  thread, xnthread_name(thread), synch);
+                  curr, xnthread_name(curr), synch);
 
        if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
-               appendpq(&synch->pendq, &thread->plink);
+               appendpq(&synch->pendq, &curr->plink);
        else /* i.e. priority-sorted */
-               insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));
+               insertpqf(&synch->pendq, &curr->plink, w_cprio(curr));
 
-       xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
+       xnpod_suspend_thread(curr, XNPEND, timeout, timeout_mode, synch);
 
        xnlock_put_irqrestore(&nklock, s);
 
-       return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
+       return xnthread_test_info(curr, XNRMID|XNTIMEO|XNBREAK);
 }
 EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
 
@@ -406,8 +406,8 @@ static void xnsynch_renice_thread(struct xnthread *thread,
 xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                          xntmode_t timeout_mode)
 {
-       struct xnthread *thread = xnpod_current_thread(), *owner;
-       xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
+       struct xnthread *curr = xnpod_current_thread(), *owner;
+       xnhandle_t threadh = xnthread_handle(curr), fastlock, old;
        const int use_fastlock = xnsynch_fastlock_p(synch);
        spl_t s;
 
@@ -424,9 +424,9 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                                                 XN_NO_HANDLE, threadh);
 
                if (likely(fastlock == XN_NO_HANDLE)) {
-                       if (xnthread_test_state(thread, XNOTHER))
-                               xnthread_inc_rescnt(thread);
-                       xnthread_clear_info(thread,
+                       if (xnthread_test_state(curr, XNOTHER))
+                               xnthread_inc_rescnt(curr);
+                       xnthread_clear_info(curr,
                                            XNRMID | XNTIMEO | XNBREAK);
                        return 0;
                }
@@ -464,7 +464,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                if (!owner) {
                        /* The handle is broken, therefore pretend that the synch
                           object was deleted to signal an error. */
-                       xnthread_set_info(thread, XNRMID);
+                       xnthread_set_info(curr, XNRMID);
                        goto unlock_and_exit;
                }
 
@@ -475,29 +475,29 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                owner = synch->owner;
 
                if (!owner) {
-                       synch->owner = thread;
-                       if (xnthread_test_state(thread, XNOTHER))
-                               xnthread_inc_rescnt(thread);
-                       xnthread_clear_info(thread,
+                       synch->owner = curr;
+                       if (xnthread_test_state(curr, XNOTHER))
+                               xnthread_inc_rescnt(curr);
+                       xnthread_clear_info(curr,
                                            XNRMID | XNTIMEO | XNBREAK);
                        goto unlock_and_exit;
                }
        }
 
-       xnsynch_detect_relaxed_owner(synch, thread);
+       xnsynch_detect_relaxed_owner(synch, curr);
 
        if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
-               appendpq(&synch->pendq, &thread->plink);
-       else if (w_cprio(thread) > w_cprio(owner)) {
+               appendpq(&synch->pendq, &curr->plink);
+       else if (w_cprio(curr) > w_cprio(owner)) {
                if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
                        /* Ownership is still pending, steal the resource. */
-                       synch->owner = thread;
-                       xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
+                       synch->owner = curr;
+                       xnthread_clear_info(curr, XNRMID | XNTIMEO | XNBREAK);
                        xnthread_set_info(owner, XNROBBED);
                        goto grab_and_exit;
                }
 
-               insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));
+               insertpqf(&synch->pendq, &curr->plink, w_cprio(curr));
 
                if (testbits(synch->status, XNSYNCH_PIP)) {
                        if (!xnthread_test_state(owner, XNBOOST)) {
@@ -510,21 +510,21 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                        else
                                __setbits(synch->status, XNSYNCH_CLAIMED);
 
-                       insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
-                       xnsynch_renice_thread(owner, thread);
+                       insertpqf(&owner->claimq, &synch->link, w_cprio(curr));
+                       xnsynch_renice_thread(owner, curr);
                }
        } else
-               insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));
+               insertpqf(&synch->pendq, &curr->plink, w_cprio(curr));
 
-       xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
+       xnpod_suspend_thread(curr, XNPEND, timeout, timeout_mode, synch);
 
-       thread->wwake = NULL;
-       xnthread_clear_info(thread, XNWAKEN);
+       curr->wwake = NULL;
+       xnthread_clear_info(curr, XNWAKEN);
 
-       if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
+       if (xnthread_test_info(curr, XNRMID | XNTIMEO | XNBREAK))
                goto unlock_and_exit;
 
-       if (xnthread_test_info(thread, XNROBBED)) {
+       if (xnthread_test_info(curr, XNROBBED)) {
                /* Somebody stole us the ownership while we were ready
                   to run, waiting for the CPU: we need to wait again
                   for the resource. */
@@ -532,18 +532,18 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                        xnlock_put_irqrestore(&nklock, s);
                        goto redo;
                }
-               timeout = xntimer_get_timeout_stopped(&thread->rtimer);
+               timeout = xntimer_get_timeout_stopped(&curr->rtimer);
                if (timeout > 1) { /* Otherwise, it's too late. */
                        xnlock_put_irqrestore(&nklock, s);
                        goto redo;
                }
-               xnthread_set_info(thread, XNTIMEO);
+               xnthread_set_info(curr, XNTIMEO);
        } else {
 
              grab_and_exit:
 
-               if (xnthread_test_state(thread, XNOTHER))
-                       xnthread_inc_rescnt(thread);
+               if (xnthread_test_state(curr, XNOTHER))
+                       xnthread_inc_rescnt(curr);
 
                if (use_fastlock) {
                        xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
@@ -560,7 +560,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
 
        xnlock_put_irqrestore(&nklock, s);
 
-       return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
+       return xnthread_test_info(curr, XNRMID|XNTIMEO|XNBREAK);
 }
 EXPORT_SYMBOL_GPL(xnsynch_acquire);
 
diff --git a/ksrc/skins/native/buffer.c b/ksrc/skins/native/buffer.c
index 4ae546c..2f35da0 100644
--- a/ksrc/skins/native/buffer.c
+++ b/ksrc/skins/native/buffer.c
@@ -361,7 +361,7 @@ ssize_t rt_buffer_write_inner(RT_BUFFER *bf,
                              struct xnbufd *bufd,
                              xntmode_t timeout_mode, RTIME timeout)
 {
-       xnthread_t *thread, *waiter;
+       xnthread_t *curr, *waiter;
        size_t len, rbytes, n;
        xnflags_t info;
        u_long wrtoken;
@@ -486,8 +486,8 @@ redo:
                        break;
                }
 
-               thread = xnpod_current_thread();
-               thread->wait_u.size = len;
+               curr = xnpod_current_thread();
+               curr->wait_u.size = len;
                info = xnsynch_sleep_on(&bf->osynch_base,
                                        timeout, timeout_mode);
                if (info & XNRMID) {
@@ -520,7 +520,7 @@ ssize_t rt_buffer_read_inner(RT_BUFFER *bf,
                             struct xnbufd *bufd,
                             xntmode_t timeout_mode, RTIME timeout)
 {
-       xnthread_t *thread, *waiter;
+       xnthread_t *curr, *waiter;
        size_t len, rbytes, n;
        xnflags_t info;
        u_long rdtoken;
@@ -658,8 +658,8 @@ redo:
                        goto redo;
                }
 
-               thread = xnpod_current_thread();
-               thread->wait_u.bufd =  bufd;
+               curr = xnpod_current_thread();
+               curr->wait_u.bufd =  bufd;
                info = xnsynch_sleep_on(&bf->isynch_base,
                                        timeout, timeout_mode);
                if (info & XNRMID) {
diff --git a/ksrc/skins/native/cond.c b/ksrc/skins/native/cond.c
index 2278ad2..9999ca2 100644
--- a/ksrc/skins/native/cond.c
+++ b/ksrc/skins/native/cond.c
@@ -397,7 +397,7 @@ int rt_cond_broadcast(RT_COND *cond)
 int rt_cond_wait_prologue(RT_COND *cond, RT_MUTEX *mutex, unsigned *plockcnt,
                       xntmode_t timeout_mode, RTIME timeout)
 {
-       xnthread_t *thread;
+       xnthread_t *curr;
        xnflags_t info;
        spl_t s;
        int err;
@@ -424,9 +424,9 @@ int rt_cond_wait_prologue(RT_COND *cond, RT_MUTEX *mutex, unsigned *plockcnt,
                goto unlock_and_exit;
        }
 
-       thread = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
-       err = xnsynch_owner_check(&mutex->synch_base, thread);
+       err = xnsynch_owner_check(&mutex->synch_base, curr);
 
        if (err)
                goto unlock_and_exit;
diff --git a/ksrc/skins/native/heap.c b/ksrc/skins/native/heap.c
index bd7d93b..9ae65de 100644
--- a/ksrc/skins/native/heap.c
+++ b/ksrc/skins/native/heap.c
@@ -553,7 +553,7 @@ int rt_heap_delete(RT_HEAP *heap)
 int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
 {
        void *block = NULL;
-       xnthread_t *thread;
+       xnthread_t *curr;
        xnflags_t info;
        int err = 0;
        spl_t s;
@@ -613,9 +613,9 @@ int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
                goto unlock_and_exit;
        }
 
-       thread = xnpod_current_thread();
-       thread->wait_u.buffer.size = size;
-       thread->wait_u.buffer.ptr = NULL;
+       curr = xnpod_current_thread();
+       curr->wait_u.buffer.size = size;
+       curr->wait_u.buffer.ptr = NULL;
        info = xnsynch_sleep_on(&heap->synch_base, timeout, XN_RELATIVE);
        if (info & XNRMID)
                err = -EIDRM;   /* Heap deleted while pending. */
@@ -624,7 +624,7 @@ int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
        else if (info & XNBREAK)
                err = -EINTR;   /* Unblocked. */
        else
-               block = thread->wait_u.buffer.ptr;
+               block = curr->wait_u.buffer.ptr;
 
       unlock_and_exit:
 
diff --git a/ksrc/skins/native/mutex.c b/ksrc/skins/native/mutex.c
index 9365cbe..43964c2 100644
--- a/ksrc/skins/native/mutex.c
+++ b/ksrc/skins/native/mutex.c
@@ -352,7 +352,7 @@ int rt_mutex_delete(RT_MUTEX *mutex)
 int rt_mutex_acquire_inner(RT_MUTEX *mutex, RTIME timeout,
                           xntmode_t timeout_mode)
 {
-       xnthread_t *thread;
+       xnthread_t *curr;
        xnflags_t info;
 
        if (xnpod_unblockable_p())
@@ -363,9 +363,9 @@ int rt_mutex_acquire_inner(RT_MUTEX *mutex, RTIME timeout,
        if (!mutex)
                return xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
 
-       thread = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
-       if (xnsynch_owner_check(&mutex->synch_base, thread) == 0) {
+       if (xnsynch_owner_check(&mutex->synch_base, curr) == 0) {
                mutex->lockcnt++;
                return 0;
        }
@@ -373,9 +373,9 @@ int rt_mutex_acquire_inner(RT_MUTEX *mutex, RTIME timeout,
        if (timeout == TM_NONBLOCK && timeout_mode == XN_RELATIVE) {
 #ifdef CONFIG_XENO_FASTSYNCH
                if (xnsynch_fast_acquire(mutex->synch_base.fastlock,
-                                        xnthread_handle(thread)) == 0) {
-                       if (xnthread_test_state(thread, XNOTHER))
-                               xnthread_inc_rescnt(thread);
+                                        xnthread_handle(curr)) == 0) {
+                       if (xnthread_test_state(curr, XNOTHER))
+                               xnthread_inc_rescnt(curr);
                        mutex->lockcnt = 1;
                        return 0;
                } else
@@ -387,8 +387,8 @@ int rt_mutex_acquire_inner(RT_MUTEX *mutex, RTIME timeout,
 
                xnlock_get_irqsave(&nklock, s);
                if (xnsynch_owner(&mutex->synch_base) == NULL) {
-                       if (xnthread_test_state(thread, XNOTHER))
-                               xnthread_inc_rescnt(thread);
+                       if (xnthread_test_state(curr, XNOTHER))
+                               xnthread_inc_rescnt(curr);
                        mutex->lockcnt = 1;
                } else
                        err = -EWOULDBLOCK;
@@ -582,7 +582,7 @@ int rt_mutex_acquire_until(RT_MUTEX *mutex, RTIME timeout)
 
 int rt_mutex_release(RT_MUTEX *mutex)
 {
-       xnthread_t *thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        int err;
 
        if (xnpod_unblockable_p())
@@ -593,7 +593,7 @@ int rt_mutex_release(RT_MUTEX *mutex)
        if (!mutex)
                return xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
 
-       err = xnsynch_owner_check(&mutex->synch_base, thread);
+       err = xnsynch_owner_check(&mutex->synch_base, curr);
        if (err)
                return err;
 
diff --git a/ksrc/skins/native/queue.c b/ksrc/skins/native/queue.c
index 9de8c72..8f0e145 100644
--- a/ksrc/skins/native/queue.c
+++ b/ksrc/skins/native/queue.c
@@ -783,7 +783,7 @@ ssize_t rt_queue_receive_inner(RT_QUEUE *q, void **bufp,
                               xntmode_t timeout_mode, RTIME timeout)
 {
        rt_queue_msg_t *msg = NULL;
-       xnthread_t *thread;
+       xnthread_t *curr;
        xnholder_t *holder;
        ssize_t err = 0;
        xnflags_t info;
@@ -823,9 +823,9 @@ ssize_t rt_queue_receive_inner(RT_QUEUE *q, void **bufp,
                else if (info & XNBREAK)
                        err = -EINTR;   /* Unblocked. */
                else {
-                       thread = xnpod_current_thread();
-                       msg = thread->wait_u.buffer.ptr;
-                       thread->wait_u.buffer.ptr = NULL;
+                       curr = xnpod_current_thread();
+                       msg = curr->wait_u.buffer.ptr;
+                       curr->wait_u.buffer.ptr = NULL;
                }
        }
 
diff --git a/ksrc/skins/native/syscall.c b/ksrc/skins/native/syscall.c
index fab7640..950b092 100644
--- a/ksrc/skins/native/syscall.c
+++ b/ksrc/skins/native/syscall.c
@@ -1019,7 +1019,7 @@ static int __rt_timer_inquire(struct pt_regs *regs)
 
 static int __rt_timer_spin(struct pt_regs *regs)
 {
-       xnthread_t *thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        struct task_struct *p = current;
        RTIME etime;
        RTIME ns;
@@ -1030,7 +1030,7 @@ static int __rt_timer_spin(struct pt_regs *regs)
 
        etime = xnarch_get_cpu_tsc() + xnarch_ns_to_tsc(ns);
        while ((SRTIME)(xnarch_get_cpu_tsc() - etime) < 0) {
-               if (signal_pending(p) || xnthread_amok_p(thread))
+               if (signal_pending(p) || xnthread_amok_p(curr))
                        return -EINTR;
                cpu_relax();
        }
@@ -2958,7 +2958,7 @@ static int __rt_alarm_stop(struct pt_regs *regs)
 
 static int __rt_alarm_wait(struct pt_regs *regs)
 {
-       xnthread_t *thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        union xnsched_policy_param param;
        RT_ALARM_PLACEHOLDER ph;
        RT_ALARM *alarm;
@@ -2981,10 +2981,10 @@ static int __rt_alarm_wait(struct pt_regs *regs)
                goto unlock_and_exit;
        }
 
-       if (xnthread_base_priority(thread) != XNSCHED_IRQ_PRIO) {
+       if (xnthread_base_priority(curr) != XNSCHED_IRQ_PRIO) {
                /* Boost the waiter above all regular tasks if needed. */
                param.rt.prio = XNSCHED_IRQ_PRIO;
-               xnpod_set_thread_schedparam(thread, &xnsched_class_rt, &param);
+               xnpod_set_thread_schedparam(curr, &xnsched_class_rt, &param);
        }
 
        info = xnsynch_sleep_on(&alarm->synch_base, XN_INFINITE, XN_RELATIVE);
@@ -3179,7 +3179,7 @@ static int __rt_intr_wait(struct pt_regs *regs)
 {
        union xnsched_policy_param param;
        RT_INTR_PLACEHOLDER ph;
-       xnthread_t *thread;
+       xnthread_t *curr;
        xnflags_t info;
        RTIME timeout;
        RT_INTR *intr;
@@ -3209,12 +3209,12 @@ static int __rt_intr_wait(struct pt_regs *regs)
        }
 
        if (!intr->pending) {
-               thread = xnpod_current_thread();
+               curr = xnpod_current_thread();
 
-               if (xnthread_base_priority(thread) != XNSCHED_IRQ_PRIO) {
+               if (xnthread_base_priority(curr) != XNSCHED_IRQ_PRIO) {
                        /* Boost the waiter above all regular tasks if needed. */
                        param.rt.prio = XNSCHED_IRQ_PRIO;
-                       xnpod_set_thread_schedparam(thread, &xnsched_class_rt, &param);
+                       xnpod_set_thread_schedparam(curr, &xnsched_class_rt, &param);
                }
 
                info = xnsynch_sleep_on(&intr->synch_base,
diff --git a/ksrc/skins/posix/clock.c b/ksrc/skins/posix/clock.c
index 107ebb0..af3a948 100644
--- a/ksrc/skins/posix/clock.c
+++ b/ksrc/skins/posix/clock.c
@@ -306,7 +306,7 @@ int clock_nanosleep(clockid_t clock_id,
                    int flags,
                    const struct timespec *rqtp, struct timespec *rmtp)
 {
-       xnthread_t *cur;
+       xnthread_t *curr;
        spl_t s;
        int err = 0;
 
@@ -322,23 +322,23 @@ int clock_nanosleep(clockid_t clock_id,
        if (flags & ~TIMER_ABSTIME)
                return EINVAL;
 
-       cur = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
        xnlock_get_irqsave(&nklock, s);
 
-       thread_cancellation_point(cur);
+       thread_cancellation_point(curr);
 
-       xnpod_suspend_thread(cur, XNDELAY, ts2ticks_ceil(rqtp) + 1,
+       xnpod_suspend_thread(curr, XNDELAY, ts2ticks_ceil(rqtp) + 1,
                             clock_flag(flags, clock_id), NULL);
 
-       thread_cancellation_point(cur);
+       thread_cancellation_point(curr);
 
-       if (xnthread_test_info(cur, XNBREAK)) {
+       if (xnthread_test_info(curr, XNBREAK)) {
 
                if (flags == 0 && rmtp) {
                        xnsticks_t rem;
 
-                       rem = xntimer_get_timeout_stopped(&cur->rtimer);
+                       rem = xntimer_get_timeout_stopped(&curr->rtimer);
                        xnlock_put_irqrestore(&nklock, s);
 
                        ticks2ts(rmtp, rem > 1 ? rem : 0);
diff --git a/ksrc/skins/posix/cond.c b/ksrc/skins/posix/cond.c
index 8ccd6e0..e32a06f 100644
--- a/ksrc/skins/posix/cond.c
+++ b/ksrc/skins/posix/cond.c
@@ -222,7 +222,7 @@ int pthread_cond_destroy(pthread_cond_t * cnd)
 
    Note: this function is very similar to mutex_unlock_internal() in mutex.c.
 */
-static inline int mutex_save_count(xnthread_t *cur,
+static inline int mutex_save_count(xnthread_t *curr,
                                   struct __shadow_mutex *shadow,
                                   unsigned *count_ptr)
 {
@@ -233,7 +233,7 @@ static inline int mutex_save_count(xnthread_t *cur,
            || !pse51_obj_active(mutex, PSE51_MUTEX_MAGIC, struct pse51_mutex))
                 return EINVAL;
 
-       if (xnsynch_owner_check(&mutex->synchbase, cur) != 0)
+       if (xnsynch_owner_check(&mutex->synchbase, curr) != 0)
                return EPERM;
 
        *count_ptr = shadow->lockcnt;
@@ -246,7 +246,7 @@ static inline int mutex_save_count(xnthread_t *cur,
        return 0;
 }
 
-int pse51_cond_timedwait_prologue(xnthread_t *cur,
+int pse51_cond_timedwait_prologue(xnthread_t *curr,
                                  struct __shadow_cond *shadow,
                                  struct __shadow_mutex *mutex,
                                  unsigned *count_ptr,
@@ -265,7 +265,7 @@ int pse51_cond_timedwait_prologue(xnthread_t *cur,
 
        xnlock_get_irqsave(&nklock, s);
 
-       thread_cancellation_point(cur);
+       thread_cancellation_point(curr);
 
        cond = shadow->cond;
 
@@ -284,7 +284,7 @@ int pse51_cond_timedwait_prologue(xnthread_t *cur,
 
        /* Unlock mutex, with its previous recursive lock count stored
           in "*count_ptr". */
-       err = mutex_save_count(cur, mutex, count_ptr);
+       err = mutex_save_count(curr, mutex, count_ptr);
        if (err)
                goto unlock_and_return;
 
@@ -317,9 +317,9 @@ int pse51_cond_timedwait_prologue(xnthread_t *cur,
 
        err = 0;
 
-       if (xnthread_test_info(cur, XNBREAK))
+       if (xnthread_test_info(curr, XNBREAK))
                err = EINTR;
-       else if (xnthread_test_info(cur, XNTIMEO))
+       else if (xnthread_test_info(curr, XNTIMEO))
                err = ETIMEDOUT;
 
       unlock_and_return:
@@ -328,7 +328,7 @@ int pse51_cond_timedwait_prologue(xnthread_t *cur,
        return err;
 }
 
-int pse51_cond_timedwait_epilogue(xnthread_t *cur,
+int pse51_cond_timedwait_epilogue(xnthread_t *curr,
                                  struct __shadow_cond *shadow,
                                  struct __shadow_mutex *mutex, unsigned count)
 {
@@ -340,7 +340,7 @@ int pse51_cond_timedwait_epilogue(xnthread_t *cur,
 
        cond = shadow->cond;
 
-       err = pse51_mutex_timedlock_internal(cur, mutex, count, 0, XN_INFINITE);
+       err = pse51_mutex_timedlock_internal(curr, mutex, count, 0, XN_INFINITE);
 
        if (err == -EINTR)
                goto unlock_and_return;
@@ -352,7 +352,7 @@ int pse51_cond_timedwait_epilogue(xnthread_t *cur,
            && cond->mutex == mutex->mutex)
                cond->mutex = NULL;
 
-       thread_cancellation_point(cur);
+       thread_cancellation_point(curr);
 
       unlock_and_return:
        xnlock_put_irqrestore(&nklock, s);
@@ -415,7 +415,7 @@ int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t * mx)
        struct __shadow_cond *cond = &((union __xeno_cond *)cnd)->shadow_cond;
        struct __shadow_mutex *mutex =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        unsigned count;
        int err;
 
@@ -424,11 +424,11 @@ int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t * mx)
                return EINVAL;
 #endif /* CONFIG_XENO_FASTSYNCH */
 
-       err = pse51_cond_timedwait_prologue(cur, cond, mutex,
+       err = pse51_cond_timedwait_prologue(curr, cond, mutex,
                                            &count, 0, XN_INFINITE);
 
        if (!err || err == EINTR)
-               while (-EINTR == pse51_cond_timedwait_epilogue(cur, cond,
+               while (-EINTR == pse51_cond_timedwait_epilogue(curr, cond,
                                                               mutex, count))
                        ;
 
@@ -483,7 +483,7 @@ int pthread_cond_timedwait(pthread_cond_t * cnd,
        struct __shadow_cond *cond = &((union __xeno_cond *)cnd)->shadow_cond;
        struct __shadow_mutex *mutex =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        unsigned count;
        int err;
 
@@ -492,11 +492,11 @@ int pthread_cond_timedwait(pthread_cond_t * cnd,
                return EINVAL;
 #endif /* CONFIG_XENO_FASTSYNCH */
 
-       err = pse51_cond_timedwait_prologue(cur, cond, mutex, &count, 1,
+       err = pse51_cond_timedwait_prologue(curr, cond, mutex, &count, 1,
                                            ts2ticks_ceil(abstime) + 1);
 
        if (!err || err == EINTR || err == ETIMEDOUT)
-               while (-EINTR == pse51_cond_timedwait_epilogue(cur, cond,
+               while (-EINTR == pse51_cond_timedwait_epilogue(curr, cond,
                                                               mutex, count))
                        ;
 
diff --git a/ksrc/skins/posix/cond.h b/ksrc/skins/posix/cond.h
index ca1fa45..7c90c91 100644
--- a/ksrc/skins/posix/cond.h
+++ b/ksrc/skins/posix/cond.h
@@ -23,14 +23,14 @@
 #include <posix/posix.h>
 #include <posix/mutex.h>
 
-int pse51_cond_timedwait_prologue(xnthread_t *cur,
+int pse51_cond_timedwait_prologue(xnthread_t *curr,
                                  struct __shadow_cond *shadow,
                                  struct __shadow_mutex *mutex,
                                  unsigned *count_ptr,
                                  int timed,
                                  xnticks_t to);
 
-int pse51_cond_timedwait_epilogue(xnthread_t *cur,
+int pse51_cond_timedwait_epilogue(xnthread_t *curr,
                                  struct __shadow_cond *shadow,
                                  struct __shadow_mutex *mutex, unsigned count);
 
diff --git a/ksrc/skins/posix/mq.c b/ksrc/skins/posix/mq.c
index f665ecb..220bea1 100644
--- a/ksrc/skins/posix/mq.c
+++ b/ksrc/skins/posix/mq.c
@@ -525,7 +525,7 @@ static pse51_msg_t *pse51_mq_tryrcv(pse51_mq_t **mqp,
 pse51_msg_t *pse51_mq_timedsend_inner(pse51_mq_t **mqp, mqd_t fd, size_t len,
                                      const struct timespec *abs_timeoutp)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        pse51_msg_t *msg;
        spl_t s;
        int rc;
@@ -564,26 +564,26 @@ pse51_msg_t *pse51_mq_timedsend_inner(pse51_mq_t **mqp, mqd_t fd, size_t len,
 
                mq = node2mq(pse51_desc_node(desc));
 
-               thread_cancellation_point(cur);
+               thread_cancellation_point(curr);
 
                if (abs_timeoutp)
                        xnsynch_sleep_on(&mq->senders, to, XN_REALTIME);
                else
                        xnsynch_sleep_on(&mq->senders, to, XN_RELATIVE);
 
-               thread_cancellation_point(cur);
+               thread_cancellation_point(curr);
 
-               if (xnthread_test_info(cur, XNBREAK)) {
+               if (xnthread_test_info(curr, XNBREAK)) {
                        msg = ERR_PTR(-EINTR);
                        break;
                }
 
-               if (xnthread_test_info(cur, XNTIMEO)) {
+               if (xnthread_test_info(curr, XNTIMEO)) {
                        msg = ERR_PTR(-ETIMEDOUT);
                        break;
                }
 
-               if (xnthread_test_info(cur, XNRMID)) {
+               if (xnthread_test_info(curr, XNRMID)) {
                        msg = ERR_PTR(-EBADF);
                        break;
                }
@@ -653,7 +653,7 @@ int pse51_mq_finish_send(mqd_t fd, pse51_mq_t *mq, pse51_msg_t *msg)
 pse51_msg_t *pse51_mq_timedrcv_inner(pse51_mq_t **mqp, mqd_t fd, size_t len,
                                     const struct timespec *abs_timeoutp)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        pse51_msg_t *msg;
        spl_t s;
        int rc;
@@ -691,26 +691,26 @@ pse51_msg_t *pse51_mq_timedrcv_inner(pse51_mq_t **mqp, mqd_t fd, size_t len,
 
                mq = node2mq(pse51_desc_node(desc));
 
-               thread_cancellation_point(cur);
+               thread_cancellation_point(curr);
 
                if (abs_timeoutp)
                        xnsynch_sleep_on(&mq->receivers, to, XN_REALTIME);
                else
                        xnsynch_sleep_on(&mq->receivers, to, XN_RELATIVE);
 
-               thread_cancellation_point(cur);
+               thread_cancellation_point(curr);
 
-               if (xnthread_test_info(cur, XNRMID)) {
+               if (xnthread_test_info(curr, XNRMID)) {
                        msg = ERR_PTR(-EBADF);
                        break;
                }
 
-               if (xnthread_test_info(cur, XNTIMEO)) {
+               if (xnthread_test_info(curr, XNTIMEO)) {
                        msg = ERR_PTR(-ETIMEDOUT);
                        break;
                }
 
-               if (xnthread_test_info(cur, XNBREAK)) {
+               if (xnthread_test_info(curr, XNBREAK)) {
                        msg = ERR_PTR(-EINTR);
                        break;
                }
diff --git a/ksrc/skins/posix/mutex.c b/ksrc/skins/posix/mutex.c
index 3414340..c38b04b 100644
--- a/ksrc/skins/posix/mutex.c
+++ b/ksrc/skins/posix/mutex.c
@@ -290,16 +290,16 @@ int pthread_mutex_destroy(pthread_mutex_t * mx)
 int pse51_mutex_timedlock_break(struct __shadow_mutex *shadow,
                                int timed, xnticks_t abs_to)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        pse51_mutex_t *mutex;
        spl_t s;
        int err;
 
        /* We need a valid thread handle for the fast lock. */
-       if (xnthread_handle(cur) == XN_NO_HANDLE)
+       if (xnthread_handle(curr) == XN_NO_HANDLE)
                return -EPERM;
 
-       err = pse51_mutex_timedlock_internal(cur, shadow, 1, timed, abs_to);
+       err = pse51_mutex_timedlock_internal(curr, shadow, 1, timed, abs_to);
        if (err != -EBUSY)
                goto unlock_and_return;
 
@@ -317,17 +317,17 @@ int pse51_mutex_timedlock_break(struct __shadow_mutex *shadow,
                                xnsynch_acquire(&mutex->synchbase,
                                                XN_INFINITE, XN_RELATIVE);
 
-                       if (xnthread_test_info(cur, XNBREAK)) {
+                       if (xnthread_test_info(curr, XNBREAK)) {
                                err = -EINTR;
                                break;
                        }
 
-                       if (xnthread_test_info(cur, XNTIMEO)) {
+                       if (xnthread_test_info(curr, XNTIMEO)) {
                                err = -ETIMEDOUT;
                                break;
                        }
 
-                       if (xnthread_test_info(cur, XNRMID)) {
+                       if (xnthread_test_info(curr, XNRMID)) {
                                err = -EINVAL;
                                break;
                        }
@@ -387,7 +387,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mx)
 {
        struct __shadow_mutex *shadow =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        pse51_mutex_t *mutex = shadow->mutex;
        DECLARE_CB_LOCK_FLAGS(s);
        int err;
@@ -415,13 +415,13 @@ int pthread_mutex_trylock(pthread_mutex_t *mx)
 
 #ifdef CONFIG_XENO_FASTSYNCH
        err = -xnsynch_fast_acquire(mutex->synchbase.fastlock,
-                                   xnthread_handle(cur));
+                                   xnthread_handle(curr));
 #else /* !CONFIG_XENO_FASTSYNCH */
        {
                xnthread_t *owner = xnsynch_owner(&mutex->synchbase);
                if (!owner)
                        err = 0;
-               else if (owner == cur)
+               else if (owner == curr)
                        err = EBUSY;
                else
                        err = EAGAIN;
@@ -429,8 +429,8 @@ int pthread_mutex_trylock(pthread_mutex_t *mx)
 #endif /* !CONFIG_XENO_FASTSYNCH */
 
        if (likely(!err)) {
-               if (xnthread_test_state(cur, XNOTHER) && !err)
-                       xnthread_inc_rescnt(cur);
+               if (xnthread_test_state(curr, XNOTHER) && !err)
+                       xnthread_inc_rescnt(curr);
                shadow->lockcnt = 1;
        }
        else if (err == EBUSY) {
@@ -596,7 +596,7 @@ int pthread_mutex_unlock(pthread_mutex_t * mx)
 {
        struct __shadow_mutex *shadow =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        DECLARE_CB_LOCK_FLAGS(s);
        pse51_mutex_t *mutex;
        int err;
@@ -616,7 +616,7 @@ int pthread_mutex_unlock(pthread_mutex_t * mx)
                goto out;
        }
 
-       err = -xnsynch_owner_check(&mutex->synchbase, cur);
+       err = -xnsynch_owner_check(&mutex->synchbase, curr);
        if (err)
                goto out;
 
diff --git a/ksrc/skins/posix/sem.c b/ksrc/skins/posix/sem.c
index 3a57cb0..f1b71b6 100644
--- a/ksrc/skins/posix/sem.c
+++ b/ksrc/skins/posix/sem.c
@@ -558,18 +558,18 @@ static inline int sem_timedwait_internal(struct __shadow_sem *shadow,
                                         int timed, xnticks_t to)
 {
        pse51_sem_t *sem = shadow->sem;
-       xnthread_t *cur;
+       xnthread_t *curr;
        int err;
 
        if (xnpod_unblockable_p())
                return EPERM;
 
-       cur = xnpod_current_thread();
+       curr = xnpod_current_thread();
 
        if ((err = sem_trywait_internal(shadow)) != EAGAIN)
                return err;
 
-       thread_cancellation_point(cur);
+       thread_cancellation_point(curr);
 
        if (timed)
                xnsynch_sleep_on(&sem->synchbase, to, XN_REALTIME);
@@ -577,15 +577,15 @@ static inline int sem_timedwait_internal(struct __shadow_sem *shadow,
                xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE);
 
        /* Handle cancellation requests. */
-       thread_cancellation_point(cur);
+       thread_cancellation_point(curr);
 
-       if (xnthread_test_info(cur, XNRMID))
+       if (xnthread_test_info(curr, XNRMID))
                return EINVAL;
 
-       if (xnthread_test_info(cur, XNBREAK))
+       if (xnthread_test_info(curr, XNBREAK))
                return EINTR;
 
-       if (xnthread_test_info(cur, XNTIMEO))
+       if (xnthread_test_info(curr, XNTIMEO))
                return ETIMEDOUT;
 
        return 0;
diff --git a/ksrc/skins/posix/syscall.c b/ksrc/skins/posix/syscall.c
index bd4651d..401fd61 100644
--- a/ksrc/skins/posix/syscall.c
+++ b/ksrc/skins/posix/syscall.c
@@ -1122,7 +1122,7 @@ static int __pthread_mutex_trylock(struct pt_regs *regs)
 
 static int __pthread_mutex_unlock(struct pt_regs *regs)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        struct __shadow_mutex *shadow;
        union __xeno_mutex mx, *umx;
        DECLARE_CB_LOCK_FLAGS(s);
@@ -1152,7 +1152,7 @@ static int __pthread_mutex_unlock(struct pt_regs *regs)
 
        mutex = shadow->mutex;
 
-       err = (xnsynch_owner(&mutex->synchbase) == cur) ? 0 : -EPERM;
+       err = (xnsynch_owner(&mutex->synchbase) == curr) ? 0 : -EPERM;
        if (err)
                goto out;
 
@@ -1543,7 +1543,7 @@ struct us_cond_data {
 /* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */
 static int __pthread_cond_wait_prologue(struct pt_regs *regs)
 {
-       xnthread_t *cur = xnshadow_thread(current);
+       xnthread_t *curr = xnshadow_thread(current);
        union __xeno_cond cnd, *ucnd;
        union __xeno_mutex mx, *umx;
        struct us_cond_data d;
@@ -1577,14 +1577,14 @@ static int __pthread_cond_wait_prologue(struct pt_regs *regs)
                                             sizeof(ts)))
                        return -EFAULT;
 
-               err = pse51_cond_timedwait_prologue(cur,
+               err = pse51_cond_timedwait_prologue(curr,
                                                    &cnd.shadow_cond,
                                                    &mx.shadow_mutex,
                                                    &d.count,
                                                    timed,
                                                    ts2ticks_ceil(&ts) + 1);
        } else
-               err = pse51_cond_timedwait_prologue(cur,
+               err = pse51_cond_timedwait_prologue(curr,
                                                    &cnd.shadow_cond,
                                                    &mx.shadow_mutex,
                                                    &d.count,
@@ -1594,7 +1594,7 @@ static int __pthread_cond_wait_prologue(struct pt_regs *regs)
        case 0:
        case ETIMEDOUT:
                perr = d.err = err;
-               err = -pse51_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
+               err = -pse51_cond_timedwait_epilogue(curr, &cnd.shadow_cond,
                                                    &mx.shadow_mutex, d.count);
                if (err == 0 &&
                    __xn_safe_copy_to_user((void __user *)
@@ -1620,7 +1620,7 @@ static int __pthread_cond_wait_prologue(struct pt_regs *regs)
 /* pthread_cond_wait_epilogue(cond, mutex, count) */
 static int __pthread_cond_wait_epilogue(struct pt_regs *regs)
 {
-       xnthread_t *cur = xnshadow_thread(current);
+       xnthread_t *curr = xnshadow_thread(current);
        union __xeno_cond cnd, *ucnd;
        union __xeno_mutex mx, *umx;
        unsigned count;
@@ -1646,7 +1646,7 @@ static int __pthread_cond_wait_epilogue(struct pt_regs *regs)
                                     ))
                return -EFAULT;
 
-       err = pse51_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
+       err = pse51_cond_timedwait_epilogue(curr, &cnd.shadow_cond,
                                              &mx.shadow_mutex, count);
 
        if (err == 0
@@ -2157,7 +2157,7 @@ static int __intr_wait(struct pt_regs *regs)
        pthread_intr_t intr = (pthread_intr_t) __xn_reg_arg1(regs);
        union xnsched_policy_param param;
        struct timespec ts;
-       xnthread_t *thread;
+       xnthread_t *curr;
        xnticks_t timeout;
        int err = 0;
        spl_t s;
@@ -2188,21 +2188,21 @@ static int __intr_wait(struct pt_regs *regs)
        }
 
        if (!intr->pending) {
-               thread = xnpod_current_thread();
+               curr = xnpod_current_thread();
 
-               if (xnthread_base_priority(thread) != XNSCHED_IRQ_PRIO) {
+               if (xnthread_base_priority(curr) != XNSCHED_IRQ_PRIO) {
                        /* Boost the waiter above all regular threads if needed. */
                        param.rt.prio = XNSCHED_IRQ_PRIO;
-                       xnpod_set_thread_schedparam(thread, &xnsched_class_rt, &param);
+                       xnpod_set_thread_schedparam(curr, &xnsched_class_rt, &param);
                }
 
                xnsynch_sleep_on(&intr->synch_base, timeout, XN_RELATIVE);
 
-               if (xnthread_test_info(thread, XNRMID))
+               if (xnthread_test_info(curr, XNRMID))
                        err = -EIDRM;   /* Interrupt object deleted while pending. */
-               else if (xnthread_test_info(thread, XNTIMEO))
+               else if (xnthread_test_info(curr, XNTIMEO))
                        err = -ETIMEDOUT;       /* Timeout. */
-               else if (xnthread_test_info(thread, XNBREAK))
+               else if (xnthread_test_info(curr, XNBREAK))
                        err = -EINTR;   /* Unblocked. */
                else {
                        err = intr->pending;
@@ -2416,12 +2416,12 @@ static int __select(struct pt_regs *regs)
        xntmode_t mode = XN_RELATIVE;
        struct xnselector *selector;
        struct timeval tv;
-       xnthread_t *thread;
+       xnthread_t *curr;
        int i, err, nfds;
        size_t fds_size;
 
-       thread = xnpod_current_thread();
-       if (!thread)
+       curr = xnpod_current_thread();
+       if (!curr)
                return -EPERM;
 
        if (__xn_reg_arg5(regs)) {
@@ -2453,7 +2453,7 @@ static int __select(struct pt_regs *regs)
                                return -EFAULT;
                }
 
-       selector = thread->selector;
+       selector = curr->selector;
        if (!selector) {
                /* This function may be called from pure Linux fd_sets, we want
                   to avoid the xnselector allocation in this case, so, we do a
@@ -2462,10 +2462,10 @@ static int __select(struct pt_regs *regs)
                if (!first_fd_valid_p(in_fds, nfds))
                        return -EBADF;
 
-               if (!(selector = xnmalloc(sizeof(*thread->selector))))
+               if (!(selector = xnmalloc(sizeof(*curr->selector))))
                        return -ENOMEM;
                xnselector_init(selector);
-               thread->selector = selector;
+               curr->selector = selector;
 
                /* Bind directly the file descriptors, we do not need to go
                   through xnselect returning -ECHRNG */
diff --git a/ksrc/skins/posix/thread.c b/ksrc/skins/posix/thread.c
index 78f5634..f1a96f7 100644
--- a/ksrc/skins/posix/thread.c
+++ b/ksrc/skins/posix/thread.c
@@ -620,16 +620,16 @@ int pthread_make_periodic_np(pthread_t thread,
  */
 int pthread_wait_np(unsigned long *overruns_r)
 {
-       xnthread_t *cur;
+       xnthread_t *curr;
        int err;
 
        if (xnpod_unblockable_p())
                return EPERM;
 
-       cur = xnpod_current_thread();
-       thread_cancellation_point(cur);
+       curr = xnpod_current_thread();
+       thread_cancellation_point(curr);
        err = -xnpod_wait_thread_period(overruns_r);
-       thread_cancellation_point(cur);
+       thread_cancellation_point(curr);
 
        return err;
 }
@@ -667,11 +667,11 @@ int pthread_wait_np(unsigned long *overruns_r)
  */
 int pthread_set_mode_np(int clrmask, int setmask)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        xnflags_t valid_flags = XNLOCK;
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
-       if (xnthread_test_state(cur, XNSHADOW))
+       if (xnthread_test_state(curr, XNSHADOW))
                valid_flags |= XNTHREAD_STATE_SPARE1 | XNTRAPSW | XNRPIOFF;
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
 
@@ -680,7 +680,7 @@ int pthread_set_mode_np(int clrmask, int setmask)
        if ((clrmask & ~valid_flags) != 0 || (setmask & ~valid_flags) != 0)
                return EINVAL;
 
-       xnpod_set_thread_mode(cur,
+       xnpod_set_thread_mode(curr,
                              clrmask & ~XNTHREAD_STATE_SPARE1,
                              setmask & ~XNTHREAD_STATE_SPARE1);
 
@@ -689,7 +689,7 @@ int pthread_set_mode_np(int clrmask, int setmask)
                xnpod_schedule();
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
-       if (xnthread_test_state(cur, XNSHADOW) && (clrmask & XNTHREAD_STATE_SPARE1) != 0)
+       if (xnthread_test_state(curr, XNSHADOW) && (clrmask & XNTHREAD_STATE_SPARE1) != 0)
                xnshadow_relax(0, 0);
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
 
diff --git a/ksrc/skins/rtdm/drvlib.c b/ksrc/skins/rtdm/drvlib.c
index ba3e78c..1320cf0 100644
--- a/ksrc/skins/rtdm/drvlib.c
+++ b/ksrc/skins/rtdm/drvlib.c
@@ -387,15 +387,15 @@ int rtdm_task_sleep_abs(nanosecs_abs_t wakeup_time, enum rtdm_timer_mode mode);
 
 int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode)
 {
-       xnthread_t *thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
 
        XENO_ASSERT(RTDM, !xnpod_unblockable_p(), return -EPERM;);
 
-       xnpod_suspend_thread(thread, XNDELAY,
-                            xntbase_ns2ticks_ceil(xnthread_time_base(thread),
+       xnpod_suspend_thread(curr, XNDELAY,
+                            xntbase_ns2ticks_ceil(xnthread_time_base(curr),
                                                   timeout), mode, NULL);
 
-       return xnthread_test_info(thread, XNBREAK) ? -EINTR : 0;
+       return xnthread_test_info(curr, XNBREAK) ? -EINTR : 0;
 }
 
 EXPORT_SYMBOL_GPL(__rtdm_task_sleep);
@@ -944,7 +944,7 @@ EXPORT_SYMBOL_GPL(rtdm_event_wait);
 int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
                         rtdm_toseq_t *timeout_seq)
 {
-       xnthread_t *thread;
+       xnthread_t *curr;
        spl_t s;
        int err = 0;
 
@@ -970,7 +970,7 @@ int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
                        goto unlock_out;
                }
 
-               thread = xnpod_current_thread();
+               curr = xnpod_current_thread();
 
                if (timeout_seq && (timeout > 0)) {
                        /* timeout sequence */
@@ -980,18 +980,18 @@ int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
                        /* infinite or relative timeout */
                        xnsynch_sleep_on(&event->synch_base,
                                         xntbase_ns2ticks_ceil
-                                        (xnthread_time_base(thread), timeout),
+                                        (xnthread_time_base(curr), timeout),
                                         XN_RELATIVE);
                }
 
                if (likely
-                   (!xnthread_test_info(thread, XNTIMEO | XNRMID | XNBREAK))) {
+                   (!xnthread_test_info(curr, XNTIMEO | XNRMID | XNBREAK))) {
                        xnsynch_clear_flags(&event->synch_base,
                                            RTDM_EVENT_PENDING);
                        xnselect_signal(&event->select_block, 0);
-               } else if (xnthread_test_info(thread, XNTIMEO))
+               } else if (xnthread_test_info(curr, XNTIMEO))
                        err = -ETIMEDOUT;
-               else if (xnthread_test_info(thread, XNRMID))
+               else if (xnthread_test_info(curr, XNRMID))
                        err = -EIDRM;
                else /* XNBREAK */
                        err = -EINTR;
@@ -1229,7 +1229,7 @@ EXPORT_SYMBOL_GPL(rtdm_sem_down);
 int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
                       rtdm_toseq_t *timeout_seq)
 {
-       xnthread_t *thread;
+       xnthread_t *curr;
        spl_t s;
        int err = 0;
 
@@ -1249,7 +1249,7 @@ int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
        } else if (timeout < 0) /* non-blocking mode */
                err = -EWOULDBLOCK;
        else {
-               thread = xnpod_current_thread();
+               curr = xnpod_current_thread();
 
                if (timeout_seq && (timeout > 0)) {
                        /* timeout sequence */
@@ -1259,14 +1259,14 @@ int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
                        /* infinite or relative timeout */
                        xnsynch_sleep_on(&sem->synch_base,
                                         xntbase_ns2ticks_ceil
-                                        (xnthread_time_base(thread), timeout),
+                                        (xnthread_time_base(curr), timeout),
                                         XN_RELATIVE);
                }
 
-               if (xnthread_test_info(thread, XNTIMEO | XNRMID | XNBREAK)) {
-                       if (xnthread_test_info(thread, XNTIMEO))
+               if (xnthread_test_info(curr, XNTIMEO | XNRMID | XNBREAK)) {
+                       if (xnthread_test_info(curr, XNTIMEO))
                                err = -ETIMEDOUT;
-                       else if (xnthread_test_info(thread, XNRMID))
+                       else if (xnthread_test_info(curr, XNRMID))
                                err = -EIDRM;
                        else /* XNBREAK */
                                err = -EINTR;
@@ -1523,7 +1523,7 @@ EXPORT_SYMBOL_GPL(rtdm_mutex_lock);
 int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
                         rtdm_toseq_t *timeout_seq)
 {
-       xnthread_t *curr_thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        spl_t s;
        int err = 0;
 
@@ -1538,10 +1538,10 @@ int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
        if (unlikely(xnsynch_test_flags(&mutex->synch_base,
                                        RTDM_SYNCH_DELETED)))
                err = -EIDRM;
-       else if (!xnthread_try_grab(curr_thread, &mutex->synch_base)) {
+       else if (!xnthread_try_grab(curr, &mutex->synch_base)) {
                /* Redefinition to clarify XENO_ASSERT output */
                #define mutex_owner xnsynch_owner(&mutex->synch_base)
-               XENO_ASSERT(RTDM, mutex_owner != curr_thread,
+               XENO_ASSERT(RTDM, mutex_owner != curr,
                            err = -EDEADLK; goto unlock_out;);
 
                /* non-blocking mode */
@@ -1559,15 +1559,15 @@ restart:
                        /* infinite or relative timeout */
                        xnsynch_acquire(&mutex->synch_base,
                                        xntbase_ns2ticks_ceil
-                                       (xnthread_time_base(curr_thread),
+                                       (xnthread_time_base(curr),
                                         timeout), XN_RELATIVE);
                }
 
-               if (unlikely(xnthread_test_info(curr_thread,
+               if (unlikely(xnthread_test_info(curr,
                                                XNTIMEO | XNRMID | XNBREAK))) {
-                       if (xnthread_test_info(curr_thread, XNTIMEO))
+                       if (xnthread_test_info(curr, XNTIMEO))
                                err = -ETIMEDOUT;
-                       else if (xnthread_test_info(curr_thread, XNRMID))
+                       else if (xnthread_test_info(curr, XNRMID))
                                err = -EIDRM;
                        else /*  XNBREAK */
                                goto restart;
diff --git a/ksrc/skins/vrtx/mx.c b/ksrc/skins/vrtx/mx.c
index f40febb..9d3322b 100644
--- a/ksrc/skins/vrtx/mx.c
+++ b/ksrc/skins/vrtx/mx.c
@@ -204,7 +204,7 @@ int sc_mcreate(unsigned int opt, int *errp)
 
 void sc_mpost(int mid, int *errp)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        vrtxmx_t *mx;
        spl_t s;
 
@@ -212,7 +212,7 @@ void sc_mpost(int mid, int *errp)
 
        mx = xnmap_fetch(vrtx_mx_idmap, mid);
        /* Return ER_ID if the poster does not own the mutex. */
-       if (mx == NULL || xnsynch_owner(&mx->synchbase) != cur) {
+       if (mx == NULL || xnsynch_owner(&mx->synchbase) != curr) {
                *errp = ER_ID;
                goto unlock_and_exit;
        }
@@ -264,7 +264,7 @@ unlock_and_exit:
 
 void sc_mpend(int mid, unsigned long timeout, int *errp)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        vrtxtask_t *task;
        vrtxmx_t *mx;
        spl_t s;
@@ -284,13 +284,13 @@ void sc_mpend(int mid, unsigned long timeout, int *errp)
 
        *errp = RET_OK;
 
-       if (xnthread_try_grab(cur, &mx->synchbase))
+       if (xnthread_try_grab(curr, &mx->synchbase))
                goto unlock_and_exit;
 
-       if (xnsynch_owner(&mx->synchbase) == cur)
+       if (xnsynch_owner(&mx->synchbase) == curr)
                goto unlock_and_exit;
 
-       task = thread2vrtxtask(cur);
+       task = thread2vrtxtask(curr);
        task->vrtxtcb.TCBSTAT = TBSMUTEX;
 
        if (timeout)
@@ -298,11 +298,11 @@ void sc_mpend(int mid, unsigned long timeout, int *errp)
 
        xnsynch_acquire(&mx->synchbase, timeout, XN_RELATIVE);
 
-       if (xnthread_test_info(cur, XNBREAK))
+       if (xnthread_test_info(curr, XNBREAK))
                *errp = -EINTR;
-       else if (xnthread_test_info(cur, XNRMID))
+       else if (xnthread_test_info(curr, XNRMID))
                *errp = ER_DEL; /* Mutex deleted while pending. */
-       else if (xnthread_test_info(cur, XNTIMEO))
+       else if (xnthread_test_info(curr, XNTIMEO))
                *errp = ER_TMO; /* Timeout. */
 
       unlock_and_exit:
diff --git a/ksrc/skins/vxworks/semLib.c b/ksrc/skins/vxworks/semLib.c
index 06b5068..51b60c5 100644
--- a/ksrc/skins/vxworks/semLib.c
+++ b/ksrc/skins/vxworks/semLib.c
@@ -307,7 +307,7 @@ STATUS semFlush(SEM_ID sem_id)
 /* Must be called with nklock locked, interrupts off. */
 static STATUS semb_take(wind_sem_t *sem, xnticks_t to)
 {
-       xnthread_t *thread = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
 
        if (sem->count > 0)
                --sem->count;
@@ -317,13 +317,13 @@ static STATUS semb_take(wind_sem_t *sem, xnticks_t to)
 
                xnsynch_sleep_on(&sem->synchbase, to, XN_RELATIVE);
 
-               error_check(xnthread_test_info(thread, XNBREAK), -EINTR,
+               error_check(xnthread_test_info(curr, XNBREAK), -EINTR,
                            return ERROR);
 
-               error_check(xnthread_test_info(thread, XNRMID),
+               error_check(xnthread_test_info(curr, XNRMID),
                            S_objLib_OBJ_DELETED, return ERROR);
 
-               error_check(xnthread_test_info(thread, XNTIMEO),
+               error_check(xnthread_test_info(curr, XNTIMEO),
                            S_objLib_OBJ_TIMEOUT, return ERROR);
        }
 
@@ -383,12 +383,12 @@ static const sem_vtbl_t semc_vtbl = {
 /* Must be called with nklock locked, interrupts off. */
 static STATUS semm_take(wind_sem_t *sem, xnticks_t to)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
 
-       if (xnthread_try_grab(cur, &sem->synchbase))
+       if (xnthread_try_grab(curr, &sem->synchbase))
                goto grab_sem;
 
-       if (xnsynch_owner(&sem->synchbase) == cur) {
+       if (xnsynch_owner(&sem->synchbase) == curr) {
                sem->count++;
                return OK;
        }
@@ -398,13 +398,13 @@ static STATUS semm_take(wind_sem_t *sem, xnticks_t to)
 
        xnsynch_acquire(&sem->synchbase, to, XN_RELATIVE);
 
-       error_check(xnthread_test_info(cur, XNBREAK),
+       error_check(xnthread_test_info(curr, XNBREAK),
                    -EINTR, return ERROR);
 
-       error_check(xnthread_test_info(cur, XNRMID),
+       error_check(xnthread_test_info(curr, XNRMID),
                    S_objLib_OBJ_DELETED, return ERROR);
 
-       error_check(xnthread_test_info(cur, XNTIMEO),
+       error_check(xnthread_test_info(curr, XNTIMEO),
                    S_objLib_OBJ_TIMEOUT, return ERROR);
  grab_sem:
        /*
@@ -414,7 +414,7 @@ static STATUS semm_take(wind_sem_t *sem, xnticks_t to)
        sem->count = 1;
 
        if (xnsynch_test_flags(&sem->synchbase, WIND_SEM_DEL_SAFE))
-               taskSafeInner(cur);
+               taskSafeInner(curr);
 
        return OK;
 }
@@ -422,12 +422,12 @@ static STATUS semm_take(wind_sem_t *sem, xnticks_t to)
 /* Must be called with nklock locked, interrupts off. */
 static STATUS semm_give(wind_sem_t *sem)
 {
-       xnthread_t *cur = xnpod_current_thread();
+       xnthread_t *curr = xnpod_current_thread();
        int resched = 0;
 
        check_NOT_ISR_CALLABLE(return ERROR);
 
-       if (cur != xnsynch_owner(&sem->synchbase)) {
+       if (curr != xnsynch_owner(&sem->synchbase)) {
                wind_errnoset(S_semLib_INVALID_OPERATION);
                return ERROR;
        }
@@ -441,7 +441,7 @@ static STATUS semm_give(wind_sem_t *sem)
        }
 
        if (xnsynch_test_flags(&sem->synchbase, WIND_SEM_DEL_SAFE))
-               if (taskUnsafeInner(cur))
+               if (taskUnsafeInner(curr))
                        resched = 1;
 
        if (resched)
diff --git a/ksrc/skins/vxworks/syscall.c b/ksrc/skins/vxworks/syscall.c
index 3b26bdd..7c1e158 100644
--- a/ksrc/skins/vxworks/syscall.c
+++ b/ksrc/skins/vxworks/syscall.c
@@ -347,7 +347,7 @@ static int __wind_task_unlock(struct pt_regs *regs)
 static int __wind_task_safe(struct pt_regs *regs)
 {
        xnhandle_t handle = __xn_reg_arg1(regs);
-       xnthread_t *thread;
+       xnthread_t *curr;
        WIND_TCB *pTcb;
        spl_t s;
 
@@ -359,11 +359,11 @@ static int __wind_task_safe(struct pt_regs *regs)
                        xnlock_put_irqrestore(&nklock, s);
                        return S_objLib_OBJ_ID_ERROR;
                }
-               thread = &pTcb->threadbase;
+               curr = &pTcb->threadbase;
        } else
-               thread = xnpod_current_thread();
+               curr = xnpod_current_thread();
 
-       taskSafeInner(thread);
+       taskSafeInner(curr);
        xnlock_put_irqrestore(&nklock, s);
 
        return 0;

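A note on the pattern, for readers skimming the hunks above: the change is purely
mechanical. Locals previously spelled "thread", "cur" or "curr_thread" that hold
the calling thread, obtained via xnpod_current_thread() or xnshadow_thread(current),
become "curr" throughout. The shape the rename touches most often is the
sleep-and-decode idiom, sketched below under the post-patch convention. This is an
illustration only, not code from the tree; demo_wait() is a made-up helper, not a
Xenomai API.

        #include <nucleus/pod.h>
        #include <nucleus/synch.h>
        #include <nucleus/thread.h>

        /* Sketch only. Caller holds nklock with interrupts off, as the
         * call sites in this patch do: block on a wait object, then
         * decode why we woke up. */
        static int demo_wait(struct xnsynch *synch, xnticks_t timeout)
        {
                xnthread_t *curr = xnpod_current_thread();

                xnsynch_sleep_on(synch, timeout, XN_RELATIVE);

                if (xnthread_test_info(curr, XNBREAK))
                        return -EINTR;          /* Forcibly unblocked. */
                if (xnthread_test_info(curr, XNRMID))
                        return -EIDRM;          /* Object deleted while pending. */
                if (xnthread_test_info(curr, XNTIMEO))
                        return -ETIMEDOUT;      /* Timeout elapsed. */

                return 0;                       /* Genuine wakeup. */
        }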

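The other recurring shape is the try-grab fast path on ownership-tracking objects,
seen in semm_take(), sc_mpend() and rtdm_mutex_timedlock() above: attempt an
immediate grab and fall back to a blocking acquire only when the object is already
owned. Again a sketch with a hypothetical helper, assuming the same includes and
locking context as the previous one; demo_take() is not a Xenomai API.

        /* Sketch only: fast-path grab, then blocking acquire. */
        static int demo_take(struct xnsynch *synch, xnticks_t timeout)
        {
                xnthread_t *curr = xnpod_current_thread();

                if (xnthread_try_grab(curr, synch))
                        return 0;       /* Uncontended: curr owns the object now. */

                xnsynch_acquire(synch, timeout, XN_RELATIVE);

                if (xnthread_test_info(curr, XNBREAK))
                        return -EINTR;
                if (xnthread_test_info(curr, XNRMID))
                        return -EIDRM;
                if (xnthread_test_info(curr, XNTIMEO))
                        return -ETIMEDOUT;

                return 0;               /* curr acquired ownership. */
        }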