Module: xenomai-forge
Branch: next
Commit: 3b09ec2b4dec367a02b68f03f80eb2b606b06802
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=3b09ec2b4dec367a02b68f03f80eb2b606b06802

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Apr 24 14:39:27 2014 +0200

cobalt/kernel: retrieve current TCB using xnshadow_current() when unambiguous

Unlike xnsched_current_thread(), xnshadow_current() does not involve
accessing protected per-cpu data on SMP. So prefer it when the context
is obviously primary, which guarantees that xnsched_current_thread()
== xnshadow_current().

---

 kernel/cobalt/posix/clock.c   |    2 +-
 kernel/cobalt/posix/monitor.c |   29 +++++++++++++++++------------
 kernel/cobalt/posix/mutex.c   |   24 +++++++++++++-----------
 kernel/cobalt/posix/select.c  |   12 +++++-------
 kernel/cobalt/posix/signal.c  |    2 +-
 kernel/cobalt/posix/thread.c  |    4 ++--
 kernel/cobalt/registry.c      |    3 +--
 kernel/cobalt/rtdm/drvlib.c   |   20 +++++++++++---------
 kernel/cobalt/select.c        |   15 ++++++---------
 kernel/cobalt/shadow.c        |   26 +++++++++++++++++---------
 kernel/cobalt/synch.c         |   18 ++++++++++++++----
 kernel/cobalt/thread.c        |    8 ++++----
 12 files changed, 92 insertions(+), 71 deletions(-)

diff --git a/kernel/cobalt/posix/clock.c b/kernel/cobalt/posix/clock.c
index f98592c..68383a7 100644
--- a/kernel/cobalt/posix/clock.c
+++ b/kernel/cobalt/posix/clock.c
@@ -270,7 +270,7 @@ int cobalt_clock_nanosleep(clockid_t clock_id, int flags,
        if (flags & ~TIMER_ABSTIME)
                return -EINVAL;
 
-       cur = xnsched_current_thread();
+       cur = xnshadow_current();
 
        xnlock_get_irqsave(&nklock, s);
 
diff --git a/kernel/cobalt/posix/monitor.c b/kernel/cobalt/posix/monitor.c
index c6b982e..5507829 100644
--- a/kernel/cobalt/posix/monitor.c
+++ b/kernel/cobalt/posix/monitor.c
@@ -114,9 +114,8 @@ int cobalt_monitor_init(struct cobalt_monitor_shadow __user *u_mon,
 }
 
 /* nklock held, irqs off */
-static int cobalt_monitor_enter_inner(xnhandle_t handle)
+static int cobalt_monitor_enter_inner(xnhandle_t handle, struct xnthread *curr)
 {
-       struct xnthread *cur = xnsched_current_thread();
        struct cobalt_monitor *mon;
        int ret = 0, info;
 
@@ -131,11 +130,11 @@ static int cobalt_monitor_enter_inner(xnhandle_t handle)
         *
         * NOTE: monitors do not support recursive entries.
         */
-       ret = xnsynch_fast_acquire(mon->gate.fastlock, xnthread_handle(cur));
+       ret = xnsynch_fast_acquire(mon->gate.fastlock, xnthread_handle(curr));
        switch(ret) {
        case 0:
-               if (xnthread_test_state(cur, XNWEAK))
-                       xnthread_inc_rescnt(cur);
+               if (xnthread_test_state(curr, XNWEAK))
+                       xnthread_inc_rescnt(curr);
                break;
        default:
                /* Nah, we really have to wait. */
@@ -154,6 +153,7 @@ static int cobalt_monitor_enter_inner(xnhandle_t handle)
 
 int cobalt_monitor_enter(struct cobalt_monitor_shadow __user *u_mon)
 {
+       struct xnthread *curr = xnshadow_current();
        xnhandle_t handle;
        int ret;
        spl_t s;
@@ -161,7 +161,7 @@ int cobalt_monitor_enter(struct cobalt_monitor_shadow __user *u_mon)
        handle = cobalt_get_handle_from_user(&u_mon->handle);
 
        xnlock_get_irqsave(&nklock, s);
-       ret = cobalt_monitor_enter_inner(handle);
+       ret = cobalt_monitor_enter_inner(handle, curr);
        xnlock_put_irqrestore(&nklock, s);
 
        return ret;
@@ -301,7 +301,7 @@ int cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
                        opret = -ETIMEDOUT;
        }
 
-       ret = cobalt_monitor_enter_inner(handle);
+       ret = cobalt_monitor_enter_inner(handle, &curr->threadbase);
 out:
        xnlock_put_irqrestore(&nklock, s);
 
@@ -313,11 +313,13 @@ out:
 int cobalt_monitor_sync(struct cobalt_monitor_shadow __user *u_mon)
 {
        struct cobalt_monitor *mon;
+       struct xnthread *curr;
        xnhandle_t handle;
        int ret = 0;
        spl_t s;
 
        handle = cobalt_get_handle_from_user(&u_mon->handle);
+       curr = xnshadow_current();
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -326,9 +328,9 @@ int cobalt_monitor_sync(struct cobalt_monitor_shadow __user *u_mon)
                ret = -EINVAL;
        else if (mon->data->flags & COBALT_MONITOR_SIGNALED) {
                cobalt_monitor_wakeup(mon);
-               xnsynch_release(&mon->gate, xnsched_current_thread());
+               xnsynch_release(&mon->gate, curr);
                xnsched_run();
-               ret = cobalt_monitor_enter_inner(handle);
+               ret = cobalt_monitor_enter_inner(handle, curr);
        }
 
        xnlock_put_irqrestore(&nklock, s);
@@ -339,11 +341,13 @@ int cobalt_monitor_sync(struct cobalt_monitor_shadow __user *u_mon)
 int cobalt_monitor_exit(struct cobalt_monitor_shadow __user *u_mon)
 {
        struct cobalt_monitor *mon;
+       struct xnthread *curr;
        xnhandle_t handle;
        int ret = 0;
        spl_t s;
 
        handle = cobalt_get_handle_from_user(&u_mon->handle);
+       curr = xnshadow_current();
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -354,7 +358,7 @@ int cobalt_monitor_exit(struct cobalt_monitor_shadow __user *u_mon)
                if (mon->data->flags & COBALT_MONITOR_SIGNALED)
                        cobalt_monitor_wakeup(mon);
 
-               xnsynch_release(&mon->gate, xnsched_current_thread());
+               xnsynch_release(&mon->gate, curr);
                xnsched_run();
        }
 
@@ -385,13 +389,14 @@ static void cobalt_monitor_destroy_inner(struct cobalt_monitor *mon,
 
 int cobalt_monitor_destroy(struct cobalt_monitor_shadow __user *u_mon)
 {
-       struct xnthread *cur = xnsched_current_thread();
        struct cobalt_monitor *mon;
+       struct xnthread *curr;
        xnhandle_t handle;
        int ret = 0;
        spl_t s;
 
        handle = cobalt_get_handle_from_user(&u_mon->handle);
+       curr = xnshadow_current();
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -410,7 +415,7 @@ int cobalt_monitor_destroy(struct cobalt_monitor_shadow __user *u_mon)
         * A monitor must be destroyed by the thread currently holding
         * its gate lock.
         */
-       if (xnsynch_owner_check(&mon->gate, cur)) {
+       if (xnsynch_owner_check(&mon->gate, curr)) {
                ret = -EPERM;
                goto fail;
        }
diff --git a/kernel/cobalt/posix/mutex.c b/kernel/cobalt/posix/mutex.c
index 11f6c78..4015d23 100644
--- a/kernel/cobalt/posix/mutex.c
+++ b/kernel/cobalt/posix/mutex.c
@@ -216,14 +216,14 @@ static inline
 int cobalt_mutex_timedlock_break(struct cobalt_mutex *mutex,
                                 int timed, const struct timespec __user *u_ts)
 {
-       struct xnthread *cur = xnsched_current_thread();
+       struct xnthread *curr = xnshadow_current();
        int ret;
 
        /* We need a valid thread handle for the fast lock. */
-       if (xnthread_handle(cur) == XN_NO_HANDLE)
+       if (xnthread_handle(curr) == XN_NO_HANDLE)
                return -EPERM;
 
-       ret = cobalt_mutex_acquire(cur, mutex, timed, u_ts);
+       ret = cobalt_mutex_acquire(curr, mutex, timed, u_ts);
        if (ret != -EBUSY)
                return ret;
 
@@ -233,9 +233,9 @@ int cobalt_mutex_timedlock_break(struct cobalt_mutex *mutex,
 #if XENO_DEBUG(COBALT)
                printk(XENO_WARN
                       "thread %s deadlocks on non-recursive mutex\n",
-                      cur->name);
+                      curr->name);
 #endif /* XENO_DEBUG(COBALT) */
-               cobalt_mutex_acquire_unchecked(cur, mutex, timed, u_ts);
+               cobalt_mutex_acquire_unchecked(curr, mutex, timed, u_ts);
                break;
 
                /* Recursive mutexes are handled in user-space, so
@@ -359,7 +359,7 @@ int cobalt_mutex_destroy(struct cobalt_mutex_shadow __user *u_mx)
 
 int cobalt_mutex_trylock(struct cobalt_mutex_shadow __user *u_mx)
 {
-       struct xnthread *cur = xnsched_current_thread();
+       struct xnthread *curr = xnshadow_current();
        struct cobalt_mutex *mutex;
        xnhandle_t handle;
        spl_t s;
@@ -375,11 +375,11 @@ int cobalt_mutex_trylock(struct cobalt_mutex_shadow __user *u_mx)
        }
 
        err = xnsynch_fast_acquire(mutex->synchbase.fastlock,
-                                  xnthread_handle(cur));
+                                  xnthread_handle(curr));
        switch(err) {
        case 0:
-               if (xnthread_test_state(cur, XNWEAK))
-                       xnthread_inc_rescnt(cur);
+               if (xnthread_test_state(curr, XNWEAK))
+                       xnthread_inc_rescnt(curr);
                break;
 
 /* This should not happen, as recursive mutexes are handled in
@@ -434,15 +434,17 @@ int cobalt_mutex_timedlock(struct cobalt_mutex_shadow __user *u_mx,
 int cobalt_mutex_unlock(struct cobalt_mutex_shadow __user *u_mx)
 {
        struct cobalt_mutex *mutex;
+       struct xnthread *curr;
        xnhandle_t handle;
        int err;
        spl_t s;
 
        handle = cobalt_get_handle_from_user(&u_mx->handle);
+       curr = xnshadow_current();
 
        xnlock_get_irqsave(&nklock, s);
        mutex = xnregistry_lookup(handle, NULL);
-       err = cobalt_mutex_release(xnsched_current_thread(), mutex);
+       err = cobalt_mutex_release(curr, mutex);
        if (err < 0)
                goto out;
 
@@ -450,7 +452,7 @@ int cobalt_mutex_unlock(struct cobalt_mutex_shadow __user *u_mx)
                xnsched_run();
                err = 0;
        }
-  out:
+ out:
        xnlock_put_irqrestore(&nklock, s);
 
        return err;
diff --git a/kernel/cobalt/posix/select.c b/kernel/cobalt/posix/select.c
index e88439a..a477c60 100644
--- a/kernel/cobalt/posix/select.c
+++ b/kernel/cobalt/posix/select.c
@@ -125,14 +125,12 @@ int cobalt_select(int nfds,
        xnticks_t timeout = XN_INFINITE;
        xntmode_t mode = XN_RELATIVE;
        struct xnselector *selector;
-       struct xnthread *thread;
+       struct xnthread *curr;
        struct timeval tv;
        size_t fds_size;
        int i, err;
 
-       thread = xnsched_current_thread();
-       if (!thread)
-               return -EPERM;
+       curr = xnshadow_current();
 
        if (u_tv) {
                if (!access_wok(u_tv, sizeof(tv))
@@ -160,7 +158,7 @@ int cobalt_select(int nfds,
                                return -EFAULT;
                }
 
-       selector = thread->selector;
+       selector = curr->selector;
        if (!selector) {
                /* This function may be called from pure Linux fd_sets, we want
                   to avoid the xnselector allocation in this case, so, we do a
@@ -169,11 +167,11 @@ int cobalt_select(int nfds,
                if (!first_fd_valid_p(in_fds, nfds))
                        return -EBADF;
 
-               selector = xnmalloc(sizeof(*thread->selector));
+               selector = xnmalloc(sizeof(*curr->selector));
                if (selector == NULL)
                        return -ENOMEM;
                xnselector_init(selector);
-               thread->selector = selector;
+               curr->selector = selector;
 
                /* Bind directly the file descriptors, we do not need to go
                   through xnselect returning -ECHRNG */
diff --git a/kernel/cobalt/posix/signal.c b/kernel/cobalt/posix/signal.c
index 712f073..93a1dda 100644
--- a/kernel/cobalt/posix/signal.c
+++ b/kernel/cobalt/posix/signal.c
@@ -435,7 +435,7 @@ int __cobalt_kill(struct cobalt_thread *thread, int sig, int group) /* nklocked,
                 */
                xnthread_suspend(&thread->threadbase, XNSUSP,
                                 XN_INFINITE, XN_RELATIVE, NULL);
-               if (&thread->threadbase == xnsched_current_thread() &&
+               if (&thread->threadbase == xnshadow_current() &&
                    xnthread_test_info(&thread->threadbase, XNBREAK))
                        ret = EINTR;
                break;
diff --git a/kernel/cobalt/posix/thread.c b/kernel/cobalt/posix/thread.c
index 3ab3879..83f58d0 100644
--- a/kernel/cobalt/posix/thread.c
+++ b/kernel/cobalt/posix/thread.c
@@ -780,7 +780,7 @@ static inline int pthread_make_periodic_np(struct cobalt_thread *thread,
 static inline int pthread_set_mode_np(int clrmask, int setmask, int *mode_r)
 {
        const int valid_flags = XNLOCK|XNTRAPSW|XNTRAPLB;
-       struct xnthread *cur = xnsched_current_thread();
+       struct xnthread *curr = xnshadow_current();
        int old;
 
        /*
@@ -790,7 +790,7 @@ static inline int pthread_set_mode_np(int clrmask, int setmask, int *mode_r)
        if ((clrmask & ~valid_flags) != 0 || (setmask & ~valid_flags) != 0)
                return -EINVAL;
 
-       old = xnthread_set_mode(cur, clrmask, setmask);
+       old = xnthread_set_mode(curr, clrmask, setmask);
        if (mode_r)
                *mode_r = old;
 
diff --git a/kernel/cobalt/registry.c b/kernel/cobalt/registry.c
index 9c84d7c..eaa11e9 100644
--- a/kernel/cobalt/registry.c
+++ b/kernel/cobalt/registry.c
@@ -742,8 +742,6 @@ int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
        if (key == NULL)
                return -EINVAL;
 
-       thread = xnsched_current_thread();
-
        xnlock_get_irqsave(&nklock, s);
 
        if (timeout_mode == XN_RELATIVE &&
@@ -765,6 +763,7 @@ int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
                        goto unlock_and_exit;
                }
 
+               thread = xnshadow_current();
                thread->registry.waitkey = key;
                info = xnsynch_sleep_on(&register_synch, timeout, timeout_mode);
                if (info & XNTIMEO) {
diff --git a/kernel/cobalt/rtdm/drvlib.c b/kernel/cobalt/rtdm/drvlib.c
index 714d69a..f197f07 100644
--- a/kernel/cobalt/rtdm/drvlib.c
+++ b/kernel/cobalt/rtdm/drvlib.c
@@ -402,11 +402,12 @@ int rtdm_task_sleep_abs(nanosecs_abs_t wakeup_time, enum rtdm_timer_mode mode);
 
 int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode)
 {
-       struct xnthread *thread = xnsched_current_thread();
+       struct xnthread *thread;
 
        if (!XENO_ASSERT(RTDM, !xnsched_unblockable_p()))
                return -EPERM;
 
+       thread = xnshadow_current();
        xnthread_suspend(thread, XNDELAY, timeout, mode, NULL);
 
        return xnthread_test_info(thread, XNBREAK) ? -EINTR : 0;
@@ -951,7 +952,7 @@ int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
        if (!XENO_ASSERT(RTDM, !xnsched_unblockable_p()))
                return -EPERM;
 
-       trace_cobalt_driver_event_wait(event, xnsched_current_thread());
+       trace_cobalt_driver_event_wait(event, xnshadow_current());
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -967,7 +968,7 @@ int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
                        goto unlock_out;
                }
 
-               thread = xnsched_current_thread();
+               thread = xnshadow_current();
 
                if (timeout_seq && (timeout > 0)) {
                        /* timeout sequence */
@@ -1228,7 +1229,7 @@ int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
        if (!XENO_ASSERT(RTDM, !xnsched_unblockable_p()))
                return -EPERM;
 
-       trace_cobalt_driver_sem_wait(sem, xnsched_current_thread());
+       trace_cobalt_driver_sem_wait(sem, xnshadow_current());
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -1240,7 +1241,7 @@ int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
        } else if (timeout < 0) /* non-blocking mode */
                err = -EWOULDBLOCK;
        else {
-               thread = xnsched_current_thread();
+               thread = xnshadow_current();
 
                if (timeout_seq && (timeout > 0)) {
                        /* timeout sequence */
@@ -1508,15 +1509,16 @@ EXPORT_SYMBOL_GPL(rtdm_mutex_lock);
 int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
                         rtdm_toseq_t *timeout_seq)
 {
-       struct xnthread *curr_thread = xnsched_current_thread();
-       spl_t s;
+       struct xnthread *curr_thread;
        int err = 0;
-
-       trace_cobalt_driver_mutex_wait(mutex, curr_thread);
+       spl_t s;
 
        if (!XENO_ASSERT(RTDM, !xnsched_unblockable_p()))
                return -EPERM;
 
+       curr_thread = xnshadow_current();
+       trace_cobalt_driver_mutex_wait(mutex, curr_thread);
+
        xnlock_get_irqsave(&nklock, s);
 
        if (unlikely(mutex->synch_base.status & RTDM_SYNCH_DELETED))
diff --git a/kernel/cobalt/select.c b/kernel/cobalt/select.c
index d7fedc7..43212bc 100644
--- a/kernel/cobalt/select.c
+++ b/kernel/cobalt/select.c
@@ -329,15 +329,13 @@ int xnselect(struct xnselector *selector,
             int nfds,
             xnticks_t timeout, xntmode_t timeout_mode)
 {
-       unsigned i, not_empty = 0;
-       struct xnthread *thread;
+       unsigned int i, not_empty = 0, count;
+       int info = 0;
        spl_t s;
 
        if ((unsigned) nfds > __FD_SETSIZE)
                return -EINVAL;
 
-       thread = xnsched_current_thread();
-
        for (i = 0; i < XNSELECT_MAX_TYPES; i++)
                if (out_fds[i])
                        fd_set_zeropad(out_fds[i], nfds);
@@ -361,7 +359,8 @@ int xnselect(struct xnselector *selector,
                        not_empty = 1;
 
        while (!not_empty) {
-               xnsynch_sleep_on(&selector->synchbase, timeout, timeout_mode);
+               info = xnsynch_sleep_on(&selector->synchbase,
+                                       timeout, timeout_mode);
 
                for (i = 0; i < XNSELECT_MAX_TYPES; i++)
                        if (out_fds[i]
@@ -369,14 +368,12 @@ int xnselect(struct xnselector *selector,
                                          &selector->fds[i].pending, nfds))
                                not_empty = 1;
 
-               if (xnthread_test_info(thread, XNBREAK | XNTIMEO))
+               if (info & (XNBREAK | XNTIMEO))
                        break;
        }
        xnlock_put_irqrestore(&nklock, s);
 
        if (not_empty) {
-               unsigned count;
-
                for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
                        if (out_fds[i])
                                count += fd_set_popcount(out_fds[i], nfds);
@@ -384,7 +381,7 @@ int xnselect(struct xnselector *selector,
                return count;
        }
 
-       if (xnthread_test_info(thread, XNBREAK))
+       if (info & XNBREAK)
                return -EINTR;
 
        return 0; /* Timeout */
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 1603aad..04652bf 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -174,10 +174,11 @@ static void *private_lookup(unsigned int muxid)
 {
        struct xnshadow_process *p = xnshadow_current_process();
 
-       if (p == NULL)
-               p = process_hash_search(current->mm);
-       if (p == NULL)
-               return NULL;
+       if (p == NULL) {
+               p = __process_hash_search(current->mm);
+               if (p == NULL)
+                       return NULL;
+       }
 
        return p->priv[muxid];
 }
@@ -526,7 +527,7 @@ EXPORT_SYMBOL_GPL(xnshadow_harden);
  */
 void xnshadow_relax(int notify, int reason)
 {
-       struct xnthread *thread = xnsched_current_thread();
+       struct xnthread *thread = xnshadow_current();
        struct task_struct *p = current;
        int cpu __maybe_unused;
        siginfo_t si;
@@ -1752,12 +1753,19 @@ EXPORT_SYMBOL_GPL(xnshadow_unregister_personality);
  */
 void *xnshadow_get_context(unsigned int muxid)
 {
-       struct xnthread *curr = xnsched_current_thread();
+       struct xnthread *curr;
+       void *context = NULL;
+       spl_t s;
 
-       if (xnthread_test_state(curr, XNROOT|XNUSER))
-               return private_lookup(muxid);
+       xnlock_get_irqsave(&nklock, s);
 
-       return NULL;
+       curr = xnsched_current_thread();
+       if (likely(xnthread_test_state(curr, XNROOT|XNUSER)))
+               context = private_lookup(muxid);
+
+       xnlock_put_irqrestore(&nklock, s);
+
+       return context;
 }
 EXPORT_SYMBOL_GPL(xnshadow_get_context);
 
diff --git a/kernel/cobalt/synch.c b/kernel/cobalt/synch.c
index c35beb9..e2a12dd 100644
--- a/kernel/cobalt/synch.c
+++ b/kernel/cobalt/synch.c
@@ -135,11 +135,15 @@ EXPORT_SYMBOL_GPL(xnsynch_init);
 int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
                     xntmode_t timeout_mode)
 {
-       struct xnthread *thread = xnsched_current_thread();
+       struct xnthread *thread;
        spl_t s;
 
+       primary_mode_only();
+
        XENO_BUGON(NUCLEUS, synch->status & XNSYNCH_OWNER);
 
+       thread = xnshadow_current();
+
        xnlock_get_irqsave(&nklock, s);
 
        trace_cobalt_synch_sleepon(synch, thread);
@@ -336,13 +340,19 @@ static void xnsynch_renice_thread(struct xnthread *thread,
 int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                    xntmode_t timeout_mode)
 {
-       struct xnthread *thread = xnsched_current_thread(), *owner;
-       xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
-       atomic_long_t *lockp = xnsynch_fastlock(synch);
+       xnhandle_t threadh, fastlock, old;
+       struct xnthread *thread, *owner;
+       atomic_long_t *lockp;
        spl_t s;
 
+       primary_mode_only();
+
        XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
 
+       thread = xnshadow_current();
+       threadh = xnthread_handle(thread);
+       lockp = xnsynch_fastlock(synch);
+
        trace_cobalt_synch_acquire(synch, thread);
 redo:
        fastlock = atomic_long_cmpxchg(lockp, XN_NO_HANDLE, threadh);
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 868aeca..1bff44d 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -350,7 +350,7 @@ EXPORT_SYMBOL_GPL(xnthread_get_period);
 
 void xnthread_prepare_wait(struct xnthread_wait_context *wc)
 {
-       struct xnthread *curr = xnsched_current_thread();
+       struct xnthread *curr = xnshadow_current();
 
        wc->posted = 0;
        curr->wcontext = wc;
@@ -1286,7 +1286,7 @@ int xnthread_wait_period(unsigned long *overruns_r)
        int ret = 0;
        spl_t s;
 
-       thread = xnsched_current_thread();
+       thread = xnshadow_current();
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -1525,7 +1525,7 @@ int xnthread_join(struct xnthread *thread, bool uninterruptible)
                return 0;
        }
 
-       if (thread == xnsched_current_thread())
+       if (thread == xnshadow_current())
                ret = -EDEADLK;
        else if (xnsynch_pended_p(&thread->join_synch))
                ret = -EBUSY;
@@ -1579,7 +1579,7 @@ int xnthread_migrate(int cpu)
                goto unlock_and_exit;
        }
 
-       thread = xnsched_current_thread();
+       thread = xnshadow_current();
        if (!cpu_isset(cpu, thread->affinity)) {
                ret = -EINVAL;
                goto unlock_and_exit;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to