Module: xenomai-head
Branch: master
Commit: 6653a9e8eb7339b749989bd74adc3ac3bd29e4da
URL:    
http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=6653a9e8eb7339b749989bd74adc3ac3bd29e4da

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Oct 10 18:26:38 2010 +0200

nucleus, posix, native: introduce auto-relax feature

The preferred runtime mode for non-real-time shadows is relaxed. This
patch enforces that rule by downgrading such threads to secondary mode
if necessary after each syscall, unless they currently own a resource —
i.e. anything based on a synchronization object with ownership tracking
(xnsynch with PIP support).

---

 include/nucleus/thread.h   |   10 +++++++++-
 ksrc/nucleus/shadow.c      |   34 ++++++++++++++++++++++++++--------
 ksrc/nucleus/synch.c       |    9 ++++++++-
 ksrc/nucleus/thread.c      |    1 +
 src/skins/common/current.c |    2 +-
 src/skins/native/mutex.c   |   21 ++++++++++++++++++++-
 src/skins/posix/mutex.c    |   39 ++++++++++++++++++++++++++++++++-------
 7 files changed, 97 insertions(+), 19 deletions(-)

diff --git a/include/nucleus/thread.h b/include/nucleus/thread.h
index 725233f..d14f7c3 100644
--- a/include/nucleus/thread.h
+++ b/include/nucleus/thread.h
@@ -255,6 +255,8 @@ typedef struct xnthread {
        struct xnsynch *wchan;          /* Resource the thread pends on */
 
        struct xnsynch *wwake;          /* Wait channel the thread was resumed 
from */
+
+       int hrescnt;                    /* Held resources count */
        
        xntimer_t rtimer;               /* Resource timer */
 
@@ -393,7 +395,13 @@ typedef struct xnhook {
 #define xnthread_sigpending(thread) ((thread)->u_sigpending)
 #define xnthread_set_sigpending(thread, pending) \
        ((thread)->u_sigpending = (pending))
-#endif /* CONFIG_XENO_OPT_PERVASIVE */
+#define xnthread_inc_rescnt(thread)        ({ (thread)->hrescnt++; })
+#define xnthread_dec_rescnt(thread)        ({ --(thread)->hrescnt; })
+#define xnthread_get_rescnt(thread)        ((thread)->hrescnt)
+#else /* !CONFIG_XENO_OPT_PERVASIVE */
+#define xnthread_inc_rescnt(thread)        do { } while (0)
+#define xnthread_dec_rescnt(thread)        do { } while (0)
+#endif /* !CONFIG_XENO_OPT_PERVASIVE */
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 #define xnthread_amok_p(thread)            xnthread_test_info(thread, XNAMOK)
 #define xnthread_clear_amok(thread)        xnthread_clear_info(thread, XNAMOK)
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index b25423c..f6bea1a 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -2380,11 +2380,19 @@ static inline int do_hisyscall_event(unsigned event, 
unsigned domid, void *data)
        __xn_status_return(regs, err);
 
        sigs = 0;
-       if (xnpod_shadow_p() &&
-           (signal_pending(p) || xnthread_amok_p(thread))) {
-               sigs = 1;
-               xnthread_clear_amok(thread);
-               request_syscall_restart(thread, regs, sysflags);
+       if (xnpod_shadow_p()) {
+               if (signal_pending(p) || xnthread_amok_p(thread)) {
+                       sigs = 1;
+                       xnthread_clear_amok(thread);
+                       request_syscall_restart(thread, regs, sysflags);
+               } else if (!xnthread_sigpending(thread) &&
+                          xnthread_test_state(thread, XNOTHER) &&
+                          xnthread_get_rescnt(thread) == 0) {
+                       if (switched)
+                               switched = 0;
+                       else
+                               xnshadow_relax(0, 0);
+               }
        }
        if (thread && xnthread_sigpending(thread)) {
                sigs = 1;
@@ -2540,9 +2548,19 @@ static inline int do_losyscall_event(unsigned event, 
unsigned domid, void *data)
        __xn_status_return(regs, err);
 
        sigs = 0;
-       if (xnpod_active_p() && xnpod_shadow_p() && signal_pending(current)) {
-               sigs = 1;
-               request_syscall_restart(xnshadow_thread(current), regs, 
sysflags);
+       if (xnpod_active_p() && xnpod_shadow_p()) {
+               /*
+                * We may have gained a shadow TCB from the syscall we
+                * just invoked, so make sure to fetch it.
+                */
+               thread = xnshadow_thread(current);
+               if (signal_pending(current)) {
+                       sigs = 1;
+                       request_syscall_restart(thread, regs, sysflags);
+               } else if (!xnthread_sigpending(thread) &&
+                          xnthread_test_state(thread, XNOTHER) &&
+                          xnthread_get_rescnt(thread) == 0)
+                       sysflags |= __xn_exec_switchback;
        }
        if (thread && xnthread_sigpending(thread)) {
                sigs = 1;
diff --git a/ksrc/nucleus/synch.c b/ksrc/nucleus/synch.c
index 03f0952..3a53527 100644
--- a/ksrc/nucleus/synch.c
+++ b/ksrc/nucleus/synch.c
@@ -424,6 +424,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
                                                 XN_NO_HANDLE, threadh);
 
                if (likely(fastlock == XN_NO_HANDLE)) {
+                       xnthread_inc_rescnt(thread);
                        xnthread_clear_info(thread,
                                            XNRMID | XNTIMEO | XNBREAK);
                        return 0;
@@ -474,6 +475,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
 
                if (!owner) {
                        synch->owner = thread;
+                       xnthread_inc_rescnt(thread);
                        xnthread_clear_info(thread,
                                            XNRMID | XNTIMEO | XNBREAK);
                        goto unlock_and_exit;
@@ -538,6 +540,8 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
 
              grab_and_exit:
 
+               xnthread_inc_rescnt(thread);
+
                if (use_fastlock) {
                        xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
                        /* We are the new owner, update the fastlock
@@ -714,7 +718,10 @@ struct xnthread *xnsynch_release(struct xnsynch *synch)
 
        XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));
 
-       lastownerh = xnthread_handle(xnpod_current_thread());
+       lastowner = xnpod_current_thread();
+       xnthread_dec_rescnt(lastowner);
+       XENO_BUGON(NUCLEUS, xnthread_get_rescnt(lastowner) < 0);
+       lastownerh = xnthread_handle(lastowner);
 
        if (use_fastlock &&
            likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
diff --git a/ksrc/nucleus/thread.c b/ksrc/nucleus/thread.c
index ac38144..82b5d7f 100644
--- a/ksrc/nucleus/thread.c
+++ b/ksrc/nucleus/thread.c
@@ -122,6 +122,7 @@ int xnthread_init(struct xnthread *thread,
        thread->wchan = NULL;
        thread->wwake = NULL;
        thread->wcontext = NULL;
+       thread->hrescnt = 0;
        thread->errcode = 0;
        thread->registry.handle = XN_NO_HANDLE;
        thread->registry.waitkey = NULL;
diff --git a/src/skins/common/current.c b/src/skins/common/current.c
index 91a5088..9903d7b 100644
--- a/src/skins/common/current.c
+++ b/src/skins/common/current.c
@@ -156,5 +156,5 @@ unsigned long xeno_slow_get_current_mode(void)
        if (err < 0)
                return XNRELAX;
 
-       return info.state & XNRELAX;
+       return info.state & (XNRELAX|XNOTHER);
 }
diff --git a/src/skins/native/mutex.c b/src/skins/native/mutex.c
index 8e2a57c..02cdf95 100644
--- a/src/skins/native/mutex.c
+++ b/src/skins/native/mutex.c
@@ -73,13 +73,23 @@ static int rt_mutex_acquire_inner(RT_MUTEX *mutex, RTIME 
timeout, xntmode_t mode
 {
        int err;
 #ifdef CONFIG_XENO_FASTSYNCH
+       unsigned long status;
        xnhandle_t cur;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
                return -EPERM;
 
-       if (likely(!(xeno_get_current_mode() & XNRELAX))) {
+       /*
+        * We track resource ownership for non real-time shadows in
+        * order to handle the auto-relax feature, so we must always
+        * obtain them via a syscall.
+        */
+       status = xeno_get_current_mode();
+       if (unlikely(status & XNOTHER))
+               goto do_syscall;
+
+       if (likely(!(status & XNRELAX))) {
                err = xnsynch_fast_acquire(mutex->fastlock, cur);
                if (likely(!err)) {
                        mutex->lockcnt = 1;
@@ -113,6 +123,7 @@ static int rt_mutex_acquire_inner(RT_MUTEX *mutex, RTIME 
timeout, xntmode_t mode
        }
 #endif /* CONFIG_XENO_FASTSYNCH */
 
+do_syscall:
        err = XENOMAI_SKINCALL3(__native_muxid,
                                __native_mutex_acquire, mutex, mode, &timeout);
 
@@ -137,12 +148,18 @@ int rt_mutex_acquire_until(RT_MUTEX *mutex, RTIME timeout)
 int rt_mutex_release(RT_MUTEX *mutex)
 {
 #ifdef CONFIG_XENO_FASTSYNCH
+       unsigned long status;
        xnhandle_t cur;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
                return -EPERM;
 
+       status = xeno_get_current_mode();
+       if (unlikely(status & XNOTHER))
+               /* See rt_mutex_acquire_inner() */
+               goto do_syscall;
+
        if (unlikely(xnsynch_fast_owner_check(mutex->fastlock, cur) != 0))
                return -EPERM;
 
@@ -153,6 +170,8 @@ int rt_mutex_release(RT_MUTEX *mutex)
 
        if (likely(xnsynch_fast_release(mutex->fastlock, cur)))
                return 0;
+
+do_syscall:
 #endif /* CONFIG_XENO_FASTSYNCH */
 
        return XENOMAI_SKINCALL1(__native_muxid, __native_mutex_release, mutex);
diff --git a/src/skins/posix/mutex.c b/src/skins/posix/mutex.c
index 08eb441..6f60d93 100644
--- a/src/skins/posix/mutex.c
+++ b/src/skins/posix/mutex.c
@@ -150,21 +150,29 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
        int err;
 
 #ifdef CONFIG_XENO_FASTSYNCH
+       unsigned long status;
        xnhandle_t cur;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
+       status = xeno_get_current_mode();
+
        if (unlikely(cb_try_read_lock(&shadow->lock, s)))
                return EINVAL;
 
-       if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
+       if (shadow->magic != PSE51_MUTEX_MAGIC) {
                err = -EINVAL;
                goto out;
        }
 
-       if (likely(!(xeno_get_current_mode() & XNRELAX))) {
+       /*
+        * We track resource ownership for non real-time shadows in
+        * order to handle the auto-relax feature, so we must always
+        * obtain them via a syscall.
+        */
+       if (likely(!(status & (XNRELAX|XNOTHER)))) {
                err = xnsynch_fast_acquire(get_ownerp(shadow), cur);
 
                if (likely(!err)) {
@@ -214,12 +222,15 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
        int err;
 
 #ifdef CONFIG_XENO_FASTSYNCH
+       unsigned long status;
        xnhandle_t cur;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
+       status = xeno_get_current_mode();
+
        if (unlikely(cb_try_read_lock(&shadow->lock, s)))
                return EINVAL;
 
@@ -228,7 +239,8 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
                goto out;
        }
 
-       if (likely(!(xeno_get_current_mode() & XNRELAX))) {
+       /* See __wrap_pthread_mutex_lock() */
+       if (likely(!(status & (XNRELAX|XNOTHER)))) {
                err = xnsynch_fast_acquire(get_ownerp(shadow), cur);
 
                if (likely(!err)) {
@@ -278,12 +290,17 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
        int err;
 
 #ifdef CONFIG_XENO_FASTSYNCH
+       unsigned long status;
        xnhandle_t cur;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
+       status = xeno_get_current_mode();
+       if (unlikely(status & XNOTHER))
+               goto do_syscall;
+
        if (unlikely(cb_try_read_lock(&shadow->lock, s)))
                return EINVAL;
 
@@ -292,7 +309,7 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
                goto out;
        }
 
-       if (unlikely(xeno_get_current_mode() & XNRELAX)) {
+       if (unlikely(status & XNRELAX)) {
                do {
                        err = XENOMAI_SYSCALL1(__xn_sys_migrate,
                                               XENOMAI_XENO_DOMAIN);
@@ -322,16 +339,16 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
 
   out:
        cb_read_unlock(&shadow->lock, s);
+       return -err;
 
-#else /* !CONFIG_XENO_FASTSYNCH */
+do_syscall:
+#endif /* !CONFIG_XENO_FASTSYNCH */
 
        do {
                err = XENOMAI_SKINCALL1(__pse51_muxid,
                                        __pse51_mutex_trylock, shadow);
        } while (err == -EINTR);
 
-#endif /* !CONFIG_XENO_FASTSYNCH */
-
        return -err;
 }
 
@@ -343,12 +360,15 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
 
 #ifdef CONFIG_XENO_FASTSYNCH
        xnarch_atomic_t *ownerp;
+       unsigned long status;
        xnhandle_t cur;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
+       status = xeno_get_current_mode();
+
        if (unlikely(cb_try_read_lock(&shadow->lock, s)))
                return EINVAL;
 
@@ -357,6 +377,9 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
                goto out_err;
        }
 
+       if (unlikely(status & XNOTHER))
+               goto do_syscall;
+
        ownerp = get_ownerp(shadow);
 
        err = xnsynch_fast_owner_check(ownerp, cur);
@@ -373,6 +396,8 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
                cb_read_unlock(&shadow->lock, s);
                return 0;
        }
+
+do_syscall:
 #endif /* CONFIG_XENO_FASTSYNCH */
 
        do {


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to