Module: xenomai-forge
Branch: master
Commit: 23f34323bbc0abe7f49b30d3b6949c29d9ff2215
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=23f34323bbc0abe7f49b30d3b6949c29d9ff2215

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Mon Nov 14 21:14:54 2011 +0100

cobalt: remove CONFIG_XENO_FASTSYNCH

---

 configure                      |   52 +++-------
 configure.in                   |    9 +--
 include/asm-arm/features.h     |    4 -
 include/asm-generic/features.h |    4 -
 include/cobalt/nucleus/synch.h |   26 -----
 kernel/cobalt/cb_lock.h        |   31 ------
 kernel/cobalt/cond.c           |   18 ----
 kernel/cobalt/cond.h           |    4 -
 kernel/cobalt/mutex.c          |   31 ------
 kernel/cobalt/mutex.h          |    4 -
 kernel/cobalt/nucleus/synch.c  |  156 +++++++++++++-----------------
 kernel/cobalt/syscall.c        |  211 ++--------------------------------------
 lib/cobalt/cond.c              |   14 ---
 lib/cobalt/mutex.c             |   31 +------
 lib/cobalt/printf.c            |    8 --
 lib/cobalt/sem_heap.c          |    2 -
 lib/include/xeno_config.h.in   |    3 -
 17 files changed, 92 insertions(+), 516 deletions(-)

diff --git a/configure b/configure
index 889da79..84359cb 100755
--- a/configure
+++ b/configure
@@ -770,8 +770,6 @@ CONFIG_XENO_DOC_DOX_FALSE
 CONFIG_XENO_DOC_DOX_TRUE
 CONFIG_XENO_SHARED_FALSE
 CONFIG_XENO_SHARED_TRUE
-CONFIG_XENO_FASTSYNCH_FALSE
-CONFIG_XENO_FASTSYNCH_TRUE
 DBX_ABS_SRCDIR_FALSE
 DBX_ABS_SRCDIR_TRUE
 DBX_XSL_ROOT
@@ -5305,8 +5303,6 @@ fi
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for target architecture" >&5
 $as_echo_n "checking for target architecture... " >&6; }
 
-CONFIG_XENO_FASTSYNCH=
-
 if test x$host_alias = x; then
   build_for=$host
 else
@@ -5318,12 +5314,10 @@ case "$build_for" in
  i*86*-*)
        use__thread=yes
        XENO_TARGET_ARCH=x86
-       CONFIG_XENO_FASTSYNCH=y
        CONFIG_XENO_DEFAULT_PERIOD=100000
        ;;
  ppc-*|powerpc-*|powerpc64-*|ppc64-*)
        XENO_TARGET_ARCH=powerpc
-       CONFIG_XENO_FASTSYNCH=y
        CONFIG_XENO_DEFAULT_PERIOD=100000
        ;;
  bfin-*|bfinnommu-*|blackfin-*)
@@ -5339,7 +5333,6 @@ case "$build_for" in
  x86_64-*|amd64-*)
        use__thread=yes
        XENO_TARGET_ARCH=x86
-       CONFIG_XENO_FASTSYNCH=y
        CONFIG_XENO_DEFAULT_PERIOD=100000
        ;;
  nios2-*)
@@ -6043,13 +6036,13 @@ if test "${lt_cv_nm_interface+set}" = set; then :
 else
   lt_cv_nm_interface="BSD nm"
   echo "int some_variable = 0;" > conftest.$ac_ext
-  (eval echo "\"\$as_me:6046: $ac_compile\"" >&5)
+  (eval echo "\"\$as_me:6039: $ac_compile\"" >&5)
   (eval "$ac_compile" 2>conftest.err)
   cat conftest.err >&5
-  (eval echo "\"\$as_me:6049: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+  (eval echo "\"\$as_me:6042: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
   (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
   cat conftest.err >&5
-  (eval echo "\"\$as_me:6052: output\"" >&5)
+  (eval echo "\"\$as_me:6045: output\"" >&5)
   cat conftest.out >&5
   if $GREP 'External.*some_variable' conftest.out > /dev/null; then
     lt_cv_nm_interface="MS dumpbin"
@@ -7254,7 +7247,7 @@ ia64-*-hpux*)
   ;;
 *-*-irix6*)
   # Find out which ABI we are using.
-  echo '#line 7257 "configure"' > conftest.$ac_ext
+  echo '#line 7250 "configure"' > conftest.$ac_ext
   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
   (eval $ac_compile) 2>&5
   ac_status=$?
@@ -8616,11 +8609,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:8619: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:8612: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:8623: \$? = $ac_status" >&5
+   echo "$as_me:8616: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -8955,11 +8948,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:8958: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:8951: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:8962: \$? = $ac_status" >&5
+   echo "$as_me:8955: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9060,11 +9053,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9063: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9056: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:9067: \$? = $ac_status" >&5
+   echo "$as_me:9060: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -9115,11 +9108,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9118: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9111: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:9122: \$? = $ac_status" >&5
+   echo "$as_me:9115: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -11499,7 +11492,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<_LT_EOF
-#line 11502 "configure"
+#line 11495 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -11595,7 +11588,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<_LT_EOF
-#line 11598 "configure"
+#line 11591 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -11986,7 +11979,7 @@ fi
 $as_echo "${use_registry:-no}" >&6; }
 
 if test x$use_registry = xy; then
-        fuse_cflags="-D_FILE_OFFSET_BITS=64 -DFUSE_USE_VERSION=26"
+       fuse_cflags="-D_FILE_OFFSET_BITS=64 -DFUSE_USE_VERSION=26"
        fuse_ldflags="-lfuse"
 
        ac_ext=c
@@ -12680,17 +12673,6 @@ $as_echo "#define CONFIG_XENO_X86_SEP 1" >>confdefs.h
 test x$CONFIG_SMP = xy &&
 $as_echo "#define CONFIG_SMP 1" >>confdefs.h
 
-test x$CONFIG_XENO_FASTSYNCH = xy &&
-$as_echo "#define CONFIG_XENO_FASTSYNCH 1" >>confdefs.h
-
- if test "$CONFIG_XENO_FASTSYNCH" = y; then
-  CONFIG_XENO_FASTSYNCH_TRUE=
-  CONFIG_XENO_FASTSYNCH_FALSE='#'
-else
-  CONFIG_XENO_FASTSYNCH_TRUE='#'
-  CONFIG_XENO_FASTSYNCH_FALSE=
-fi
-
 
 test x$ac_cv_func_fork = xyes &&
 $as_echo "#define CONFIG_MMU 1" >>confdefs.h
@@ -13266,10 +13248,6 @@ if test -z "${DBX_ABS_SRCDIR_TRUE}" && test -z 
"${DBX_ABS_SRCDIR_FALSE}"; then
   as_fn_error $? "conditional \"DBX_ABS_SRCDIR\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
-if test -z "${CONFIG_XENO_FASTSYNCH_TRUE}" && test -z 
"${CONFIG_XENO_FASTSYNCH_FALSE}"; then
-  as_fn_error $? "conditional \"CONFIG_XENO_FASTSYNCH\" was never defined.
-Usually this means the macro was only invoked conditionally." "$LINENO" 5
-fi
 if test -z "${CONFIG_XENO_SHARED_TRUE}" && test -z 
"${CONFIG_XENO_SHARED_FALSE}"; then
   as_fn_error $? "conditional \"CONFIG_XENO_SHARED\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
diff --git a/configure.in b/configure.in
index 72262bb..217fb91 100644
--- a/configure.in
+++ b/configure.in
@@ -83,8 +83,6 @@ AM_PROG_LEX
 
 AC_MSG_CHECKING([for target architecture])
 
-CONFIG_XENO_FASTSYNCH=
-
 if test x$host_alias = x; then
   build_for=$host
 else
@@ -96,12 +94,10 @@ case "$build_for" in
  i*86*-*)
        use__thread=yes
        XENO_TARGET_ARCH=x86
-       CONFIG_XENO_FASTSYNCH=y
        CONFIG_XENO_DEFAULT_PERIOD=100000
        ;;
  ppc-*|powerpc-*|powerpc64-*|ppc64-*)
        XENO_TARGET_ARCH=powerpc
-       CONFIG_XENO_FASTSYNCH=y
        CONFIG_XENO_DEFAULT_PERIOD=100000
        ;;
  bfin-*|bfinnommu-*|blackfin-*)
@@ -117,7 +113,6 @@ case "$build_for" in
  x86_64-*|amd64-*)
        use__thread=yes
        XENO_TARGET_ARCH=x86
-       CONFIG_XENO_FASTSYNCH=y
        CONFIG_XENO_DEFAULT_PERIOD=100000
        ;;
  nios2-*)
@@ -265,7 +260,7 @@ AC_ARG_ENABLE(registry,
 AC_MSG_RESULT(${use_registry:-no})
 
 if test x$use_registry = xy; then
-        fuse_cflags="-D_FILE_OFFSET_BITS=64 -DFUSE_USE_VERSION=26"
+       fuse_cflags="-D_FILE_OFFSET_BITS=64 -DFUSE_USE_VERSION=26"
        fuse_ldflags="-lfuse"
        AC_LANG_SAVE
        AC_LANG_C
@@ -485,8 +480,6 @@ dnl
 
 test x$CONFIG_XENO_X86_SEP = xy && AC_DEFINE(CONFIG_XENO_X86_SEP,1,[config])
 test x$CONFIG_SMP = xy && AC_DEFINE(CONFIG_SMP,1,[config])
-test x$CONFIG_XENO_FASTSYNCH = xy && 
AC_DEFINE(CONFIG_XENO_FASTSYNCH,1,[config])
-AM_CONDITIONAL(CONFIG_XENO_FASTSYNCH,[test "$CONFIG_XENO_FASTSYNCH" = y])
 
 dnl
 dnl Userland may want to know about MMU availability on the target.
diff --git a/include/asm-arm/features.h b/include/asm-arm/features.h
index 187a043..d801d5e 100644
--- a/include/asm-arm/features.h
+++ b/include/asm-arm/features.h
@@ -72,10 +72,6 @@
 #error "Could not find current ARM architecture"
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 6 || !defined(CONFIG_SMP)
-#define CONFIG_XENO_FASTSYNCH 1
-#endif
-
 #if CONFIG_XENO_ARM_TSC_TYPE == __XN_TSC_TYPE_KUSER
 #define CONFIG_XENO_ARM_KUSER_TSC 1
 #endif
diff --git a/include/asm-generic/features.h b/include/asm-generic/features.h
index 6233f73..2eff3c8 100644
--- a/include/asm-generic/features.h
+++ b/include/asm-generic/features.h
@@ -56,11 +56,7 @@ struct xnfeatinfo {
 #define __xn_feat_smp_mask __xn_feat_nosmp
 #endif
 
-#ifdef CONFIG_XENO_FASTSYNCH
 #define __xn_feat_fastsynch_mask __xn_feat_fastsynch
-#else
-#define __xn_feat_fastsynch_mask __xn_feat_nofastsynch
-#endif
 
 /* List of generic features kernel or userland may support */
 #define __xn_feat_generic_mask \
diff --git a/include/cobalt/nucleus/synch.h b/include/cobalt/nucleus/synch.h
index 0b380ad..0794852 100644
--- a/include/cobalt/nucleus/synch.h
+++ b/include/cobalt/nucleus/synch.h
@@ -36,7 +36,6 @@
 #define CONFIG_XENO_OPT_DEBUG_SYNCH_RELAX 0
 #endif /* CONFIG_XENO_OPT_DEBUG_SYNCH_RELAX */
 
-#ifdef CONFIG_XENO_FASTSYNCH
 #define XNSYNCH_FLCLAIM XN_HANDLE_SPARE3 /* Corresponding bit in fast lock */
 
 /* Fast lock API */
@@ -131,22 +130,6 @@ static inline int xnsynch_fast_release(xnarch_atomic_t 
*fastlock,
                                      XN_NO_HANDLE | spares) == cur_ownerh);
 }
 
-#else /* !CONFIG_XENO_FASTSYNCH */
-
-static inline int xnsynch_fast_acquire(xnarch_atomic_t *fastlock,
-                                      xnhandle_t new_ownerh)
-{
-       return -ENOSYS;
-}
-
-static inline int xnsynch_fast_release(xnarch_atomic_t *fastlock,
-                                      xnhandle_t cur_ownerh)
-{
-       return -1;
-}
-
-#endif /* !CONFIG_XENO_FASTSYNCH */
-
 #if defined(__KERNEL__) || defined(__XENO_SIM__)
 
 #define XNSYNCH_CLAIMED 0x10   /* Claimed by other thread(s) w/ PIP */
@@ -182,9 +165,7 @@ typedef struct xnsynch {
 
     struct xnthread *owner; /* Thread which owns the resource */
 
-#ifdef CONFIG_XENO_FASTSYNCH
     xnarch_atomic_t *fastlock; /* Pointer to fast lock word */
-#endif /* CONFIG_XENO_FASTSYNCH */
 
     void (*cleanup)(struct xnsynch *synch); /* Cleanup handler */
 
@@ -200,17 +181,10 @@ typedef struct xnsynch {
 #define xnsynch_pended_p(synch)                (!emptypq_p(&((synch)->pendq)))
 #define xnsynch_owner(synch)           ((synch)->owner)
 
-#ifdef CONFIG_XENO_FASTSYNCH
 #define xnsynch_fastlock(synch)                ((synch)->fastlock)
 #define xnsynch_fastlock_p(synch)      ((synch)->fastlock != NULL)
 #define xnsynch_owner_check(synch, thread) \
        xnsynch_fast_owner_check((synch)->fastlock, xnthread_handle(thread))
-#else /* !CONFIG_XENO_FASTSYNCH */
-#define xnsynch_fastlock(synch)                ((xnarch_atomic_t *)NULL)
-#define xnsynch_fastlock_p(synch)      0
-#define xnsynch_owner_check(synch, thread) \
-       ((synch)->owner == thread ? 0 : -EPERM)
-#endif /* !CONFIG_XENO_FASTSYNCH */
 
 #define xnsynch_fast_is_claimed(fastlock) \
        xnhandle_test_spares(fastlock, XNSYNCH_FLCLAIM)
diff --git a/kernel/cobalt/cb_lock.h b/kernel/cobalt/cb_lock.h
index 699b3d1..6eae926 100644
--- a/kernel/cobalt/cb_lock.h
+++ b/kernel/cobalt/cb_lock.h
@@ -11,17 +11,6 @@ typedef void xnthread_t;
 
 #define __CLAIMED_BIT          XN_HANDLE_SPARE3
 
-#define test_claimed(owner)    xnhandle_test_spare(owner, __CLAIMED_BIT)
-#define clear_claimed(owner)   xnhandle_mask_spare(owner)
-#define set_claimed(owner, bit) ({ \
-       xnhandle_t __tmp = xnhandle_mask_spare(owner); \
-       if (bit) \
-               xnhandle_set_spare(__tmp, __CLAIMED_BIT); \
-       __tmp; \
-})
-
-#ifdef CONFIG_XENO_FASTSYNCH
-
 static  __inline__ int __cb_try_read_lock(xnarch_atomic_t *lock)
 {
        unsigned val = xnarch_atomic_get(lock);
@@ -68,25 +57,5 @@ static __inline__ void __cb_write_unlock(xnarch_atomic_t 
*lock)
 #define cb_try_write_lock(lock, flags) __cb_try_write_lock(lock)
 #define cb_force_write_lock(lock, flags) __cb_force_write_lock(lock)
 #define cb_write_unlock(lock, flags) __cb_write_unlock(lock)
-#else /* !CONFIG_XENO_FASTSYNCH */
-#if defined(__KERNEL__) || defined(__XENO_SIM__)
-#define DECLARE_CB_LOCK_FLAGS(name) spl_t name
-#define cb_try_read_lock(lock, flags) \
-       ({ xnlock_get_irqsave(&nklock, flags); 0; })
-#define cb_read_unlock(lock, flags) xnlock_put_irqrestore(&nklock, flags)
-#define cb_try_write_lock(lock, flags)  \
-       ({ xnlock_get_irqsave(&nklock, flags); 0; })
-#define cb_force_write_lock(lock, flags)  \
-       ({ xnlock_get_irqsave(&nklock, flags); 0; })
-#define cb_write_unlock(lock, flags) xnlock_put_irqrestore(&nklock, flags)
-#else /* !__KERNEL__ */
-#define DECLARE_CB_LOCK_FLAGS(name)
-#define cb_try_read_lock(lock, flags) (0)
-#define cb_read_unlock(lock, flags) do { } while (0)
-#define cb_try_write_lock(lock, flags) (0)
-#define cb_force_write_lock(lock, flags) do { } while (0)
-#define cb_write_unlock(lock, flags) do { } while (0)
-#endif /* !__KERNEL__ */
-#endif /* !CONFIG_XENO_FASTSYNCH */
 
 #endif /* CB_LOCK_H */
diff --git a/kernel/cobalt/cond.c b/kernel/cobalt/cond.c
index 6ffd356..479646d 100644
--- a/kernel/cobalt/cond.c
+++ b/kernel/cobalt/cond.c
@@ -64,10 +64,8 @@ static void cond_destroy_internal(cobalt_cond_t * cond, 
cobalt_kqueues_t *q)
           xnpod_schedule(). */
        xnsynch_destroy(&cond->synchbase);
        xnlock_put_irqrestore(&nklock, s);
-#ifdef CONFIG_XENO_FASTSYNCH
        xnheap_free(&xnsys_ppd_get(cond->attr.pshared)->sem_heap,
                    cond->pending_signals);
-#endif /* CONFIG_XENO_FASTSYNCH */
        xnfree(cond);
 }
 
@@ -113,7 +111,6 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
        if (!cond)
                return ENOMEM;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        sys_ppd = xnsys_ppd_get(attr->pshared);
        cond->pending_signals = (unsigned long *)
                xnheap_alloc(&sys_ppd->sem_heap,
@@ -123,7 +120,6 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
                goto err_free_cond;
        }
        *(cond->pending_signals) = 0;
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -145,13 +141,11 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
                        }
        }
 
-#ifdef CONFIG_XENO_FASTSYNCH
        shadow->attr = *attr;
        shadow->pending_signals_offset =
                xnheap_mapped_offset(&sys_ppd->sem_heap,
                                     cond->pending_signals);
        shadow->mutex_ownerp = (xnarch_atomic_t *)~0UL;
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        shadow->magic = COBALT_COND_MAGIC;
        shadow->cond = cond;
@@ -171,12 +165,10 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
 
   err_free_pending_signals:
        xnlock_put_irqrestore(&nklock, s);
-#ifdef CONFIG_XENO_FASTSYNCH
        xnheap_free(&xnsys_ppd_get(cond->attr.pshared)->sem_heap,
                    cond->pending_signals);
   err_free_cond:
        xnfree(cond);
-#endif
        return err;
 }
 
@@ -426,10 +418,8 @@ int pthread_cond_wait(pthread_cond_t * cnd, 
pthread_mutex_t * mx)
        unsigned count;
        int err;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        if (unlikely(cb_try_read_lock(&mutex->lock, s)))
                return EINVAL;
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        err = cobalt_cond_timedwait_prologue(cur, cond, mutex,
                                            &count, 0, XN_INFINITE);
@@ -439,9 +429,7 @@ int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t 
* mx)
                                                               mutex, count))
                        ;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        cb_read_unlock(&mutex->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return err != EINTR ? err : 0;
 }
@@ -496,10 +484,8 @@ int pthread_cond_timedwait(pthread_cond_t * cnd,
        unsigned count;
        int err;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        if (unlikely(cb_try_read_lock(&mutex->lock, s)))
                return EINVAL;
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        err = cobalt_cond_timedwait_prologue(cur, cond, mutex, &count, 1,
                                            ts2ns(abstime) + 1);
@@ -509,9 +495,7 @@ int pthread_cond_timedwait(pthread_cond_t * cnd,
                                                               mutex, count))
                        ;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        cb_read_unlock(&mutex->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return err != EINTR ? err : 0;
 }
@@ -636,7 +620,6 @@ void cobalt_condq_cleanup(cobalt_kqueues_t *q)
        xnlock_put_irqrestore(&nklock, s);
 }
 
-#ifdef CONFIG_XENO_FASTSYNCH
 int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
 {
        unsigned long pending_signals;
@@ -665,7 +648,6 @@ int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
 
        return need_resched;
 }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
 void cobalt_cond_pkg_init(void)
 {
diff --git a/kernel/cobalt/cond.h b/kernel/cobalt/cond.h
index cbce569..c4849c9 100644
--- a/kernel/cobalt/cond.h
+++ b/kernel/cobalt/cond.h
@@ -28,7 +28,6 @@ union __xeno_cond {
        pthread_cond_t native_cond;
        struct __shadow_cond {
                unsigned magic;
-#ifdef CONFIG_XENO_FASTSYNCH
                struct cobalt_condattr attr;
                union {
                        unsigned pending_signals_offset;
@@ -38,7 +37,6 @@ union __xeno_cond {
                        unsigned mutex_ownerp_offset;
                        xnarch_atomic_t *mutex_ownerp;
                };
-#endif /* CONFIG_XENO_FASTSYNCH */
                struct cobalt_cond *cond;
        } shadow_cond;
 };
@@ -79,9 +77,7 @@ int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
                                  struct __shadow_cond *shadow,
                                  struct __shadow_mutex *mutex, unsigned count);
 
-#ifdef CONFIG_XENO_FASTSYNCH
 int cobalt_cond_deferred_signals(struct cobalt_cond *cond);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
 void cobalt_condq_cleanup(cobalt_kqueues_t *q);
 
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index 4d43f2a..def5576 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -105,10 +105,8 @@ int cobalt_mutex_init_internal(struct __shadow_mutex 
*shadow,
        shadow->lockcnt = 0;
        xnarch_atomic_set(&shadow->lock, -1);
 
-#ifdef CONFIG_XENO_FASTSYNCH
        shadow->attr = *attr;
        shadow->owner_offset = xnheap_mapped_offset(&sys_ppd->sem_heap, ownerp);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        if (attr->protocol == PTHREAD_PRIO_INHERIT)
                synch_flags |= XNSYNCH_PIP;
@@ -169,23 +167,16 @@ int pthread_mutex_init(pthread_mutex_t *mx, const 
pthread_mutexattr_t *attr)
                goto checked;
 
        err = cobalt_mutex_check_init(shadow, attr);
-#ifndef CONFIG_XENO_FASTSYNCH
-       cb_read_unlock(&shadow->lock, s);
-       if (err)
-               return -err;
-#else /* CONFIG_XENO_FASTSYNCH */
        if (err) {
                cb_read_unlock(&shadow->lock, s);
                return -err;
        }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
   checked:
        mutex = (cobalt_mutex_t *) xnmalloc(sizeof(*mutex));
        if (!mutex)
                return ENOMEM;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        ownerp = (xnarch_atomic_t *)
                xnheap_alloc(&xnsys_ppd_get(attr->pshared)->sem_heap,
                             sizeof(xnarch_atomic_t));
@@ -193,7 +184,6 @@ int pthread_mutex_init(pthread_mutex_t *mx, const 
pthread_mutexattr_t *attr)
                xnfree(mutex);
                return EAGAIN;
        }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        cb_force_write_lock(&shadow->lock, s);
        err = cobalt_mutex_init_internal(shadow, mutex, ownerp, attr);
@@ -201,9 +191,7 @@ int pthread_mutex_init(pthread_mutex_t *mx, const 
pthread_mutexattr_t *attr)
 
        if (err) {
                xnfree(mutex);
-#ifdef CONFIG_XENO_FASTSYNCH
                xnheap_free(&xnsys_ppd_get(attr->pshared)->sem_heap, ownerp);
-#endif /* CONFIG_XENO_FASTSYNCH */
        }
        return -err;
 }
@@ -220,10 +208,8 @@ void cobalt_mutex_destroy_internal(cobalt_mutex_t *mutex,
        xnsynch_destroy(&mutex->synchbase);
        xnlock_put_irqrestore(&nklock, s);
 
-#ifdef CONFIG_XENO_FASTSYNCH
        xnheap_free(&xnsys_ppd_get(mutex->attr.pshared)->sem_heap,
                    mutex->synchbase.fastlock);
-#endif /* CONFIG_XENO_FASTSYNCH */
        xnfree(mutex);
 }
 
@@ -270,12 +256,8 @@ int pthread_mutex_destroy(pthread_mutex_t * mx)
                return EPERM;
        }
 
-#ifdef CONFIG_XENO_FASTSYNCH
        if (xnsynch_fast_owner_check(mutex->synchbase.fastlock,
                                     XN_NO_HANDLE) != 0) {
-#else /* CONFIG_XENO_FASTSYNCH */
-       if (xnsynch_owner_check(&mutex->synchbase, NULL)) {
-#endif
                cb_write_unlock(&shadow->lock, s);
                return EBUSY;
        }
@@ -419,21 +401,8 @@ int pthread_mutex_trylock(pthread_mutex_t *mx)
        }
 #endif /* XENO_DEBUG(POSIX) */
 
-#ifdef CONFIG_XENO_FASTSYNCH
        err = -xnsynch_fast_acquire(mutex->synchbase.fastlock,
                                    xnthread_handle(cur));
-#else /* !CONFIG_XENO_FASTSYNCH */
-       {
-               xnthread_t *owner = xnsynch_owner(&mutex->synchbase);
-               if (!owner)
-                       err = 0;
-               else if (owner == cur)
-                       err = EBUSY;
-               else
-                       err = EAGAIN;
-       }
-#endif /* !CONFIG_XENO_FASTSYNCH */
-
        if (likely(!err))
                shadow->lockcnt = 1;
        else if (err == EBUSY) {
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index a6febbc..dcdb48d 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -31,7 +31,6 @@ union __xeno_mutex {
                unsigned lockcnt;
                struct cobalt_mutex *mutex;
                xnarch_atomic_t lock;
-#ifdef CONFIG_XENO_FASTSYNCH
                union {
                        unsigned owner_offset;
                        xnarch_atomic_t *owner;
@@ -39,7 +38,6 @@ union __xeno_mutex {
                struct cobalt_mutexattr attr;
 
 #define COBALT_MUTEX_COND_SIGNAL XN_HANDLE_SPARE2
-#endif /* CONFIG_XENO_FASTSYNCH */
        } shadow_mutex;
 };
 
@@ -156,7 +154,6 @@ static inline int cobalt_mutex_release(xnthread_t *cur,
                *count_ptr = shadow->lockcnt;
 
        need_resched = 0;
-#ifdef CONFIG_XENO_FASTSYNCH
        for (holder = getheadq(&mutex->conds);
             holder; holder = nextq(&mutex->conds, holder)) {
                struct cobalt_cond *cond = mutex_link2cond(holder);
@@ -171,7 +168,6 @@ static inline int cobalt_mutex_release(xnthread_t *cur,
        xnsynch_fast_clear_spares(mutex->synchbase.fastlock,
                                  xnthread_handle(cur),
                                  COBALT_MUTEX_COND_SIGNAL);
-#endif /* CONFIG_XENO_FASTSYNCH */
        need_resched |= xnsynch_release(&mutex->synchbase) != NULL;
 
        return need_resched;
diff --git a/kernel/cobalt/nucleus/synch.c b/kernel/cobalt/nucleus/synch.c
index b111056..e23ee10 100644
--- a/kernel/cobalt/nucleus/synch.c
+++ b/kernel/cobalt/nucleus/synch.c
@@ -109,13 +109,14 @@ void xnsynch_init(struct xnsynch *synch, xnflags_t flags, 
xnarch_atomic_t *fastl
        synch->status = flags & ~XNSYNCH_CLAIMED;
        synch->owner = NULL;
        synch->cleanup = NULL;  /* Only works for PIP-enabled objects. */
-#ifdef CONFIG_XENO_FASTSYNCH
-       if ((flags & XNSYNCH_OWNER) && fastlock) {
-               synch->fastlock = fastlock;
-               xnarch_atomic_set(fastlock, XN_NO_HANDLE);
+       if ((flags & XNSYNCH_OWNER)) {
+               if (fastlock) {
+                       synch->fastlock = fastlock;
+                       xnarch_atomic_set(fastlock, XN_NO_HANDLE);
+               } else
+                       BUG();
        } else
                synch->fastlock = NULL;
-#endif /* CONFIG_XENO_FASTSYNCH */
        initpq(&synch->pendq);
        xnarch_init_display_context(synch);
 }
@@ -402,7 +403,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
 {
        struct xnthread *thread = xnpod_current_thread(), *owner;
        xnhandle_t threadh = xnthread_handle(thread), fastlock, old, spares;
-       const int use_fastlock = xnsynch_fastlock_p(synch);
+       xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
        spl_t s;
 
        XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));
@@ -411,75 +412,58 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, 
xnticks_t timeout,
 
       redo:
 
-       if (use_fastlock) {
-               xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
-
-               spares = xnhandle_get_spares(xnarch_atomic_get(lockp),
+       spares = xnhandle_get_spares(xnarch_atomic_get(lockp),
                                             XN_HANDLE_SPARE_MASK);
-               old = XN_NO_HANDLE | spares;
-               fastlock = xnarch_atomic_cmpxchg(lockp, old, threadh | spares);
-
-               if (likely(fastlock == old)) {
-                       if (xnthread_test_state(thread, XNOTHER))
-                               xnthread_inc_rescnt(thread);
-                       xnthread_clear_info(thread,
-                                           XNRMID | XNTIMEO | XNBREAK);
-                       return 0;
-               }
+       old = XN_NO_HANDLE | spares;
+       fastlock = xnarch_atomic_cmpxchg(lockp, old, threadh | spares);
 
-               xnlock_get_irqsave(&nklock, s);
-
-               /* Set claimed bit.
-                  In case it appears to be set already, re-read its state
-                  under nklock so that we don't miss any change between the
-                  lock-less read and here. But also try to avoid cmpxchg
-                  where possible. Only if it appears not to be set, start
-                  with cmpxchg directly. */
-               if (xnsynch_fast_is_claimed(fastlock)) {
-                       old = xnarch_atomic_get(lockp);
-                       goto test_no_owner;
-               }
-               do {
-                       old = xnarch_atomic_cmpxchg(lockp, fastlock,
-                                       xnsynch_fast_set_claimed(fastlock, 1));
-                       if (likely(old == fastlock))
-                               break;
-
-                 test_no_owner:
-                       if (old == XN_NO_HANDLE) {
-                               /* Owner called xnsynch_release
-                                  (on another cpu) */
-                               xnlock_put_irqrestore(&nklock, s);
-                               goto redo;
-                       }
-                       fastlock = old;
-               } while (!xnsynch_fast_is_claimed(fastlock));
+       if (likely(fastlock == old)) {
+               if (xnthread_test_state(thread, XNOTHER))
+                       xnthread_inc_rescnt(thread);
+               xnthread_clear_info(thread,
+                                   XNRMID | XNTIMEO | XNBREAK);
+               return 0;
+       }
 
-               owner = xnthread_lookup(xnhandle_mask_spares(fastlock));
+       xnlock_get_irqsave(&nklock, s);
 
-               if (!owner) {
-                       /* The handle is broken, therefore pretend that the 
synch
-                          object was deleted to signal an error. */
-                       xnthread_set_info(thread, XNRMID);
-                       goto unlock_and_exit;
+       /* Set claimed bit.
+          In case it appears to be set already, re-read its state
+          under nklock so that we don't miss any change between the
+          lock-less read and here. But also try to avoid cmpxchg
+          where possible. Only if it appears not to be set, start
+          with cmpxchg directly. */
+       if (xnsynch_fast_is_claimed(fastlock)) {
+               old = xnarch_atomic_get(lockp);
+               goto test_no_owner;
+       }
+       do {
+               old = xnarch_atomic_cmpxchg(lockp, fastlock,
+                               xnsynch_fast_set_claimed(fastlock, 1));
+               if (likely(old == fastlock))
+                       break;
+
+         test_no_owner:
+               if (old == XN_NO_HANDLE) {
+                       /* Owner called xnsynch_release
+                          (on another cpu) */
+                       xnlock_put_irqrestore(&nklock, s);
+                       goto redo;
                }
+               fastlock = old;
+       } while (!xnsynch_fast_is_claimed(fastlock));
 
-               xnsynch_set_owner(synch, owner);
-       } else {
-               xnlock_get_irqsave(&nklock, s);
+       owner = xnthread_lookup(xnhandle_mask_spares(fastlock));
 
-               owner = synch->owner;
-
-               if (!owner) {
-                       synch->owner = thread;
-                       if (xnthread_test_state(thread, XNOTHER))
-                               xnthread_inc_rescnt(thread);
-                       xnthread_clear_info(thread,
-                                           XNRMID | XNTIMEO | XNBREAK);
-                       goto unlock_and_exit;
-               }
+       if (!owner) {
+               /* The handle is broken, therefore pretend that the synch
+                  object was deleted to signal an error. */
+               xnthread_set_info(thread, XNRMID);
+               goto unlock_and_exit;
        }
 
+       xnsynch_set_owner(synch, owner);
+
        xnsynch_detect_relaxed_owner(synch, thread);
 
        if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
@@ -535,23 +519,18 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, 
xnticks_t timeout,
                }
                xnthread_set_info(thread, XNTIMEO);
        } else {
-
              grab_and_exit:
-
                if (xnthread_test_state(thread, XNOTHER))
                        xnthread_inc_rescnt(thread);
 
-               if (use_fastlock) {
-                       xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
-                       /* We are the new owner, update the fastlock
-                          accordingly. */
-                       threadh |= xnhandle_get_spares(xnarch_atomic_get(lockp),
-                                                      XN_HANDLE_SPARE_MASK);
-                       threadh =
-                               xnsynch_fast_set_claimed(threadh,
-                                                        
xnsynch_pended_p(synch));
-                       xnarch_atomic_set(lockp, threadh);
-               }
+               /* We are the new owner, update the fastlock
+                  accordingly. */
+               threadh |= xnhandle_get_spares(xnarch_atomic_get(lockp),
+                                              XN_HANDLE_SPARE_MASK);
+               threadh =
+                       xnsynch_fast_set_claimed(threadh,
+                                                xnsynch_pended_p(synch));
+               xnarch_atomic_set(lockp, threadh);
        }
 
       unlock_and_exit:
@@ -674,10 +653,10 @@ EXPORT_SYMBOL_GPL(xnsynch_requeue_sleeper);
 static struct xnthread *
 xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
 {
-       const int use_fastlock = xnsynch_fastlock_p(synch);
        xnhandle_t lastownerh, newownerh;
        struct xnthread *newowner;
        struct xnpholder *holder;
+       xnarch_atomic_t *lockp;
        spl_t s;
 
        XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));
@@ -691,8 +670,7 @@ xnsynch_release_thread(struct xnsynch *synch, struct 
xnthread *lastowner)
        }
        lastownerh = xnthread_handle(lastowner);
 
-       if (use_fastlock &&
-           likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
+       if (likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
                return NULL;
 
        xnlock_get_irqsave(&nklock, s);
@@ -718,14 +696,12 @@ xnsynch_release_thread(struct xnsynch *synch, struct 
xnthread *lastowner)
                synch->owner = NULL;
                newownerh = XN_NO_HANDLE;
        }
-       if (use_fastlock) {
-               xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
-               newownerh |=
-                       xnhandle_get_spares(xnarch_atomic_get(lockp),
-                                           XN_HANDLE_SPARE_MASK
-                                           & ~XNSYNCH_FLCLAIM);
-               xnarch_atomic_set(lockp, newownerh);
-       }
+
+       lockp = xnsynch_fastlock(synch);
+       newownerh |= xnhandle_get_spares(xnarch_atomic_get(lockp),
+                                        XN_HANDLE_SPARE_MASK
+                                        & ~XNSYNCH_FLCLAIM);
+       xnarch_atomic_set(lockp, newownerh);
 
        xnlock_put_irqrestore(&nklock, s);
 
diff --git a/kernel/cobalt/syscall.c b/kernel/cobalt/syscall.c
index 79c2a35..5f9b129 100644
--- a/kernel/cobalt/syscall.c
+++ b/kernel/cobalt/syscall.c
@@ -1048,192 +1048,6 @@ static int 
__pthread_mutexattr_setpshared(pthread_mutexattr_t __user *u_attr,
        return __xn_safe_copy_to_user(u_attr, &attr, sizeof(*u_attr));
 }
 
-#ifndef CONFIG_XENO_FASTSYNCH
-static int __pthread_mutex_init(union __xeno_mutex __user *u_mx,
-                               const pthread_mutexattr_t __user *u_attr)
-{
-       pthread_mutexattr_t locattr, *attr;
-       union __xeno_mutex mx;
-       int err;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
-               return -EFAULT;
-
-       if (u_attr) {
-               if (__xn_safe_copy_from_user(&locattr, u_attr,
-                                            sizeof(locattr)))
-                       return -EFAULT;
-
-               attr = &locattr;
-       } else
-               attr = NULL;
-
-       err = pthread_mutex_init(&mx.native_mutex, attr);
-       if (err)
-               return -err;
-
-       return __xn_safe_copy_to_user(&u_mx->shadow_mutex,
-                                     &mx.shadow_mutex, 
sizeof(u_mx->shadow_mutex));
-}
-
-#define __pthread_mutex_check_init __cobalt_call_not_available
-
-static int __pthread_mutex_destroy(union __xeno_mutex __user *u_mx)
-{
-       union __xeno_mutex mx;
-       int err;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
-               return -EFAULT;
-
-       err = pthread_mutex_destroy(&mx.native_mutex);
-       if (err)
-               return -err;
-
-       return __xn_safe_copy_to_user(&u_mx->shadow_mutex,
-                                     &mx.shadow_mutex, 
sizeof(u_mx->shadow_mutex));
-}
-
-static int __pthread_mutex_lock(union __xeno_mutex __user *u_mx)
-{
-       DECLARE_CB_LOCK_FLAGS(s);
-       union __xeno_mutex mx;
-       int err;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
-               return -EFAULT;
-
-       if (unlikely(cb_try_read_lock(&mx.shadow_mutex.lock, s)))
-               return -EINVAL;
-
-       err = cobalt_mutex_timedlock_break(&mx.shadow_mutex, 0, XN_INFINITE);
-
-       cb_read_unlock(&mx.shadow_mutex.lock, s);
-
-       if (err == 0 &&
-           __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
-                                  &mx.shadow_mutex.lockcnt,
-                                  sizeof(u_mx->shadow_mutex.lockcnt)))
-               return -EFAULT;
-
-       return err;
-}
-
-static int __pthread_mutex_timedlock(union __xeno_mutex __user *u_mx,
-                                    const struct timespec __user *u_ts)
-{
-       DECLARE_CB_LOCK_FLAGS(s);
-       union __xeno_mutex mx;
-       struct timespec ts;
-       int err;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
-               return -EFAULT;
-
-       if (__xn_safe_copy_from_user(&ts, u_ts, sizeof(ts)))
-               return -EFAULT;
-
-       if (unlikely(cb_try_read_lock(&mx.shadow_mutex.lock, s)))
-               return -EINVAL;
-
-       err = cobalt_mutex_timedlock_break(&mx.shadow_mutex,
-                                         1, ts2ns(&ts) + 1);
-
-       cb_read_unlock(&mx.shadow_mutex.lock, s);
-
-       if (err == 0 &&
-           __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
-                                  &mx.shadow_mutex.lockcnt,
-                                  sizeof(u_mx->shadow_mutex.lockcnt)))
-               return -EFAULT;
-
-       return err;
-}
-
-static int __pthread_mutex_trylock(union __xeno_mutex __user *u_mx)
-{
-       union __xeno_mutex mx;
-       int err;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
-               return -EFAULT;
-
-       err = pthread_mutex_trylock(&mx.native_mutex);
-       if (err == 0 &&
-           __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
-                                  &mx.shadow_mutex.lockcnt,
-                                  sizeof(u_mx->shadow_mutex.lockcnt)))
-               return -EFAULT;
-
-       return -err;
-}
-
-static int __pthread_mutex_unlock(union __xeno_mutex __user *u_mx)
-{
-       xnthread_t *cur = xnpod_current_thread();
-       struct __shadow_mutex *shadow;
-       DECLARE_CB_LOCK_FLAGS(s);
-       union __xeno_mutex mx;
-       cobalt_mutex_t *mutex;
-       int err;
-
-       if (xnpod_root_p())
-               return -EPERM;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
-               return -EFAULT;
-
-       shadow = &mx.shadow_mutex;
-
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return -EINVAL;
-
-       if (!cobalt_obj_active(shadow,
-                             COBALT_MUTEX_MAGIC, struct __shadow_mutex)) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       mutex = shadow->mutex;
-
-       err = (xnsynch_owner(&mutex->synchbase) == cur) ? 0 : -EPERM;
-       if (err)
-               goto out;
-
-       if (shadow->lockcnt > 1) {
-               /* Mutex is recursive */
-               --shadow->lockcnt;
-               cb_read_unlock(&shadow->lock, s);
-
-               if (__xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
-                                          &shadow->lockcnt,
-                                          sizeof(u_mx->shadow_mutex.lockcnt)))
-                       return -EFAULT;
-
-               return 0;
-       }
-
-       if (xnsynch_release(&mutex->synchbase))
-               xnpod_schedule();
-
-  out:
-       cb_read_unlock(&shadow->lock, s);
-
-       return err;
-}
-#else /* CONFIG_XENO_FASTSYNCH */
 static int __pthread_mutex_check_init(union __xeno_mutex __user *u_mx,
                                      const pthread_mutexattr_t __user *u_attr)
 {
@@ -1476,7 +1290,6 @@ static int __pthread_mutex_unlock(union __xeno_mutex 
__user *u_mx)
 
        return err;
 }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
 static int __pthread_condattr_init(pthread_condattr_t __user *u_attr)
 {
@@ -1648,13 +1461,13 @@ static int __pthread_cond_wait_prologue(union 
__xeno_cond __user *u_cnd,
                                     sizeof(mx.shadow_mutex)))
                return -EFAULT;
 
-#ifdef CONFIG_XENO_FASTSYNCH
-       cnd.shadow_cond.mutex_ownerp = mx.shadow_mutex.owner;
-       if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
-                                  &cnd.shadow_cond.mutex_ownerp,
-                                  sizeof(cnd.shadow_cond.mutex_ownerp)))
-               return -EFAULT;
-#endif /* CONFIG_XENO_FASTSYNCH */
+       if (!cnd.shadow_cond.cond->mutex) {
+               cnd.shadow_cond.mutex_ownerp = mx.shadow_mutex.owner;
+               if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
+                                          &cnd.shadow_cond.mutex_ownerp,
+                                          
sizeof(cnd.shadow_cond.mutex_ownerp)))
+                       return -EFAULT;
+       }
 
        if (timed) {
                err = __xn_safe_copy_from_user(&ts, u_ts, sizeof(ts))?EFAULT:0;
@@ -1672,7 +1485,6 @@ static int __pthread_cond_wait_prologue(union __xeno_cond 
__user *u_cnd,
                                                    &d.count,
                                                    timed, XN_INFINITE);
 
-#ifdef CONFIG_XENO_FASTSYNCH
        if (!cnd.shadow_cond.cond->mutex) {
                cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
                if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
@@ -1680,7 +1492,6 @@ static int __pthread_cond_wait_prologue(union __xeno_cond 
__user *u_cnd,
                                           
sizeof(cnd.shadow_cond.mutex_ownerp)))
                        return -EFAULT;
        }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        switch(err) {
        case 0:
@@ -1689,7 +1500,6 @@ static int __pthread_cond_wait_prologue(union __xeno_cond 
__user *u_cnd,
                err = -cobalt_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
                                                    &mx.shadow_mutex, d.count);
 
-#ifdef CONFIG_XENO_FASTSYNCH
                if (!cnd.shadow_cond.cond->mutex) {
                        cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
                        if 
(__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
@@ -1697,7 +1507,6 @@ static int __pthread_cond_wait_prologue(union __xeno_cond 
__user *u_cnd,
                                                   
sizeof(cnd.shadow_cond.mutex_ownerp)))
                                return -EFAULT;
                }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
                if (err == 0 &&
                    __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
@@ -1735,18 +1544,13 @@ static int __pthread_cond_wait_epilogue(union 
__xeno_cond __user *u_cnd,
 
        if (__xn_safe_copy_from_user(&mx.shadow_mutex,
                                     &u_mx->shadow_mutex,
-#ifdef CONFIG_XENO_FASTSYNCH
                                     offsetof(struct __shadow_mutex, lock)
-#else /* !CONFIG_XENO_FASTSYNCH */
-                                    sizeof(mx.shadow_mutex)
-#endif /* !CONFIG_XENO_FASTSYNCH */
                                     ))
                return -EFAULT;
 
        err = cobalt_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
                                            &mx.shadow_mutex, count);
 
-#ifdef CONFIG_XENO_FASTSYNCH
        if (!cnd.shadow_cond.cond->mutex) {
                cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
                if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
@@ -1754,7 +1558,6 @@ static int __pthread_cond_wait_epilogue(union __xeno_cond 
__user *u_cnd,
                                           
sizeof(cnd.shadow_cond.mutex_ownerp)))
                        return -EFAULT;
        }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        if (err == 0
            && __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index df03c18..6521238 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -27,7 +27,6 @@
 
 extern int __cobalt_muxid;
 
-#ifdef CONFIG_XENO_FASTSYNCH
 #define COBALT_COND_MAGIC 0x86860505
 
 extern unsigned long xeno_sem_heap[2];
@@ -52,7 +51,6 @@ static xnarch_atomic_t *get_mutex_ownerp(struct __shadow_cond 
*shadow)
        return (xnarch_atomic_t *)(xeno_sem_heap[1]
                                   + shadow->mutex_ownerp_offset);
 }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
 int __wrap_pthread_condattr_init(pthread_condattr_t *attr)
 {
@@ -100,12 +98,10 @@ int __wrap_pthread_cond_init(pthread_cond_t * cond,
 
        err = -XENOMAI_SKINCALL2(__cobalt_muxid,
                                 __cobalt_cond_init, shadow, attr);
-#ifdef CONFIG_XENO_FASTSYNCH
        if (!err && !shadow->attr.pshared) {
                shadow->pending_signals = (unsigned long *)
                        (xeno_sem_heap[0] + shadow->pending_signals_offset);
        }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return err;
 }
@@ -220,7 +216,6 @@ int __wrap_pthread_cond_signal(pthread_cond_t * cond)
 {
        struct __shadow_cond *shadow =
                &((union __xeno_cond *)cond)->shadow_cond;
-#ifdef CONFIG_XENO_FASTSYNCH
        unsigned long *pending_signals;
        xnarch_atomic_t *mutex_ownerp;
        xnhandle_t cur;
@@ -244,17 +239,12 @@ int __wrap_pthread_cond_signal(pthread_cond_t * cond)
        }
 
        return 0;
-#else /* !CONFIG_XENO_FASTSYNCH */
-       return -XENOMAI_SKINCALL1(__cobalt_muxid,
-                                 __cobalt_cond_signal, &_cond->shadow_cond);
-#endif /* !CONFIG_XENO_FASTSYNCH */
 }
 
 int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
 {
        struct __shadow_cond *shadow =
                &((union __xeno_cond *)cond)->shadow_cond;
-#ifdef CONFIG_XENO_FASTSYNCH
        unsigned long *pending_signals;
        xnarch_atomic_t *mutex_ownerp;
        xnhandle_t cur;
@@ -277,8 +267,4 @@ int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
        }
 
        return 0;
-#else /* !CONFIG_XENO_FASTSYNCH */
-       return -XENOMAI_SKINCALL1(__cobalt_muxid,
-                                 __cobalt_cond_broadcast, &_cond->shadow_cond);
-#endif /* !CONFIG_XENO_FASTSYNCH */
 }
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index 99fdce7..f2ae5ab 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -27,7 +27,6 @@
 
 extern int __cobalt_muxid;
 
-#ifdef CONFIG_XENO_FASTSYNCH
 #define COBALT_MUTEX_MAGIC (0x86860303)
 
 extern unsigned long xeno_sem_heap[2];
@@ -39,7 +38,6 @@ static xnarch_atomic_t *get_ownerp(struct __shadow_mutex 
*shadow)
 
        return (xnarch_atomic_t *)(xeno_sem_heap[1] + shadow->owner_offset);
 }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
 int __wrap_pthread_mutexattr_init(pthread_mutexattr_t *attr)
 {
@@ -99,7 +97,6 @@ int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
        int err;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        if (unlikely(cb_try_read_lock(&shadow->lock, s)))
                goto checked;
 
@@ -112,17 +109,14 @@ int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
 
   checked:
        cb_force_write_lock(&shadow->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        err = 
-XENOMAI_SKINCALL2(__cobalt_muxid,__cobalt_mutex_init,shadow,attr);
 
-#ifdef CONFIG_XENO_FASTSYNCH
        if (!shadow->attr.pshared)
                shadow->owner = (xnarch_atomic_t *)
                        (xeno_sem_heap[0] + shadow->owner_offset);
 
        cb_write_unlock(&shadow->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return err;
 }
@@ -149,7 +143,6 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
        int err;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        unsigned long status;
        xnhandle_t cur;
 
@@ -200,16 +193,13 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
                                goto out;
                        }
                }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        do {
                err = 
XENOMAI_SKINCALL1(__cobalt_muxid,__cobalt_mutex_lock,shadow);
        } while (err == -EINTR);
 
-#ifdef CONFIG_XENO_FASTSYNCH
   out:
        cb_read_unlock(&shadow->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return -err;
 }
@@ -219,11 +209,9 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
-       int err;
-
-#ifdef CONFIG_XENO_FASTSYNCH
        unsigned long status;
        xnhandle_t cur;
+       int err;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
@@ -268,17 +256,14 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
                                goto out;
                        }
        }
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        do {
                err = XENOMAI_SKINCALL2(__cobalt_muxid,
                                        __cobalt_mutex_timedlock, shadow, to);
        } while (err == -EINTR);
 
-#ifdef CONFIG_XENO_FASTSYNCH
   out:
        cb_read_unlock(&shadow->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return -err;
 }
@@ -287,11 +272,9 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
-       int err;
-
-#ifdef CONFIG_XENO_FASTSYNCH
        xnarch_atomic_t *ownerp;
        xnhandle_t cur;
+       int err;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
@@ -335,17 +318,14 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
        goto out;
 
 do_syscall:
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        do {
                err = -XENOMAI_SKINCALL1(__cobalt_muxid,
                                         __cobalt_mutex_trylock, shadow);
        } while (err == EINTR);
 
-#ifdef CONFIG_XENO_FASTSYNCH
   out:
        cb_read_unlock(&shadow->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return err;
 }
@@ -354,11 +334,9 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
-       int err;
-
-#ifdef CONFIG_XENO_FASTSYNCH
        xnarch_atomic_t *ownerp;
        xnhandle_t cur;
+       int err;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
@@ -396,17 +374,14 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
        }
 
 do_syscall:
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        do {
                err = XENOMAI_SKINCALL1(__cobalt_muxid,
                                        __cobalt_mutex_unlock, shadow);
        } while (err == -EINTR);
 
-#ifdef CONFIG_XENO_FASTSYNCH
   out_err:
        cb_read_unlock(&shadow->lock, s);
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        return -err;
 }
diff --git a/lib/cobalt/printf.c b/lib/cobalt/printf.c
index 7fc988d..70e2713 100644
--- a/lib/cobalt/printf.c
+++ b/lib/cobalt/printf.c
@@ -85,12 +85,10 @@ static pthread_mutex_t buffer_lock;
 static pthread_cond_t printer_wakeup;
 static pthread_key_t buffer_key;
 static pthread_t printer_thread;
-#ifdef CONFIG_XENO_FASTSYNCH
 static xnarch_atomic_t *pool_bitmap;
 static unsigned pool_bitmap_len;
 static unsigned pool_buf_size;
 static unsigned long pool_start, pool_len;
-#endif /* CONFIG_XENO_FASTSYNCH */
 
 static void cleanup_buffer(struct print_buffer *buffer);
 static void print_buffers(void);
@@ -360,7 +358,6 @@ int rt_print_init(size_t buffer_size, const char 
*buffer_name)
                buffer = NULL;
        }
 
-#ifdef CONFIG_XENO_FASTSYNCH
        /* Find a free buffer in the pool */
        do {
                unsigned long bitmap;
@@ -388,7 +385,6 @@ int rt_print_init(size_t buffer_size, const char 
*buffer_name)
        buffer = (struct print_buffer *)(pool_start + j * pool_buf_size);
 
   not_found:
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        if (!buffer) {
                assert_nrt();
@@ -478,7 +474,6 @@ static void cleanup_buffer(struct print_buffer *buffer)
 
        pthread_mutex_unlock(&buffer_lock);
 
-#ifdef CONFIG_XENO_FASTSYNCH
        /* Return the buffer to the pool */
        {
                unsigned long old_bitmap, bitmap;
@@ -502,7 +497,6 @@ static void cleanup_buffer(struct print_buffer *buffer)
                return;
        }
   dofree:
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        pthread_mutex_lock(&buffer_lock);
 
@@ -677,7 +671,6 @@ static __attribute__ ((constructor)) void 
__rt_print_init(void)
        print_period.tv_sec  = period / 1000;
        print_period.tv_nsec = (period % 1000) * 1000000;
 
-#ifdef CONFIG_XENO_FASTSYNCH
        /* Fill the buffer pool */
        {
                unsigned buffers_count, i;
@@ -733,7 +726,6 @@ static __attribute__ ((constructor)) void 
__rt_print_init(void)
                }
        }
   done:
-#endif /* CONFIG_XENO_FASTSYNCH */
 
        pthread_mutex_init(&buffer_lock, NULL);
        pthread_key_create(&buffer_key, (void (*)(void*))cleanup_buffer);
diff --git a/lib/cobalt/sem_heap.c b/lib/cobalt/sem_heap.c
index 1db92e9..5d8b95c 100644
--- a/lib/cobalt/sem_heap.c
+++ b/lib/cobalt/sem_heap.c
@@ -81,12 +81,10 @@ static void unmap_on_fork(void)
           that access to these addresses will cause a segmentation
           fault.
        */
-#if defined(CONFIG_XENO_FASTSYNCH)
        void *addr = mmap((void *)xeno_sem_heap[PRIVATE],
                          private_hdesc.size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (addr != (void *)xeno_sem_heap[PRIVATE])
-#endif /* CONFIG_XENO_FASTSYNCH */
                munmap((void *)xeno_sem_heap[PRIVATE], private_hdesc.size);
        xeno_sem_heap[PRIVATE] = 0UL;
        init_private_heap = PTHREAD_ONCE_INIT;
diff --git a/lib/include/xeno_config.h.in b/lib/include/xeno_config.h.in
index cebf430..d9d6d50 100644
--- a/lib/include/xeno_config.h.in
+++ b/lib/include/xeno_config.h.in
@@ -33,9 +33,6 @@
 /* config */
 #undef CONFIG_XENO_DEFAULT_PERIOD
 
-/* config */
-#undef CONFIG_XENO_FASTSYNCH
-
 /* Host system alias */
 #undef CONFIG_XENO_HOST_STRING
 


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to