[Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners - v2

2008-09-05 Thread Jan Kiszka
Jan Kiszka wrote:
 To improve robustness of the fast mutex implementation in POSIX (and
 later on in native), it is better to track the mutex owner by handle
 instead of kernel object pointer. Therefore, this patch changes
 __xn_sys_current (xeno_set_current) so that it returns
 xnthread_handle(current_thread). It furthermore converts the POSIX mutex
 implementation to pick up and store the lock owner as handle in the
 kernel/user-shared mutex. Finally it ensures that all skins (except for
 RTDM which must not mess around with foreign skins anyway) add their
 threads to the registry so that at least anonymous handles are
 available.
 
 As the value stored in the mutex variable is now an integer, we can
 switch over to xnarch_atomic_t, removing all atomic_intptr users.
 
 
 [ PS: I wonder if we can merge xnmap and xnregistry one day. Maybe there
 is a smart way to combine best of both, overcoming the redundancy. ]

Rebased on top of the claimed-bit-race fix.

---
 include/asm-generic/bits/bind.h|9 -
 include/asm-generic/bits/current.h |5 +-
 include/nucleus/types.h|   13 +++
 ksrc/nucleus/shadow.c  |   17 -
 ksrc/skins/native/Kconfig  |1 
 ksrc/skins/native/task.c   |   14 +++-
 ksrc/skins/posix/Kconfig   |1 
 ksrc/skins/posix/cb_lock.h |   15 ++--
 ksrc/skins/posix/cond.c|   12 --
 ksrc/skins/posix/mutex.c   |   21 +++-
 ksrc/skins/posix/mutex.h   |   64 -
 ksrc/skins/posix/syscall.c |   11 +++---
 ksrc/skins/posix/thread.c  |   16 +
 ksrc/skins/psos+/Kconfig   |2 -
 ksrc/skins/rtai/Kconfig|1 
 ksrc/skins/rtai/task.c |   12 ++
 ksrc/skins/uitron/Kconfig  |1 
 ksrc/skins/uitron/task.c   |   11 ++
 ksrc/skins/vrtx/Kconfig|2 -
 ksrc/skins/vrtx/task.c |   20 ++-
 ksrc/skins/vxworks/Kconfig |2 -
 src/skins/posix/mutex.c|   27 ---
 22 files changed, 201 insertions(+), 76 deletions(-)

Index: b/include/asm-generic/bits/current.h
===
--- a/include/asm-generic/bits/current.h
+++ b/include/asm-generic/bits/current.h
@@ -2,14 +2,15 @@
 #define _XENO_ASM_GENERIC_CURRENT_H
 
 #include <pthread.h>
+#include <nucleus/types.h>
 
 extern pthread_key_t xeno_current_key;
 
 extern void xeno_set_current(void);
 
-static inline void *xeno_get_current(void)
+static inline xnhandle_t xeno_get_current(void)
 {
-   return pthread_getspecific(xeno_current_key);
+   return (xnhandle_t)pthread_getspecific(xeno_current_key);
 }
 
 #endif /* _XENO_ASM_GENERIC_CURRENT_H */
Index: b/include/nucleus/types.h
===
--- a/include/nucleus/types.h
+++ b/include/nucleus/types.h
@@ -61,6 +61,19 @@ typedef unsigned long xnhandle_t;
 
 #define XN_NO_HANDLE ((xnhandle_t)0)
 
+#define XN_HANDLE_SPARE0   ((xnhandle_t)0x1000)
+#define XN_HANDLE_SPARE1   ((xnhandle_t)0x2000)
+#define XN_HANDLE_SPARE2   ((xnhandle_t)0x4000)
+#define XN_HANDLE_SPARE3   ((xnhandle_t)0x8000)
+#define XN_HANDLE_SPARE_MASK   ((xnhandle_t)0xf000)
+
+#define xnhandle_mask_spare(handle)  ((handle) & ~XN_HANDLE_SPARE_MASK)
+#define xnhandle_test_spare(handle, bits)  (!!((handle) & (bits)))
+#define xnhandle_set_spare(handle, bits) \
+   do { (handle) |= (bits); } while (0)
+#define xnhandle_clear_spare(handle, bits) \
+   do { (handle) &= ~(bits); } while (0)
+
 struct xnintr;
 
 typedef int (*xnisr_t)(struct xnintr *intr);
Index: b/ksrc/nucleus/shadow.c
===
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -52,6 +52,7 @@
 #include <nucleus/trace.h>
 #include <nucleus/stat.h>
 #include <nucleus/sys_ppd.h>
+#include <nucleus/registry.h>
 #include <asm/xenomai/features.h>
 #include <asm/xenomai/syscall.h>
 #include <asm/xenomai/bits/shadow.h>
@@ -1908,13 +1909,21 @@ static int xnshadow_sys_sem_heap(struct
return __xn_safe_copy_to_user(us_hinfo, hinfo, sizeof(*us_hinfo));
 }
 
+#ifdef CONFIG_XENO_OPT_REGISTRY
 static int xnshadow_sys_current(struct pt_regs *regs)
 {
-   xnthread_t * __user *us_current, *cur = xnshadow_thread(current);
-   us_current = (xnthread_t *__user *) __xn_reg_arg1(regs);
+   xnthread_t *cur = xnshadow_thread(current);
+   xnhandle_t __user *us_handle;

-   return __xn_safe_copy_to_user(us_current, &cur, sizeof(*us_current));
+   if (!cur)
+   return -EPERM;
+
+   us_handle = (xnhandle_t __user *) __xn_reg_arg1(regs);
+
+   return __xn_safe_copy_to_user(us_handle, &xnthread_handle(cur),
+ sizeof(*us_handle));
 }
+#endif /* CONFIG_XENO_OPT_REGISTRY */
 
 static xnsysent_t __systab[] = {
[__xn_sys_migrate] = 

Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners - v2

2008-09-05 Thread Gilles Chanteperdrix
Jan Kiszka wrote:
 @@ -230,18 +230,20 @@ static inline int mutex_save_count(xnthr
  
   mutex = shadow->mutex;
  
  - if (clear_claimed(xnarch_atomic_intptr_get(&mutex->owner)) != cur)
  + if (clear_claimed(xnarch_atomic_get(&mutex->owner)) !=
  + xnthread_handle(cur))
   return EPERM;
  
   *count_ptr = shadow->lockcnt;
  
  - if (likely(xnarch_atomic_intptr_cmpxchg(&mutex->owner, cur, NULL) == 
  cur))
  + if (likely(xnarch_atomic_cmpxchg(&mutex->owner, cur, XN_NO_HANDLE) ==
  +xnthread_handle(cur)))
   return 0;
  
   owner = xnsynch_wakeup_one_sleeper(&mutex->synchbase);
  - xnarch_atomic_intptr_set
  - (&mutex->owner,
  -  set_claimed(owner, xnsynch_nsleepers(&mutex->synchbase)));
  + xnarch_atomic_set(&mutex->owner,
  +   set_claimed(xnthread_handle(owner),
  +   xnsynch_nsleepers(&mutex->synchbase)));

Ok. Can we avoid xnthread_handle() everywhere by storing its result in a
local variable and reusing this local variable, here and in the other
functions where xnthread_handle is used multiple times ?

-- 
 Gilles.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners - v2

2008-09-05 Thread Jan Kiszka
Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 @@ -230,18 +230,20 @@ static inline int mutex_save_count(xnthr
  
  mutex = shadow-mutex;
  
 -if (clear_claimed(xnarch_atomic_intptr_get(mutex-owner)) != cur)
 +if (clear_claimed(xnarch_atomic_get(mutex-owner)) !=
 +xnthread_handle(cur))
  return EPERM;
  
  *count_ptr = shadow-lockcnt;
  
 -if (likely(xnarch_atomic_intptr_cmpxchg(mutex-owner, cur, NULL) == 
 cur))
 +if (likely(xnarch_atomic_cmpxchg(mutex-owner, cur, XN_NO_HANDLE) ==
 +   xnthread_handle(cur)))
  return 0;
  
  owner = xnsynch_wakeup_one_sleeper(mutex-synchbase);
 -xnarch_atomic_intptr_set
 -(mutex-owner,
 - set_claimed(owner,xnsynch_nsleepers(mutex-synchbase)));
 +xnarch_atomic_set(mutex-owner,
 +  set_claimed(xnthread_handle(owner),
 +  xnsynch_nsleepers(mutex-synchbase)));
 
 Ok. Can we avoid xnthread_handle() everywhere by storing its result in a
 local variable and reusing this local variable, here and in the other
 functions where xnthread_handle is used multiple times ?

True for 'cur' here (will fix), but the other cases have already been
optimized as far as possible (i.e. within the respective scope).

Jan

-- 
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners - v2

2008-09-05 Thread Gilles Chanteperdrix
Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 @@ -230,18 +230,20 @@ static inline int mutex_save_count(xnthr
  
 mutex = shadow-mutex;
  
 -   if (clear_claimed(xnarch_atomic_intptr_get(mutex-owner)) != cur)
 +   if (clear_claimed(xnarch_atomic_get(mutex-owner)) !=
 +   xnthread_handle(cur))
 return EPERM;
  
 *count_ptr = shadow-lockcnt;
  
 -   if (likely(xnarch_atomic_intptr_cmpxchg(mutex-owner, cur, NULL) == 
 cur))
 +   if (likely(xnarch_atomic_cmpxchg(mutex-owner, cur, XN_NO_HANDLE) ==
 +  xnthread_handle(cur)))
 return 0;
  
 owner = xnsynch_wakeup_one_sleeper(mutex-synchbase);
 -   xnarch_atomic_intptr_set
 -   (mutex-owner,
 -set_claimed(owner,xnsynch_nsleepers(mutex-synchbase)));
 +   xnarch_atomic_set(mutex-owner,
 + set_claimed(xnthread_handle(owner),
 + xnsynch_nsleepers(mutex-synchbase)));
 Ok. Can we avoid xnthread_handle() everywhere by storing its result in a
 local variable and reusing this local variable, here and in the other
 functions where xnthread_handle is used multiple times ?
 
 True for 'cur' here (will fix), but the other case have already been
 optimized as far as possible (i.e. within the respective scope).

I have found several other spots where xnthread_handle is called
multiple times in the same function. mutex_unlock comes to my mind.

-- 
 Gilles.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners - v2

2008-09-05 Thread Jan Kiszka
Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 @@ -230,18 +230,20 @@ static inline int mutex_save_count(xnthr
  
mutex = shadow-mutex;
  
 -  if (clear_claimed(xnarch_atomic_intptr_get(mutex-owner)) != cur)
 +  if (clear_claimed(xnarch_atomic_get(mutex-owner)) !=
 +  xnthread_handle(cur))
return EPERM;
  
*count_ptr = shadow-lockcnt;
  
 -  if (likely(xnarch_atomic_intptr_cmpxchg(mutex-owner, cur, NULL) == 
 cur))
 +  if (likely(xnarch_atomic_cmpxchg(mutex-owner, cur, XN_NO_HANDLE) ==
 + xnthread_handle(cur)))
return 0;
  
owner = xnsynch_wakeup_one_sleeper(mutex-synchbase);
 -  xnarch_atomic_intptr_set
 -  (mutex-owner,
 -   set_claimed(owner,xnsynch_nsleepers(mutex-synchbase)));
 +  xnarch_atomic_set(mutex-owner,
 +set_claimed(xnthread_handle(owner),
 +xnsynch_nsleepers(mutex-synchbase)));
 Ok. Can we avoid xnthread_handle() everywhere by storing its result in a
 local variable and reusing this local variable, here and in the other
 functions where xnthread_handle is used multiple times ?
 True for 'cur' here (will fix), but the other case have already been
 optimized as far as possible (i.e. within the respective scope).
 
 I have found several other spots where xnthread_handle is called
 multiple times in the same function. mutex_unlock comes to my mind.

Please point me to the concrete spot. I went again through all
xnthread_handle instances in this patch, checking that they aren't
needed due to owner changes, but only found the one above. Specifically
pthread_mutex_unlock, pse51_mutex_unlock_internal and
__pthread_mutex_unlock have only a single reference or reference
different threads.

Jan

-- 
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners - v2

2008-09-05 Thread Gilles Chanteperdrix
Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 @@ -230,18 +230,20 @@ static inline int mutex_save_count(xnthr
  
   mutex = shadow-mutex;
  
 - if (clear_claimed(xnarch_atomic_intptr_get(mutex-owner)) != cur)
 + if (clear_claimed(xnarch_atomic_get(mutex-owner)) !=
 + xnthread_handle(cur))
   return EPERM;
  
   *count_ptr = shadow-lockcnt;
  
 - if (likely(xnarch_atomic_intptr_cmpxchg(mutex-owner, cur, NULL) == 
 cur))
 + if (likely(xnarch_atomic_cmpxchg(mutex-owner, cur, XN_NO_HANDLE) ==
 +xnthread_handle(cur)))
   return 0;
  
   owner = xnsynch_wakeup_one_sleeper(mutex-synchbase);
 - xnarch_atomic_intptr_set
 - (mutex-owner,
 -  set_claimed(owner,xnsynch_nsleepers(mutex-synchbase)));
 + xnarch_atomic_set(mutex-owner,
 +   set_claimed(xnthread_handle(owner),
 +   xnsynch_nsleepers(mutex-synchbase)));
 Ok. Can we avoid xnthread_handle() everywhere by storing its result in a
 local variable and reusing this local variable, here and in the other
 functions where xnthread_handle is used multiple times ?
 True for 'cur' here (will fix), but the other case have already been
 optimized as far as possible (i.e. within the respective scope).
 I have found several other spots where xnthread_handle is called
 multiple times in the same function. mutex_unlock comes to my mind.
 
 Please point me to the concrete spot. I went again through all
 xnthread_handle instances in this patch, checking that they aren't
 needed due to owner changes, but only found the one above. Specifically
 pthread_mutex_unlock, pse51_mutex_unlock_internal and
 __pthread_mutex_unlock have only a single reference or reference
 different threads.

Ok. I had grepped for xnthread_handle and found that it was used twice in
mutex_unlock, but did not check whether it was used for the same thread.


-- 
 Gilles.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners

2008-09-02 Thread Gilles Chanteperdrix
Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 To improve robustness of the fast mutex implementation in POSIX (and
 later on in native), it is better to track the mutex owner by handle
 instead of kernel object pointer. Therefore, this patch changes
 __xn_sys_current (xeno_set_current) so that it returns
 xnthread_handle(current_thread). It furthermore converts the POSIX mutex
 implementation to pick up and store the lock owner as handle in the
 kernel/user-shared mutex. Finally it ensures that all skins (except for
 RTDM which must not mess around with foreign skins anyway) add their
 threads to the registry so that at least anonymous handles are
 available.

 As the value stored in the mutex variable is now an integer, we can
 switch over to xnarch_atomic_t, removing all atomic_intptr users.
 The current implementation allows RTDM threads to use POSIX skin
 mutexes. I do not see why this should change.
 Such mixup might technically be possible now. But there is neither a
 need nor does it make the resulting driver more portable. I don't want
 to introduce needless thread registration to RTDM just to cover
 theoretical use cases that should not exist in the first place.

 Nevertheless, some sanity check will have to be added to the
 to-be-written generic xnsynch support to catch missing thread handles.
 That makes sense for checking future skin implementations as well.
 That is overhead in the hot path, whereas adding the thread registration
 takes place in a non hot path.
 
 It will be a debug check.

Ok. But the other points remain.

-- 
 Gilles.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


[Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners

2008-09-01 Thread Jan Kiszka
To improve robustness of the fast mutex implementation in POSIX (and
later on in native), it is better to track the mutex owner by handle
instead of kernel object pointer. Therefore, this patch changes
__xn_sys_current (xeno_set_current) so that it returns
xnthread_handle(current_thread). It furthermore converts the POSIX mutex
implementation to pick up and store the lock owner as handle in the
kernel/user-shared mutex. Finally it ensures that all skins (except for
RTDM which must not mess around with foreign skins anyway) add their
threads to the registry so that at least anonymous handles are
available.

As the value stored in the mutex variable is now an integer, we can
switch over to xnarch_atomic_t, removing all atomic_intptr users.


[ PS: I wonder if we can merge xnmap and xnregistry one day. Maybe there
is a smart way to combine best of both, overcoming the redundancy. ]

---
 include/asm-generic/bits/bind.h|9 -
 include/asm-generic/bits/current.h |5 +-
 include/nucleus/types.h|   13 +++
 ksrc/nucleus/shadow.c  |   17 +++--
 ksrc/skins/native/Kconfig  |1 
 ksrc/skins/native/task.c   |   14 +++
 ksrc/skins/posix/Kconfig   |1 
 ksrc/skins/posix/cb_lock.h |   15 ++--
 ksrc/skins/posix/cond.c|   12 +++---
 ksrc/skins/posix/mutex.c   |   21 +++
 ksrc/skins/posix/mutex.h   |   66 +
 ksrc/skins/posix/syscall.c |   11 +++---
 ksrc/skins/posix/thread.c  |   16 
 ksrc/skins/psos+/Kconfig   |2 -
 ksrc/skins/rtai/Kconfig|1 
 ksrc/skins/rtai/task.c |   12 ++
 ksrc/skins/uitron/Kconfig  |1 
 ksrc/skins/uitron/task.c   |   11 ++
 ksrc/skins/vrtx/Kconfig|2 -
 ksrc/skins/vrtx/task.c |   20 ++-
 ksrc/skins/vxworks/Kconfig |2 -
 src/skins/posix/mutex.c|   27 +++
 22 files changed, 205 insertions(+), 74 deletions(-)

Index: b/include/asm-generic/bits/current.h
===
--- a/include/asm-generic/bits/current.h
+++ b/include/asm-generic/bits/current.h
@@ -2,14 +2,15 @@
 #define _XENO_ASM_GENERIC_CURRENT_H
 
 #include <pthread.h>
+#include <nucleus/types.h>
 
 extern pthread_key_t xeno_current_key;
 
 extern void xeno_set_current(void);
 
-static inline void *xeno_get_current(void)
+static inline xnhandle_t xeno_get_current(void)
 {
-   return pthread_getspecific(xeno_current_key);
+   return (xnhandle_t)pthread_getspecific(xeno_current_key);
 }
 
 #endif /* _XENO_ASM_GENERIC_CURRENT_H */
Index: b/include/nucleus/types.h
===
--- a/include/nucleus/types.h
+++ b/include/nucleus/types.h
@@ -61,6 +61,19 @@ typedef unsigned long xnhandle_t;
 
 #define XN_NO_HANDLE ((xnhandle_t)0)
 
+#define XN_HANDLE_SPARE0   ((xnhandle_t)0x1000)
+#define XN_HANDLE_SPARE1   ((xnhandle_t)0x2000)
+#define XN_HANDLE_SPARE2   ((xnhandle_t)0x4000)
+#define XN_HANDLE_SPARE3   ((xnhandle_t)0x8000)
+#define XN_HANDLE_SPARE_MASK   ((xnhandle_t)0xf000)
+
+#define xnhandle_mask_spare(handle)  ((handle) & ~XN_HANDLE_SPARE_MASK)
+#define xnhandle_test_spare(handle, bits)  (!!((handle) & (bits)))
+#define xnhandle_set_spare(handle, bits) \
+   do { (handle) |= (bits); } while (0)
+#define xnhandle_clear_spare(handle, bits) \
+   do { (handle) &= ~(bits); } while (0)
+
 struct xnintr;
 
 typedef int (*xnisr_t)(struct xnintr *intr);
Index: b/ksrc/nucleus/shadow.c
===
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -52,6 +52,7 @@
 #include <nucleus/trace.h>
 #include <nucleus/stat.h>
 #include <nucleus/sys_ppd.h>
+#include <nucleus/registry.h>
 #include <asm/xenomai/features.h>
 #include <asm/xenomai/syscall.h>
 #include <asm/xenomai/bits/shadow.h>
@@ -1908,13 +1909,21 @@ static int xnshadow_sys_sem_heap(struct
return __xn_safe_copy_to_user(us_hinfo, hinfo, sizeof(*us_hinfo));
 }
 
+#ifdef CONFIG_XENO_OPT_REGISTRY
 static int xnshadow_sys_current(struct pt_regs *regs)
 {
-   xnthread_t * __user *us_current, *cur = xnshadow_thread(current);
-   us_current = (xnthread_t *__user *) __xn_reg_arg1(regs);
+   xnthread_t *cur = xnshadow_thread(current);
+   xnhandle_t __user *us_handle;

-   return __xn_safe_copy_to_user(us_current, &cur, sizeof(*us_current));
+   if (!cur)
+   return -EPERM;
+
+   us_handle = (xnhandle_t __user *) __xn_reg_arg1(regs);
+
+   return __xn_safe_copy_to_user(us_handle, &xnthread_handle(cur),
+ sizeof(*us_handle));
 }
+#endif /* CONFIG_XENO_OPT_REGISTRY */
 
 static xnsysent_t __systab[] = {
[__xn_sys_migrate] = {xnshadow_sys_migrate, __xn_exec_current},
@@ -1925,7 +1934,9 @@ static xnsysent_t 

Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners

2008-09-01 Thread Gilles Chanteperdrix
Jan Kiszka wrote:
 To improve robustness of the fast mutex implementation in POSIX (and
 later on in native), it is better to track the mutex owner by handle
 instead of kernel object pointer. Therefore, this patch changes
 __xn_sys_current (xeno_set_current) so that it returns
 xnthread_handle(current_thread). It furthermore converts the POSIX mutex
 implementation to pick up and store the lock owner as handle in the
 kernel/user-shared mutex. Finally it ensures that all skins (except for
 RTDM which must not mess around with foreign skins anyway) add their
 threads to the registry so that at least anonymous handles are
 available.
 
 As the value stored in the mutex variable is now an integer, we can
 switch over to xnarch_atomic_t, removing all atomic_intptr users.

The current implementation allows RTDM threads to use POSIX skin
mutexes. I do not see why this should change.

-- 
 Gilles.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners

2008-09-01 Thread Jan Kiszka
Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 To improve robustness of the fast mutex implementation in POSIX (and
 later on in native), it is better to track the mutex owner by handle
 instead of kernel object pointer. Therefore, this patch changes
 __xn_sys_current (xeno_set_current) so that it returns
 xnthread_handle(current_thread). It furthermore converts the POSIX mutex
 implementation to pick up and store the lock owner as handle in the
 kernel/user-shared mutex. Finally it ensures that all skins (except for
 RTDM which must not mess around with foreign skins anyway) add their
 threads to the registry so that at least anonymous handles are
 available.

 As the value stored in the mutex variable is now an integer, we can
 switch over to xnarch_atomic_t, removing all atomic_intptr users.
 
 The current implementation allows RTDM threads to use POSIX skin
 mutexes. I do not see why this should change.

Such mixup might technically be possible now. But there is neither a
need nor does it make the resulting driver more portable. I don't want
to introduce needless thread registration to RTDM just to cover
theoretical use cases that should not exist in the first place.

Nevertheless, some sanity check will have to be added to the
to-be-written generic xnsynch support to catch missing thread handles.
That makes sense for checking future skin implementations as well.

Jan

-- 
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners

2008-09-01 Thread Gilles Chanteperdrix
Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 To improve robustness of the fast mutex implementation in POSIX (and
 later on in native), it is better to track the mutex owner by handle
 instead of kernel object pointer. Therefore, this patch changes
 __xn_sys_current (xeno_set_current) so that it returns
 xnthread_handle(current_thread). It furthermore converts the POSIX mutex
 implementation to pick up and store the lock owner as handle in the
 kernel/user-shared mutex. Finally it ensures that all skins (except for
 RTDM which must not mess around with foreign skins anyway) add their
 threads to the registry so that at least anonymous handles are
 available.

 As the value stored in the mutex variable is now an integer, we can
 switch over to xnarch_atomic_t, removing all atomic_intptr users.
 The current implementation allows RTDM threads to use POSIX skin
 mutexes. I do not see why this should change.
 
 Such mixup might technically be possible now. But there is neither a
 need nor does it make the resulting driver more portable. I don't want
 to introduce needless thread registration to RTDM just to cover
 theoretical use cases that should not exist in the first place.
 
 Nevertheless, some sanity check will have to be added to the
 to-be-written generic xnsynch support to catch missing thread handles.
 That make sense for checking future skin implementation as well.

That is overhead in the hot path, whereas adding the thread registration
takes place in a non-hot path.

-- 
 Gilles.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners

2008-09-01 Thread Jan Kiszka
Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 To improve robustness of the fast mutex implementation in POSIX (and
 later on in native), it is better to track the mutex owner by handle
 instead of kernel object pointer. Therefore, this patch changes
 __xn_sys_current (xeno_set_current) so that it returns
 xnthread_handle(current_thread). It furthermore converts the POSIX mutex
 implementation to pick up and store the lock owner as handle in the
 kernel/user-shared mutex. Finally it ensures that all skins (except for
 RTDM which must not mess around with foreign skins anyway) add their
 threads to the registry so that at least anonymous handles are
 available.

 As the value stored in the mutex variable is now an integer, we can
 switch over to xnarch_atomic_t, removing all atomic_intptr users.
 The current implementation allows RTDM threads to use POSIX skin
 mutexes. I do not see why this should change.
 Such mixup might technically be possible now. But there is neither a
 need nor does it make the resulting driver more portable. I don't want
 to introduce needless thread registration to RTDM just to cover
 theoretical use cases that should not exist in the first place.

 Nevertheless, some sanity check will have to be added to the
 to-be-written generic xnsynch support to catch missing thread handles.
 That make sense for checking future skin implementation as well.
 
 That is overhead in the hot path, whereas adding the thread registration
 takes place in a non hot path.

It will be a debug check.

Jan

-- 
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH 2/9] Switch to handle-based fast mutex owners

2008-09-01 Thread Jan Kiszka
Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 To improve robustness of the fast mutex implementation in POSIX (and
 later on in native), it is better to track the mutex owner by handle
 instead of kernel object pointer. Therefore, this patch changes
 __xn_sys_current (xeno_set_current) so that it returns
 xnthread_handle(current_thread). It furthermore converts the POSIX mutex
 implementation to pick up and store the lock owner as handle in the
 kernel/user-shared mutex. Finally it ensures that all skins (except for
 RTDM which must not mess around with foreign skins anyway) add their
 threads to the registry so that at least anonymous handles are
 available.

 As the value stored in the mutex variable is now an integer, we can
 switch over to xnarch_atomic_t, removing all atomic_intptr users.
 The current implementation allows RTDM threads to use POSIX skin
 mutexes. I do not see why this should change.
 Such mixup might technically be possible now. But there is neither a
 need nor does it make the resulting driver more portable. I don't want
 to introduce needless thread registration to RTDM just to cover
 theoretical use cases that should not exist in the first place.

 Nevertheless, some sanity check will have to be added to the
 to-be-written generic xnsynch support to catch missing thread handles.
 That make sense for checking future skin implementation as well.
 That is overhead in the hot path, whereas adding the thread registration
 takes place in a non hot path.
 
 It will be a debug check.

On the other hand: Once we really have generic fast xnsynch support,
this discussion should become pointless - RTDM will use them for its own
mutexes as well. So let's look at this patch only for the current
situation where there is no need for RTDM threads to have a handle as
they must not mess with foreign skin objects.

Jan

-- 
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core