Re: [Xenomai-core] Native: Fixing auto-cleanup

2009-10-20 Thread Philippe Gerum
On Sun, 2009-10-18 at 19:56 +0200, Jan Kiszka wrote:
 Philippe Gerum wrote:
  On Sun, 2009-10-18 at 14:54 +0200, Jan Kiszka wrote:
  Philippe Gerum wrote:
  On Fri, 2009-10-16 at 19:08 +0200, Jan Kiszka wrote:
  Hi,
 
  our automatic object cleanup on process termination is slightly broken
  for the native skin. The inline and macro magic behind
  __native_*_flush_rq() blindly calls rt_*_delete(), but that's not
  correct for mutexes (we can leak memory and/or corrupt the system heap),
  queues and heaps (we may leak shared heaps).
  Please elaborate regarding both queues and heaps (scenario).
  Master creates heap, slave binds to it, master wants to terminate (or is
  killed, doesn't matter), heap cannot be released as the slave is still
  bound to it, slave terminates but heap object is still reserved on the
  main heap = memory leak (just confirmed with a test case).
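For illustration, the scenario boils down to something like the following user-space sketch (names, sizes and timeouts are made up, error handling is trimmed; only the rt_heap_* calls matter here):

#include <native/heap.h>

/* Master side: create a shared heap, then terminate (or get killed)
 * while a slave process is still bound to it. With the broken
 * auto-cleanup, the heap descriptor stays reserved on the main heap. */
static RT_HEAP master_heap;

void master(void)
{
	void *blk;

	rt_heap_create(&master_heap, "leak-demo", 8192, H_SHARED);
	rt_heap_alloc(&master_heap, 1024, TM_NONBLOCK, &blk);
	/* ... master exits here; the heap cannot be released while the
	 * slave below still holds a binding to it. */
}

/* Slave side: bind to the master's heap by name, then terminate as
 * well; after both exits the heap object is leaked. */
void slave(void)
{
	RT_HEAP h;

	rt_heap_bind(&h, "leak-demo", TM_INFINITE);
	/* ... use the heap, terminate without a clean unbind. */
}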
  
  This fixes it:
  
  diff --git a/ksrc/skins/native/heap.c b/ksrc/skins/native/heap.c
  index 0a24735..0fcb3c2 100644
  --- a/ksrc/skins/native/heap.c
  +++ b/ksrc/skins/native/heap.c
  @@ -340,6 +340,11 @@ static void __heap_post_release(struct xnheap *h)
  xnpod_schedule();
   
 
 + xeno_mark_deleted(heap);
 

Actually, we need more than this:

diff --git a/ksrc/skins/native/heap.c b/ksrc/skins/native/heap.c
index 0a24735..5d43fa7 100644
--- a/ksrc/skins/native/heap.c
+++ b/ksrc/skins/native/heap.c
@@ -323,6 +323,7 @@ int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
 static void __heap_post_release(struct xnheap *h)
 {
RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);
+   int resched;
spl_t s;
 
xnlock_get_irqsave(&nklock, s);
@@ -332,14 +333,24 @@ static void __heap_post_release(struct xnheap *h)
if (heap->handle)
xnregistry_remove(heap->handle);
 
-   if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
+   xeno_mark_deleted(heap);
+
+   resched = xnsynch_destroy(&heap->synch_base);
+
+   xnlock_put_irqrestore(&nklock, s);
+
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+   if (heap->cpid) {
+   heap->cpid = 0;
+   xnfree(heap);
+   }
+#endif
+   if (resched)
/*
 * Some task has been woken up as a result of the
 * deletion: reschedule now.
 */
xnpod_schedule();
-
-   xnlock_put_irqrestore(&nklock, s);
 }
 
 /**
@@ -404,7 +415,7 @@ int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
 
/*
 * The heap descriptor has been marked as deleted before we
-* released the superlock thus preventing any sucessful
+* released the superlock thus preventing any successful
 * subsequent calls of rt_heap_delete(), so now we can
 * actually destroy it safely.
 */
diff --git a/ksrc/skins/native/queue.c b/ksrc/skins/native/queue.c
index 527bde8..35e292b 100644
--- a/ksrc/skins/native/queue.c
+++ b/ksrc/skins/native/queue.c
@@ -286,6 +286,7 @@ int rt_queue_create(RT_QUEUE *q,
 static void __queue_post_release(struct xnheap *heap)
 {
RT_QUEUE *q = container_of(heap, RT_QUEUE, bufpool);
+   int resched;
spl_t s;
 
xnlock_get_irqsave(&nklock, s);
@@ -295,14 +296,24 @@ static void __queue_post_release(struct xnheap *heap)
if (q->handle)
xnregistry_remove(q->handle);
 
-   if (xnsynch_destroy(&q->synch_base) == XNSYNCH_RESCHED)
+   xeno_mark_deleted(q);
+
+   resched = xnsynch_destroy(&q->synch_base);
+
+   xnlock_put_irqrestore(&nklock, s);
+
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+   if (q->cpid) {
+   q->cpid = 0;
+   xnfree(q);
+   }
+#endif
+   if (resched)
/*
-* Some task has been woken up as a result of
-* the deletion: reschedule now.
+* Some task has been woken up as a result of the
+* deletion: reschedule now.
 */
xnpod_schedule();
-
-   xnlock_put_irqrestore(&nklock, s);
 }
 
 /**
@@ -366,7 +377,7 @@ int rt_queue_delete_inner(RT_QUEUE *q, void __user *mapaddr)
 
/*
 * The queue descriptor has been marked as deleted before we
-* released the superlock thus preventing any sucessful
+* released the superlock thus preventing any successful
 * subsequent calls of rt_queue_delete(), so now we can
 * actually destroy the associated heap safely.
 */
diff --git a/ksrc/skins/native/syscall.c b/ksrc/skins/native/syscall.c
index 28c720e..a75ed3b 100644
--- a/ksrc/skins/native/syscall.c
+++ b/ksrc/skins/native/syscall.c
@@ -2073,24 +2073,17 @@ static int __rt_queue_delete(struct pt_regs *regs)
 {
RT_QUEUE_PLACEHOLDER ph;
RT_QUEUE *q;
-   int err;
 
if (__xn_safe_copy_from_user(&ph, (void __user *)__xn_reg_arg1(regs),
 sizeof(ph)))
return -EFAULT;
 

Re: [Xenomai-core] Native: Fixing auto-cleanup

2009-10-20 Thread Jan Kiszka
Philippe Gerum wrote:
 Well, I can tell you that there are quite a few corner cases you would
 face in rewriting the queue/heap cleanup code. I'm not saying this
 should not be done, I just won't merge this to 2.5.0 to avoid more
 regressions. Let's fix the obvious for now, such as the missing
 descriptor deallocation in the post-release callback, and schedule a
 global cleanup refactoring for 2.5.1.

I would suggest discussing this based on the patch series I'm currently
testing (I also wanted to run a xenosim build test, but that looks like
a futile effort - there is no chance to even get it configured on an
x86-64 host). Maybe you want to cherry-pick some of them, but maybe
they will show that there is enough benefit (== code simplification ==
reduced bug risk) to merge them all. They will be posted today.

Jan





Re: [Xenomai-core] [PATCH v2 2/3] nucleus: Include all heaps in statistics

2009-10-20 Thread Philippe Gerum
On Mon, 2009-10-19 at 21:09 +0200, Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
  Jan Kiszka wrote:
  @@ -234,12 +239,65 @@ int xnheap_init(xnheap_t *heap,
   
appendq(&heap->extents, &extent->link);
   
  +  vsnprintf(heap->name, sizeof(heap->name), name, args);
  +
  +  spin_lock(&heapq_lock);
  +  appendq(&heapq, &heap->stat_link);
  +  spin_unlock(&heapq_lock);
  
  You can not use a Linux spinlock in xnheap_init and xnheap_destroy:
  - this breaks the build for the simulator;
  - callers of xnheap_init and xnheap_destroy are not guaranteed to run on
  the root domain.
 
 Oh, yes, unfortunately. The callers appear to be fixable, but that's
 probably not worth it at this point.

There is nothing to fix here. It's part of the service definition to be
able to call it from primary mode.

  I will have to rewrite
 heap_read_proc to break out of nklock frequently. Also not nice, but
 less invasive.
 
 Jan
 
-- 
Philippe.





[Xenomai-core] [PATCH v3 8/9] native: Fix memory leak on heap/queue auto-deletion

2009-10-20 Thread Jan Kiszka
We are currently leaking user space heap/queue objects when the owner
terminates without deleting them first. Fix this by releasing the objects
in the corresponding cleanup callbacks, which are also called on owner
termination.

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 ksrc/skins/native/heap.c|5 +
 ksrc/skins/native/queue.c   |5 +
 ksrc/skins/native/syscall.c |   25 ++---
 3 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/ksrc/skins/native/heap.c b/ksrc/skins/native/heap.c
index f7411e8..886758c 100644
--- a/ksrc/skins/native/heap.c
+++ b/ksrc/skins/native/heap.c
@@ -341,6 +341,11 @@ static void __heap_post_release(struct xnheap *h)
xnpod_schedule();
 
xnlock_put_irqrestore(&nklock, s);
+
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+   if (heap->cpid)
+   xnfree(heap);
+#endif
 }
 
 /**
diff --git a/ksrc/skins/native/queue.c b/ksrc/skins/native/queue.c
index 3592a4a..249947a 100644
--- a/ksrc/skins/native/queue.c
+++ b/ksrc/skins/native/queue.c
@@ -303,6 +303,11 @@ static void __queue_post_release(struct xnheap *heap)
xnpod_schedule();
 
xnlock_put_irqrestore(&nklock, s);
+
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+   if (q->cpid)
+   xnfree(q);
+#endif
 }
 
 /**
diff --git a/ksrc/skins/native/syscall.c b/ksrc/skins/native/syscall.c
index 28c720e..cb9f075 100644
--- a/ksrc/skins/native/syscall.c
+++ b/ksrc/skins/native/syscall.c
@@ -2073,24 +2073,17 @@ static int __rt_queue_delete(struct pt_regs *regs)
 {
RT_QUEUE_PLACEHOLDER ph;
RT_QUEUE *q;
-   int err;
 
if (__xn_safe_copy_from_user(&ph, (void __user *)__xn_reg_arg1(regs),
 sizeof(ph)))
return -EFAULT;
 
q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);
-
if (!q)
-   err = -ESRCH;
-   else {
-   /* Callee will check the queue descriptor for validity again. */
-   err = rt_queue_delete_inner(q, (void __user *)ph.mapbase);
-   if (!err && q->cpid)
-   xnfree(q);
-   }
+   return -ESRCH;
 
-   return err;
+   /* Callee will check the queue descriptor for validity again. */
+   return rt_queue_delete_inner(q, (void __user *)ph.mapbase);
 }
 
 /*
@@ -2604,7 +2597,6 @@ static int __rt_heap_delete(struct pt_regs *regs)
 {
RT_HEAP_PLACEHOLDER ph;
RT_HEAP *heap;
-   int err;
 
if (__xn_safe_copy_from_user(&ph, (void __user *)__xn_reg_arg1(regs),
 sizeof(ph)))
@@ -2613,15 +2605,10 @@ static int __rt_heap_delete(struct pt_regs *regs)
heap = (RT_HEAP *)xnregistry_fetch(ph.opaque);
 
if (!heap)
-   err = -ESRCH;
-   else {
-   /* Callee will check the heap descriptor for validity again. */
-   err = rt_heap_delete_inner(heap, (void __user *)ph.mapbase);
-   if (!err && heap->cpid)
-   xnfree(heap);
-   }
+   return -ESRCH;
 
-   return err;
+   /* Callee will check the heap descriptor for validity again. */
+   return rt_heap_delete_inner(heap, (void __user *)ph.mapbase);
 }
 
 /*




[Xenomai-core] [PATCH v3 2/9] nucleus: Use Linux spin lock for heap list management

2009-10-20 Thread Jan Kiszka
No need for hard nklock protection of kheapq and the map counter, a
normal spin lock suffices as all users must run over the root thread
anyway.

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 ksrc/nucleus/heap.c |   26 --
 1 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index 9ca2591..9ba5227 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -951,8 +951,10 @@ EXPORT_SYMBOL_GPL(xnheap_check_block);
 #include <linux/device.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
+#include <linux/spinlock.h>
 
 static DEFINE_XNQUEUE(kheapq); /* Shared heap queue. */
+static DEFINE_SPINLOCK(kheapq_lock);
 
 static inline void *__alloc_and_reserve_heap(size_t size, int kmflags)
 {
@@ -1022,14 +1024,13 @@ static void __unreserve_and_free_heap(void *ptr, size_t size, int kmflags)
 static void xnheap_vmclose(struct vm_area_struct *vma)
 {
xnheap_t *heap = vma->vm_private_data;
-   spl_t s;
 
-   xnlock_get_irqsave(&nklock, s);
+   spin_lock(&kheapq_lock);
 
if (atomic_dec_and_test(&heap->archdep.numaps)) {
if (heap->archdep.release) {
removeq(&kheapq, &heap->link);
-   xnlock_put_irqrestore(&nklock, s);
+   spin_unlock(&kheapq_lock);
__unreserve_and_free_heap(heap->archdep.heapbase,
  xnheap_extentsize(heap),
  heap->archdep.kmflags);
@@ -1038,7 +1039,7 @@ static void xnheap_vmclose(struct vm_area_struct *vma)
}
}
 
-   xnlock_put_irqrestore(&nklock, s);
+   spin_unlock(&kheapq_lock);
 }
 
 static struct vm_operations_struct xnheap_vmops = {
@@ -1068,9 +1069,8 @@ static int xnheap_ioctl(struct inode *inode,
 {
xnheap_t *heap;
int err = 0;
-   spl_t s;
 
-   xnlock_get_irqsave(&nklock, s);
+   spin_lock(&kheapq_lock);
 
heap = __validate_heap_addr((void *)arg);
 
@@ -1083,7 +1083,7 @@ static int xnheap_ioctl(struct inode *inode,
 
   unlock_and_exit:
 
-   xnlock_put_irqrestore(&nklock, s);
+   spin_unlock(&kheapq_lock);
 
return err;
 }
@@ -1148,7 +1148,6 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 int xnheap_init_mapped(xnheap_t *heap, u_long heapsize, int memflags)
 {
void *heapbase;
-   spl_t s;
int err;
 
/* Caller must have accounted for internal overhead. */
@@ -1172,9 +1171,9 @@ int xnheap_init_mapped(xnheap_t *heap, u_long heapsize, int memflags)
heap->archdep.heapbase = heapbase;
heap->archdep.release = NULL;
 
-   xnlock_get_irqsave(&nklock, s);
+   spin_lock(&kheapq_lock);
appendq(&kheapq, &heap->link);
-   xnlock_put_irqrestore(&nklock, s);
+   spin_unlock(&kheapq_lock);
 
return 0;
 }
@@ -1184,20 +1183,19 @@ int xnheap_destroy_mapped(xnheap_t *heap, void (*release)(struct xnheap *heap),
 {
int ret = 0, ccheck;
unsigned long len;
-   spl_t s;
 
ccheck = mapaddr ? 1 : 0;
 
-   xnlock_get_irqsave(&nklock, s);
+   spin_lock(&kheapq_lock);
 
if (atomic_read(&heap->archdep.numaps) > ccheck) {
heap->archdep.release = release;
-   xnlock_put_irqrestore(&nklock, s);
+   spin_unlock(&kheapq_lock);
return -EBUSY;
}
 
removeq(&kheapq, &heap->link); /* Prevent further mapping. */
-   xnlock_put_irqrestore(&nklock, s);
+   spin_unlock(&kheapq_lock);
 
len = xnheap_extentsize(heap);
 




[Xenomai-core] [PATCH v3 0/9] heap setup/cleanup fixes, refactorings & more

2009-10-20 Thread Jan Kiszka
Third round of this series. This time I dug deeper into the heap
management, trying to simplify its use which should remove a few race
conditions of the existing code.

The central change is patch 5: xnheap_destroy_mapped will no longer
return an error; it always falls back to deferred deletion in case the
heap's memory is still in use. This simplifies its use a lot, allowing
among other things to drop -EBUSY from the list of possible return codes
of rt_queue/heap_delete.
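
To illustrate what this buys at the call sites, here is a rough sketch of a
skin-level delete path after the change (condensed and hypothetical, reusing
the __heap_post_release callback from the earlier mails; the actual patches
differ in detail):

int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
{
	/* ... validate the descriptor and unregister it under nklock ... */

	/*
	 * No return code left to propagate: if the heap is still mapped
	 * somewhere, __heap_post_release() simply runs later, on the
	 * last unmapping.
	 */
	xnheap_destroy_mapped(&heap->heap_base, __heap_post_release, mapaddr);

	return 0;
}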

The series furthermore contains an attempt to fix RTAI's shm code, fixes
the known leaks of rt_mutex/queue/heap auto-deletion and introduces
complete heap statistics (this time without using a Linux spin lock).

Please pull the series (or cherry-pick individual patches) from

git://xenomai.org/xenomai-jki.git for-upstream

if there are no concerns.

Jan Kiszka (9):
  native: Release fastlock to the proper heap
  nucleus: Use Linux spin lock for heap list management
  nucleus: Fix race window in heap mapping procedure
  nucleus: xnheap_destroy does not fail
  nucleus: Avoid returning errors from xnheap_destroy_mapped
  rtai: Try to fix _shm_free
  native: Do not requeue on auto-cleanup errors
  native: Fix memory leak on heap/queue auto-deletion
  nucleus: Include all heaps in statistics

 include/asm-generic/bits/heap.h |2 +-
 include/asm-generic/system.h|2 +-
 include/native/ppd.h|   16 +--
 include/nucleus/heap.h  |   32 +++--
 ksrc/drivers/ipc/iddp.c |3 +-
 ksrc/drivers/ipc/xddp.c |6 +-
 ksrc/nucleus/heap.c |  258 +++
 ksrc/nucleus/module.c   |2 +-
 ksrc/nucleus/pod.c  |5 +-
 ksrc/nucleus/shadow.c   |5 +-
 ksrc/skins/native/heap.c|   41 +++---
 ksrc/skins/native/mutex.c   |   14 ++-
 ksrc/skins/native/pipe.c|4 +-
 ksrc/skins/native/queue.c   |   34 +++---
 ksrc/skins/native/syscall.c |   25 +---
 ksrc/skins/posix/shm.c  |4 +-
 ksrc/skins/psos+/rn.c   |6 +-
 ksrc/skins/rtai/shm.c   |   47 ---
 ksrc/skins/vrtx/heap.c  |6 +-
 ksrc/skins/vrtx/syscall.c   |3 +-
 20 files changed, 317 insertions(+), 198 deletions(-)

[1] http://thread.gmane.org/gmane.linux.real-time.xenomai.devel/6559



[Xenomai-core] [PATCH v3 7/9] native: Do not requeue on auto-cleanup errors

2009-10-20 Thread Jan Kiszka
Migrating an object to the global queue in case of an error during
deletion is racy and may paper over potential bugs. Now that the main
reason for this approach no longer exists (rt_heap/queue_delete
will not return EBUSY anymore), replace the requeueing with basic
consistency checks.

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 include/native/ppd.h |   16 
 1 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/include/native/ppd.h b/include/native/ppd.h
index 3dbda6a..c6e7479 100644
--- a/include/native/ppd.h
+++ b/include/native/ppd.h
@@ -101,19 +101,11 @@ static inline xeno_rholder_t *xeno_get_rholder(void)
xnlock_put_irqrestore(&nklock, s);  \
obj = rlink2##__name(holder);   \
err = rt_##__name##_delete(obj);\
+   XENO_ASSERT(NATIVE, !err || err == -EIDRM, );   \
__xeno_trace_release(#__name, obj, err);\
-   if (unlikely(err)) {\
-   if ((__rq) != &__native_global_rholder.__name##q) { \
-   xnlock_get_irqsave(&nklock, s); \
-   nholder = popq((rq), holder);   \
-   appendq(&__native_global_rholder.__name##q, holder); \
-   obj->rqueue = &__native_global_rholder.__name##q; \
-   }   \
-   } else {\
-   if (__release)  \
-   __xeno_release_obj(obj);\
-   xnlock_get_irqsave(&nklock, s); \
-   }   \
+   if (!err && __release)  \
+   __xeno_release_obj(obj);\
+   xnlock_get_irqsave(&nklock, s); \
}   \
xnlock_put_irqrestore(&nklock, s);  \
} while(0)




[Xenomai-core] [PATCH v3 5/9] nucleus: Avoid returning errors from xnheap_destroy_mapped

2009-10-20 Thread Jan Kiszka
Allowing xnheap_destroy_mapped to return an error and then attempting to
recover from it does not work out very well: corner cases are racy and
not transparent to the user, and proper error handling imposes a lot of
complexity on the caller - if it actually bothers to check the return
value...

Fortunately, there is no reason for this function to fail: If the heap
is still mapped, just install the provided cleanup handler and switch to
deferred removal. If the unmapping fails, we either raced with some
other caller of unmap or user space provided a bogus address, or
something else is wrong. In any case, leaving the cleanup callback
behind is the best we can do anyway.

Removing the return value immediately allows us to simplify the callers,
namely rt_queue_delete and rt_heap_delete.

Note: This is still not 100% waterproof. If we issue
xnheap_destroy_mapped from module cleanup passing a release handler
that belongs to the module text, deferred release will cause a crash.
But this corner case is no new regression, so let's keep the head in the
sand.

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 include/nucleus/heap.h|6 +++---
 ksrc/nucleus/heap.c   |   45 +++--
 ksrc/skins/native/heap.c  |   21 ++---
 ksrc/skins/native/queue.c |   21 ++---
 ksrc/skins/rtai/shm.c |6 +-
 5 files changed, 43 insertions(+), 56 deletions(-)

diff --git a/include/nucleus/heap.h b/include/nucleus/heap.h
index ca691bf..44db738 100644
--- a/include/nucleus/heap.h
+++ b/include/nucleus/heap.h
@@ -204,9 +204,9 @@ int xnheap_init_mapped(xnheap_t *heap,
   u_long heapsize,
   int memflags);
 
-int xnheap_destroy_mapped(xnheap_t *heap,
- void (*release)(struct xnheap *heap),
- void __user *mapaddr);
+void xnheap_destroy_mapped(xnheap_t *heap,
+  void (*release)(struct xnheap *heap),
+  void __user *mapaddr);
 
 #define xnheap_mapped_offset(heap,ptr) \
 (((caddr_t)(ptr)) - ((caddr_t)(heap)-archdep.heapbase))
diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index 4958daa..96c46f8 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -1173,42 +1173,51 @@ int xnheap_init_mapped(xnheap_t *heap, u_long heapsize, int memflags)
return 0;
 }
 
-int xnheap_destroy_mapped(xnheap_t *heap, void (*release)(struct xnheap *heap),
- void __user *mapaddr)
+void xnheap_destroy_mapped(xnheap_t *heap,
+  void (*release)(struct xnheap *heap),
+  void __user *mapaddr)
 {
-   int ret = 0, ccheck;
unsigned long len;
 
-   ccheck = mapaddr ? 1 : 0;
+   /*
+* Trying to unmap user memory without providing a release handler for
+* deferred cleanup is a bug.
+*/
+   XENO_ASSERT(NUCLEUS, !mapaddr || release, /* nop */);
 
spin_lock(&kheapq_lock);
 
-   if (heap->archdep.numaps > ccheck) {
-   heap->archdep.release = release;
-   spin_unlock(&kheapq_lock);
-   return -EBUSY;
-   }
-
removeq(&kheapq, &heap->link); /* Prevent further mapping. */
+
+   heap->archdep.release = release;
+
+   if (heap->archdep.numaps == 0)
+   mapaddr = NULL; /* nothing left to unmap */
+   else
+   release = NULL; /* will be called by Linux on unmap */
+
spin_unlock(&kheapq_lock);
 
len = xnheap_extentsize(heap);
 
if (mapaddr) {
down_write(&current->mm->mmap_sem);
-   heap->archdep.release = NULL;
-   ret = do_munmap(current->mm, (unsigned long)mapaddr, len);
+   do_munmap(current->mm, (unsigned long)mapaddr, len);
up_write(&current->mm->mmap_sem);
}
 
-   if (ret == 0) {
+   if (heap->archdep.numaps > 0) {
+   /* The release handler is supposed to clean up the rest. */
+   XENO_ASSERT(NUCLEUS, heap->archdep.release, /* nop */);
+   return;
+   }
+
+   if (!mapaddr) {
__unreserve_and_free_heap(heap->archdep.heapbase, len,
  heap->archdep.kmflags);
if (release)
release(heap);
}
-
-   return ret;
 }
 
 static struct file_operations xnheap_fops = {
@@ -1260,11 +1269,11 @@ int xnheap_init_mapped(xnheap_t *heap, u_long heapsize, int memflags)
return -ENOMEM;
 }
 
-int xnheap_destroy_mapped(xnheap_t *heap, void (*release)(struct xnheap *heap),
- void __user *mapaddr)
+void xnheap_destroy_mapped(xnheap_t *heap,
+  void (*release)(struct xnheap *heap),
+  void __user *mapaddr)
 {
xnheap_destroy(heap, xnheap_free_extent, NULL);
-   return 0;
 }
 #endif /* !CONFIG_XENO_OPT_PERVASIVE */
 
diff --git 

[Xenomai-core] [PATCH v3 3/9] nucleus: Fix race window in heap mapping procedure

2009-10-20 Thread Jan Kiszka
There is a race window between setting the heap address via the IOCTL
and actually mapping it. A heap that is invalidated after the IOCTL can
still be mapped by user space. Fix this by pushing the heap validation
into xnheap_mmap.

Moreover, make sure that we update archdep.numaps along with declaring
the heap valid. Otherwise xnheap_destroy_mapped may draw wrong
conclusions about the heap mapping state.

As archdep.numaps is now exclusively modified under heapq_lock, we can
switch it to a non-atomic counter.

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 include/asm-generic/bits/heap.h |2 +
 include/asm-generic/system.h|2 +
 ksrc/nucleus/heap.c |   55 +++
 ksrc/skins/native/heap.c|7 +++--
 4 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/include/asm-generic/bits/heap.h b/include/asm-generic/bits/heap.h
index 40d31c8..4d0c41a 100644
--- a/include/asm-generic/bits/heap.h
+++ b/include/asm-generic/bits/heap.h
@@ -27,7 +27,7 @@
 static inline void xnarch_init_heapcb (xnarch_heapcb_t *hcb)
 
 {
-atomic_set(&hcb->numaps,0);
+hcb->numaps = 0;
 hcb->kmflags = 0;
 hcb->heapbase = NULL;
 }
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h
index fcec613..c0f1698 100644
--- a/include/asm-generic/system.h
+++ b/include/asm-generic/system.h
@@ -270,7 +270,7 @@ struct xnheap;
 
 typedef struct xnarch_heapcb {
 
-   atomic_t numaps;/* # of active user-space mappings. */
+   unsigned long numaps;   /* # of active user-space mappings. */
int kmflags;/* Kernel memory flags (0 if vmalloc()). */
void *heapbase; /* Shared heap memory base. */
void (*release)(struct xnheap *heap); /* callback upon last unmap */
diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index 9ba5227..0a9bfdf 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -1027,7 +1027,7 @@ static void xnheap_vmclose(struct vm_area_struct *vma)
 
spin_lock(&kheapq_lock);
 
-   if (atomic_dec_and_test(&heap->archdep.numaps)) {
+   if (--heap->archdep.numaps == 0) {
if (heap->archdep.release) {
removeq(&kheapq, &heap->link);
spin_unlock(&kheapq_lock);
@@ -1067,31 +1067,15 @@ static inline xnheap_t *__validate_heap_addr(void *addr)
 static int xnheap_ioctl(struct inode *inode,
struct file *file, unsigned int cmd, unsigned long arg)
 {
-   xnheap_t *heap;
-   int err = 0;
-
-   spin_lock(&kheapq_lock);
-
-   heap = __validate_heap_addr((void *)arg);
-
-   if (!heap) {
-   err = -EINVAL;
-   goto unlock_and_exit;
-   }
-
-   file->private_data = heap;
-
-  unlock_and_exit:
-
-   spin_unlock(&kheapq_lock);
-
-   return err;
+   file->private_data = (void *)arg;
+   return 0;
 }
 
 static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 {
unsigned long offset, size, vaddr;
xnheap_t *heap;
+   int err;
 
if (vma->vm_ops != NULL || file->private_data == NULL)
/* Caller should mmap() once for a given file instance, after
@@ -1103,21 +1087,34 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 
offset = vma->vm_pgoff << PAGE_SHIFT;
size = vma->vm_end - vma->vm_start;
-   heap = (xnheap_t *)file->private_data;
 
+   spin_lock(&kheapq_lock);
+
+   heap = __validate_heap_addr(file->private_data);
+   if (!heap) {
+   spin_unlock(&kheapq_lock);
+   return -EINVAL;
+   }
+
+   heap->archdep.numaps++;
+
+   spin_unlock(&kheapq_lock);
+
+   err = -ENXIO;
if (offset + size > xnheap_extentsize(heap))
-   return -ENXIO;
+   goto deref_out;
 
if (countq(&heap->extents) > 1)
/* Cannot map multi-extent heaps, we need the memory
   area we map from to be contiguous. */
-   return -ENXIO;
+   goto deref_out;
 
vma->vm_ops = &xnheap_vmops;
vma->vm_private_data = file->private_data;
 
vaddr = (unsigned long)heap->archdep.heapbase + offset;
 
+   err = -EAGAIN;
if ((heap->archdep.kmflags & ~XNHEAP_GFP_NONCACHED) == 0) {
unsigned long maddr = vma->vm_start;
 
@@ -1126,7 +1123,7 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 
while (size  0) {
if (xnarch_remap_vm_page(vma, maddr, vaddr))
-   return -EAGAIN;
+   goto deref_out;
 
maddr += PAGE_SIZE;
vaddr += PAGE_SIZE;
@@ -1136,13 +1133,15 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
  vma->vm_start,
  virt_to_phys((void *)vaddr),

[Xenomai-core] [PATCH v3 1/9] native: Release fastlock to the proper heap

2009-10-20 Thread Jan Kiszka
Don't assume rt_mutex_delete is only called for in-kernel users; it may
also be triggered via auto-cleanup on user space objects. So check for
the creator and release the fastlock to the correct heap.

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 ksrc/skins/native/mutex.c |   14 --
 1 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/ksrc/skins/native/mutex.c b/ksrc/skins/native/mutex.c
index 20eb484..6cf7eb1 100644
--- a/ksrc/skins/native/mutex.c
+++ b/ksrc/skins/native/mutex.c
@@ -47,6 +47,7 @@
 #include <nucleus/pod.h>
 #include <nucleus/registry.h>
 #include <nucleus/heap.h>
+#include <nucleus/sys_ppd.h>
 #include <native/task.h>
 #include <native/mutex.h>
 
@@ -316,8 +317,17 @@ int rt_mutex_delete(RT_MUTEX *mutex)
err = rt_mutex_delete_inner(mutex);
 
 #ifdef CONFIG_XENO_FASTSYNCH
-   if (!err)
-   xnfree(mutex->synch_base.fastlock);
+   if (!err) {
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+   if (mutex->cpid) {
+   int global = xnsynch_test_flags(&mutex->synch_base,
+   RT_MUTEX_EXPORTED);
+   xnheap_free(&xnsys_ppd_get(global)->sem_heap,
+   mutex->synch_base.fastlock);
+   } else
+#endif /* CONFIG_XENO_OPT_PERVASIVE */
+   xnfree(mutex->synch_base.fastlock);
+   }
 #endif /* CONFIG_XENO_FASTSYNCH */
 
return err;




[Xenomai-core] [PATCH v3 6/9] rtai: Try to fix _shm_free

2009-10-20 Thread Jan Kiszka
This is totally untested but should not make things worse than they
already are.

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 ksrc/skins/rtai/shm.c |   31 +++
 1 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/ksrc/skins/rtai/shm.c b/ksrc/skins/rtai/shm.c
index 4c56495..21c3b07 100644
--- a/ksrc/skins/rtai/shm.c
+++ b/ksrc/skins/rtai/shm.c
@@ -260,19 +260,24 @@ void *rt_heap_open(unsigned long name, int size, int suprt)
return _shm_alloc(name, size, suprt, 0, &opaque);
 }
 
-#ifndef CONFIG_XENO_OPT_PERVASIVE
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+static void __heap_flush_shared(xnheap_t *heap)
+{
+   xnheap_free(&kheap, heap);
+}
+#else /* !CONFIG_XENO_OPT_PERVASIVE */
 static void __heap_flush_private(xnheap_t *heap,
 void *heapmem, u_long heapsize, void *cookie)
 {
xnarch_free_host_mem(heapmem, heapsize);
 }
-#endif /* CONFIG_XENO_OPT_PERVASIVE */
+#endif /* !CONFIG_XENO_OPT_PERVASIVE */
 
 static int _shm_free(unsigned long name)
 {
-   int ret = 0;
xnholder_t *holder;
xnshm_a_t *p;
+   int ret;
spl_t s;
 
xnlock_get_irqsave(&nklock, s);
@@ -283,27 +288,29 @@ static int _shm_free(unsigned long name)
p = link2shma(holder);
 
if (p->name == name && --p->ref == 0) {
+   removeq(&xnshm_allocq, &p->link);
if (p->handle)
xnregistry_remove(p->handle);
+
+   xnlock_put_irqrestore(&nklock, s);
+
if (p->heap == &kheap)
xnheap_free(&kheap, p->chunk);
else {
-   /* Should release lock here? 
-* Can destroy_mapped suspend ?
-* [YES!]
-*/
 #ifdef CONFIG_XENO_OPT_PERVASIVE
-   xnheap_destroy_mapped(p->heap, NULL, NULL);
+   xnheap_destroy_mapped(p->heap,
+ __heap_flush_shared,
+ NULL);
 #else /* !CONFIG_XENO_OPT_PERVASIVE */
xnheap_destroy(p->heap,
   __heap_flush_private, NULL);
-#endif /* !CONFIG_XENO_OPT_PERVASIVE */
xnheap_free(&kheap, p->heap);
+#endif /* !CONFIG_XENO_OPT_PERVASIVE */
}
-   removeq(&xnshm_allocq, &p->link);
ret = p->size;
xnheap_free(&kheap, p);
-   break;
+
+   return ret;
}
 
holder = nextq(&xnshm_allocq, holder);
@@ -311,7 +318,7 @@ static int _shm_free(unsigned long name)
 
xnlock_put_irqrestore(&nklock, s);
 
-   return ret;
+   return 0;
 }
 
 int rt_shm_free(unsigned long name)




[Xenomai-core] [PATCH v3 9/9] nucleus: Include all heaps in statistics

2009-10-20 Thread Jan Kiszka
This extends /proc/xenomai/heap with statistics about all currently used
heaps. It takes care to flush nklock while iterating over this potentially
long list.
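
For context, the lock-flushing walk mentioned above follows a pattern like
this sketch (simplified and hypothetical; the real heap_read_proc in the
patch differs in detail, but heapq and heapq_rev are the queue and revision
counter it introduces):

static void walk_heaps(void)
{
	unsigned long rev;
	xnholder_t *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
restart:
	rev = heapq_rev;

	for (holder = getheadq(&heapq); holder;
	     holder = nextq(&heapq, holder)) {
		xnheap_t *heap = container_of(holder, xnheap_t, stat_link);

		/* ... snapshot heap->name, size and usage counters ... */

		/* Flush the lock between elements to bound latency. */
		xnlock_put_irqrestore(&nklock, s);
		xnlock_get_irqsave(&nklock, s);

		if (heapq_rev != rev)
			goto restart; /* list changed while unlocked */
	}

	xnlock_put_irqrestore(&nklock, s);
}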

Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---

 include/nucleus/heap.h|   12 +++-
 ksrc/drivers/ipc/iddp.c   |3 +
 ksrc/drivers/ipc/xddp.c   |6 ++
 ksrc/nucleus/heap.c   |  131 -
 ksrc/nucleus/module.c |2 -
 ksrc/nucleus/pod.c|5 +-
 ksrc/nucleus/shadow.c |5 +-
 ksrc/skins/native/heap.c  |6 +-
 ksrc/skins/native/pipe.c  |4 +
 ksrc/skins/native/queue.c |6 +-
 ksrc/skins/posix/shm.c|4 +
 ksrc/skins/psos+/rn.c |6 +-
 ksrc/skins/rtai/shm.c |7 ++
 ksrc/skins/vrtx/heap.c|6 +-
 ksrc/skins/vrtx/syscall.c |3 +
 15 files changed, 169 insertions(+), 37 deletions(-)

diff --git a/include/nucleus/heap.h b/include/nucleus/heap.h
index 44db738..f653cd7 100644
--- a/include/nucleus/heap.h
+++ b/include/nucleus/heap.h
@@ -115,6 +115,10 @@ typedef struct xnheap {
 
XNARCH_DECL_DISPLAY_CONTEXT();
 
+   xnholder_t stat_link;   /* Link in heapq */
+
+   char name[48];
+
 } xnheap_t;
 
 extern xnheap_t kheap;
@@ -202,7 +206,8 @@ void xnheap_cleanup_proc(void);
 
 int xnheap_init_mapped(xnheap_t *heap,
   u_long heapsize,
-  int memflags);
+  int memflags,
+  const char *name, ...);
 
 void xnheap_destroy_mapped(xnheap_t *heap,
   void (*release)(struct xnheap *heap),
@@ -224,7 +229,10 @@ void xnheap_destroy_mapped(xnheap_t *heap,
 int xnheap_init(xnheap_t *heap,
void *heapaddr,
u_long heapsize,
-   u_long pagesize);
+   u_long pagesize,
+   const char *name, ...);
+
+void xnheap_set_name(xnheap_t *heap, const char *name, ...);
 
 void xnheap_destroy(xnheap_t *heap,
void (*flushfn)(xnheap_t *heap,
diff --git a/ksrc/drivers/ipc/iddp.c b/ksrc/drivers/ipc/iddp.c
index a407946..b6382f1 100644
--- a/ksrc/drivers/ipc/iddp.c
+++ b/ksrc/drivers/ipc/iddp.c
@@ -559,7 +559,8 @@ static int __iddp_bind_socket(struct rtipc_private *priv,
}
 
ret = xnheap_init(&sk->privpool,
- poolmem, poolsz, XNHEAP_PAGE_SIZE);
+ poolmem, poolsz, XNHEAP_PAGE_SIZE,
+ "ippd: %d", port);
if (ret) {
xnarch_free_host_mem(poolmem, poolsz);
goto fail;
diff --git a/ksrc/drivers/ipc/xddp.c b/ksrc/drivers/ipc/xddp.c
index f62147a..a5dafef 100644
--- a/ksrc/drivers/ipc/xddp.c
+++ b/ksrc/drivers/ipc/xddp.c
@@ -703,7 +703,7 @@ static int __xddp_bind_socket(struct rtipc_private *priv,
}
 
ret = xnheap_init(&sk->privpool,
- poolmem, poolsz, XNHEAP_PAGE_SIZE);
+ poolmem, poolsz, XNHEAP_PAGE_SIZE, "");
if (ret) {
xnarch_free_host_mem(poolmem, poolsz);
goto fail;
@@ -746,6 +746,10 @@ static int __xddp_bind_socket(struct rtipc_private *priv,
sk->minor = ret;
sa->sipc_port = ret;
sk->name = *sa;
+
+   if (poolsz > 0)
+   xnheap_set_name(&sk->bufpool, "xddp: %d", sa->sipc_port);
+
/* Set default destination if unset at binding time. */
if (sk->peer.sipc_port < 0)
sk->peer = *sa;
diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index 96c46f8..793d1c5 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -76,6 +76,9 @@ EXPORT_SYMBOL_GPL(kheap);
 xnheap_t kstacks;  /* Private stack pool */
 #endif
 
+static DEFINE_XNQUEUE(heapq);  /* Heap list for /proc reporting */
+static unsigned long heapq_rev;
+
 static void init_extent(xnheap_t *heap, xnextent_t *extent)
 {
caddr_t freepage;
@@ -108,7 +111,7 @@ static void init_extent(xnheap_t *heap, xnextent_t *extent)
  */
 
 /*!
- * \fn xnheap_init(xnheap_t *heap,void *heapaddr,u_long heapsize,u_long pagesize)
+ * \fn xnheap_init(xnheap_t *heap,void *heapaddr,u_long heapsize,u_long pagesize,const char *name,...)
  * \brief Initialize a memory heap.
  *
  * Initializes a memory heap suitable for time-bounded allocation
@@ -145,6 +148,10 @@ static void init_extent(xnheap_t *heap, xnextent_t *extent)
  * best one for your needs. In the current implementation, pagesize
  * must be a power of two in the range [ 8 .. 32768 ] inclusive.
  *
+ * @param name Name displayed in statistic outputs. This parameter can
+ * be a format string, in which case succeeding parameters will be used
+ * to resolve the final name.
+ *
  * @return 0 is returned upon success, or one of the following error
  * codes:
  *
@@ -161,12 +168,13 @@ static void init_extent(xnheap_t *heap, xnextent_t *extent)
  * Rescheduling: never.
  */
 
-int 

Re: [Xenomai-core] [PATCH v2 2/3] nucleus: Include all heaps in statistics

2009-10-20 Thread Jan Kiszka
Philippe Gerum wrote:
 On Mon, 2009-10-19 at 21:09 +0200, Jan Kiszka wrote:
 Gilles Chanteperdrix wrote:
 Jan Kiszka wrote:
 @@ -234,12 +239,65 @@ int xnheap_init(xnheap_t *heap,
  
appendq(&heap->extents, &extent->link);
  
 +  vsnprintf(heap->name, sizeof(heap->name), name, args);
 +
 +  spin_lock(&heapq_lock);
 +  appendq(&heapq, &heap->stat_link);
 +  spin_unlock(&heapq_lock);
 You can not use a Linux spinlock in xnheap_init and xnheap_destroy:
 - this breaks the build for the simulator;
 - callers of xnheap_init and xnheap_destroy are not guaranteed to run on
 the root domain.
 Oh, yes, unfortunately. The callers appear to be fixable, but that's
 probably not worth it at this point.
 
 There is nothing to fix here. It's part of the service definition to be
 able to call it from primary mode.

Strictly speaking, no. But given that xnheap_init_mapped does not fulfill
this promise and that quite a few users have an either-or use of this
tuple, it doesn't buy us much to allow primary mode.

Jan





Re: [Xenomai-core] [PULL REQUEST] analogy renaming + many bugfixes

2009-10-20 Thread Philippe Gerum
On Tue, 2009-10-20 at 15:22 +0200, Philippe Gerum wrote:
 On Mon, 2009-10-19 at 23:18 +0200, Alexis Berlemont wrote:
  The following changes since commit 8c847c4bf43fa65c3ec541850ecdb7e96113e94f:
Philippe Gerum (1):
  powerpc: fix commit number for 2.6.30.3-DENX
  
  are available in the git repository at:
  
ssh+git://g...@xenomai.org/xenomai-abe.git analogy
  
 
 Please rebase on the current head first, the merge is unclean.
 

Ok, forget about this. The issue was that you changed your dev. branch.
I'm resyncing the trees appropriately.

  Alexis Berlemont (18):
Remove useless wrappers (comedi_copy_*_user())
Initialize the freshly allocated device's private area
Fix obvious typo mistake
Fix some error checkings in analog output command test function
Fix various minor bugs
Fix internal trigger via instruction (we do not need any data in the 
  instruction structure)
Add ai / ao trigger callback
Add a trigger instruction
Replace an info message by an error message
Fix a problem in the mite configuration (only for AI)
Add a missing EXPORT_SYMBOL() comedi_alloc_subd.
Align fake macro declarations with real functions declarations
Fix modules compilations issues
Comedi4RTDM - Analogy (first part)
Comedi4RTDM - Analogy (second part)
Comedi4RTDM - Analogy (third part, kernel side compiles)
Comedi4RTDM - Analogy (last part, user side compiles and runs)
Update *_alloc_subd() after bugfix backport from comedi branch
  
   Makefile.in|1 +
   aclocal.m4 |4 +-
   config/Makefile.in |1 +
   configure  | 5454 
  +++-
   configure.in   |6 +-
   doc/Makefile.in|1 +
   doc/docbook/Makefile.in|1 +
   doc/docbook/custom-stylesheets/Makefile.in |1 +
   doc/docbook/custom-stylesheets/xsl/Makefile.in |1 +
   .../custom-stylesheets/xsl/common/Makefile.in  |1 +
   doc/docbook/custom-stylesheets/xsl/fo/Makefile.in  |1 +
   .../custom-stylesheets/xsl/html/Makefile.in|1 +
   doc/docbook/xenomai/Makefile.in|1 +
   doc/doxygen/Makefile.in|1 +
   doc/man/Makefile.in|1 +
   doc/txt/Makefile.in|1 +
   include/Makefile.am|2 +-
   include/Makefile.in|3 +-
include/{comedi => analogy}/Makefile.am|6 +-
   include/{comedi => analogy}/Makefile.in|   13 +-
   include/analogy/analogy.h  |  152 +
   .../comedi_driver.h => analogy/analogy_driver.h}   |   16 +-
   include/{comedi => analogy}/buffer.h   |  192 +-
   include/{comedi => analogy}/channel_range.h|  162 +-
   include/{comedi => analogy}/command.h  |   34 +-
   include/{comedi => analogy}/context.h  |   32 +-
   include/{comedi => analogy}/descriptor.h   |   28 +-
   include/{comedi => analogy}/device.h   |   58 +-
   include/{comedi => analogy}/driver.h   |   34 +-
   include/analogy/instruction.h  |  225 +
   include/{comedi => analogy}/ioctl.h|   40 +-
   include/analogy/os_facilities.h|  191 +
   include/analogy/subdevice.h|  271 +
   include/analogy/transfer.h |  105 +
   include/{comedi => analogy}/types.h|   14 +-
   include/asm-arm/Makefile.in|1 +
   include/asm-arm/bits/Makefile.in   |1 +
   include/asm-blackfin/Makefile.in   |1 +
   include/asm-blackfin/bits/Makefile.in  |1 +
   include/asm-generic/Makefile.in|1 +
   include/asm-generic/bits/Makefile.in   |1 +
   include/asm-nios2/Makefile.in  |1 +
   include/asm-nios2/bits/Makefile.in |1 +
   include/asm-powerpc/Makefile.in|1 +
   include/asm-powerpc/bits/Makefile.in   |1 +
   include/asm-sim/Makefile.in|1 +
   include/asm-sim/bits/Makefile.in   |1 +
   include/asm-x86/Makefile.in|1 +
   include/asm-x86/bits/Makefile.in   |1 +
   include/comedi/comedi.h|  151 -
   include/comedi/instruction.h   |  225 -
   include/comedi/os_facilities.h |  211 -
   include/comedi/subdevice.h |  271 -
   include/comedi/transfer.h

Re: [Xenomai-core] Enabling and disabling of all interrupts with priorities

2009-10-20 Thread Philippe Gerum
On Thu, 2009-10-15 at 16:33 -0400, Andreas Glatz wrote:
 Hi,
 
 Our legacy code has two functions to disable and enable interrupts going
 to the CPU core like cli() and sti() in the Linux kernel. Those function pairs
 are used to avoid interruptions in critical code sections by the scheduler
 or interrupts. Interruptions in those code sections can potentially cause 
 race conditions and the failing of the application.
 
 Xenomai doesn't have any equivalent API calls although those API
 calls should be easy to implement:
 

Assuming we are talking about userland API allowing to control the
interrupt state, Xenomai does not have any because doing so is wrong
in the first place. You may not switch interrupts off from userland;
this won't work as expected.

 Assuming that the ISR Tasks have a priority of XNCORE_MAX_PRIO-1 
 (currently it is XNCORE_MAX_PRIO) we could protect critical code sections 
 by temporarily raising the priority of the task with the critical code 
 section 
 to XNCORE_MAX_PRIO.
 

What you have just described is a typical scheduler lock construct,
which is semantically a bad thing as well, since it prevents the RTOS
from caring for priorities, but at least it would not introduce lethal
bugs. You may want to have a look at rt_task_set_mode(0, T_LOCK, ...);
this is likely what you need to emulate the legacy code construct,
provided you don't care about interrupt preemption, as seems to be the case.
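
For illustration, a minimal sketch of such a scheduler-locked section with
the native API (the T_LOCK handling is the point, everything else is made up):

#include <native/task.h>

void critical_section(void)
{
	/* Lock out the real-time scheduler for the calling task; unlike
	 * masking interrupts, this only keeps other Xenomai threads from
	 * preempting us, interrupts themselves still run. */
	rt_task_set_mode(0, T_LOCK, NULL);

	/* ... critical code, keep it short ... */

	/* Drop the scheduler lock again. */
	rt_task_set_mode(T_LOCK, 0, NULL);
}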

 Furthermore assuming that the critical code doesn't cause a switch to 
 secondary mode, no one prevents the critical code from running. After
 that code section the priority is lowered to the previous level.
 

The builtin scheduler lock support deals properly with execution modes.
Switching to secondary while holding the schedlock would not entail any
issue.

 As one cannot prevent people from writing critical code which causes a
 mode switch, one can only check whether a mode switch occurred at the
 end of the critical sections and issue a warning if that was the case.
 
 I already have a version which works for me. 
 
 I have some questions though:
 
 1) If Priority -1 is assigned to Linux (ROOT) what is priority 0 for?

0 is part of the regular real-time priority scale, up to 257. See
include/nucleus/sched-rt.h in 2.5.

 2) For what are priorities 100...257 reserved?

RTOS emulators having a different priority scale than a posixish 0-99
range. E.g. VxWorks - 255, 0.

 3) Why did you pick the odd number of 257 as the max. priority?
 

257 is for the ISR server thread, guaranteed to be above all regular
thread priorities.

 I also found out that rt_task_set_priority() returns the old priority and not
 0 (as described in the documentation) if the call succeeds.

True. This doc was fixed in -head, but still needs to be backported to
2.4.

  If you issue
 rt_task_set_priority() in an ISR Task it even returns 257.
 

Changing the ISR server priority may not be a good idea though.

 Regards,
 Andreas 
-- 
Philippe.





Re: [Xenomai-core] RFC: 2.5 todo list.

2009-10-20 Thread Philippe Gerum
On Thu, 2009-10-15 at 15:21 -0400, Andreas Glatz wrote:
 Hi Philippe,
 
 Back at work. See inline replies below.
 
 
  - powerpc32 updates for 2.6.30. Mainly to merge the once 
  experimental
  bits that prevent most alignment faults from triggering a secondary 
  mode
  switch. Andreas told me this works like a charm on 83xx, and I did 
  not
  see any issue on 52xx, 85xx or 86xx either.
 

 Can I get a version of that patch for testing? Is it in your git
 repository?
   
I just pushed this commit to my remote tree (ipipe-2.6.30-powerpc
branch); it should appear in a few hours once mirrored (cron job).
   
  
   I finally had a chance to test the ipipe-2.6.30-powerpc version
   from the git repository. Unfortunately, I noticed that our application
   dies after some time and that this behaviour is related to that
   alignment patch (if I take it out everything runs fine for  2 days).
  
   Currently I'm investigating the reasons for that crash. It has
   something to do with floating point registers not being restored
   properly. Our alignment exceptions are mainly triggered by accesses
   to unaligned floating point data.
  
  Does it work any better with this patch in?
 
 I applied that patch but unfortunately our application still dies. I included
 an application with which you (hopefully) can reproduce the problem
 we are seeing. Our system is an MPC8360 with 2.6.30, ipipe 2.7,
 xenomai 2.4.9.1.
 

Confirmed on lite52xx as well. Will work on this.

 Here are the steps:
 
 1) apply ipipe with alignment patch, recompile kernel
 2) comile test1.c (attached) with: 
 gcc -Wall -O2 `xeno-config --xeno-cflags` \
 `xeno-config --xeno-ldflags` \
  -l native -l rtdk  -o test1 test1.c
 3) Start test1 in one shell
 4) Open a second shell and start 'switchbench'
 
 Just when 'switchbench' is running, I get the following
 output from my test application:
 ...
 Missmatch: 0xfff882064000
 Missmatch: 0xfff882064000
 Missmatch: 0xfff882064000
 Missmatch: 0xfff882064000 
 ...
 
 It seems that test1 is interrupted right between
 the lfd and stfd and the floating point regs aren't
 restored properly.
 
 Just a side note: I had to add assembly code to my
 C program to force an alignment exception. gcc seems
 to be smart enough to avoid unaligned access. g++
 on the other hand (especially g++ 3.3.6 which we are 
 using) seems to generate assembly code which causes
 alignment exceptions. So it seems that it's really g++'s 
 fault. We also discovered that our g++ generates buggy 
 floating point assembly code when compiling with the 
 -O2 or -Os option. Currently, we compile our application
 with -msoft-float to avoid those issues which has the
 nice side effect that we also don't get alignment exceptions
 anymore.
 
 Many thanks,
 Andreas
 
 
  diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
  index 32cc3df..a04a5e3 100644
  --- a/arch/powerpc/kernel/process.c
  +++ b/arch/powerpc/kernel/process.c
  @@ -80,7 +80,9 @@ void flush_fp_to_thread(struct task_struct *tsk)
   * FPU, and then when we get scheduled again we would store
   * bogus values for the remaining FP registers.
   */
  -   ipipe_preempt_disable(flags);
  +   if (ipipe_root_domain_p)
  +   preempt_disable();
  +   local_irq_save_hw_cond(flags);
if (tsk->thread.regs->msr & MSR_FP) {
   #ifdef CONFIG_SMP
  /*
  @@ -94,7 +96,9 @@ void flush_fp_to_thread(struct task_struct *tsk)
   #endif
  giveup_fpu(tsk);
  }
  -   ipipe_preempt_enable(flags);
  +   local_irq_restore_hw_cond(flags);
  +   if (ipipe_root_domain_p)
  +   preempt_enable();
  }
   }
-- 
Philippe.





Re: [Xenomai-core] Some thoughts on Analogy kernel side framework

2009-10-20 Thread Alexis Berlemont
On Saturday 17 October 2009 19:55:41 Philippe Gerum wrote:
 On Mon, 2009-10-12 at 23:52 +0200, Alexis Berlemont wrote:
  On Monday 12 October 2009 11:30:11 you wrote:
   On Mon, 2009-10-12 at 00:51 +0200, Alexis Berlemont wrote:
On Friday 09 October 2009 10:04:07 you wrote:
 On Fri, 2009-10-09 at 00:00 +0200, Alexis Berlemont wrote:
  If I understand well, I have gone too far too soon; the idea
  should be to keep the current framework as it is for 2.5.
 
  However, my former mail was not a definitive plan. The first goal
  was to share ideas. So, do not worry too much. :)

 Maintaining two different frameworks for the same purpose won't
 fly. I'm worried because the future of Comedi/RTDM as merged in the
 2.5 tree just became unclear - to say the least - since you
 introduced the idea of changing its architecture.

 Your potential user-base has to know whether using the current
 Comedi/RTDM framework for new designs based on 2.5.x will still
 make sense in six months from now, when Analogy eventually emerges.

 In other words, is there any upgrade path planned? What would this
 entail?

 Would one have to rewrite custom DAQ drivers to port them from
 Comedi/RTDM to Analogy, or could rely on a some compat wrapper for
 supporting legacy Comedi/RTDM Driver-Kernel interface on top of the
 Analogy core?

 Would that new architecture bring changes in the applications, i.e.
 what about the impact of such changes on the way userland
 interfaces to the acquisition framework and/or its drivers?

 I would have thought that Comedi/RTDM in the 2.5 tree would become
 Analogy as is, and evolve over time in a careful manner so that
 people always have a reasonable upgrade path. But now, I'm unsure
 whether this is going to be the case, or we would end up with two
 different frameworks. So, what is the _exact_ plan?
   
Ok. I will try to give my rough ideas.
   
Comedi / RTDM is facing many questions:
   
1) Peter Soetens, a Comedi user, once told us that the API (kernel
and user) divergences from upstream were going to bring many
troubles: maintenance burden, difficulties in luring original Comedi
users, etc. - Should I ignore what was told that day, or should I
find a way to satisfy the main request, which was a smooth transition
between Comedi upstream and Comedi/RTDM?
  
   You don't seem to get the point yet: I'm in no way arguing about which
   technical direction you should head your project to, and I have no
   issue with your technical analysis.
  
   The issue I have is with your project management: Comedi/RTDM is
   currently part of the Xenomai tree, scheduled for 2.5 for more than a
   year, and you are now in the back to the future mode, asking people
   their feedback about major changes your would like to see in your
   infrastructure, at a moment when one may have expected it to be stable.
   This won't fly.
  
2) People developing with Comedilib do not seem to be the same guys
working on the drivers. So, even if common users agreed with porting
their applications on the new library interface, they could not
integrate their needed drivers into the Xenomai drivers set. If you
want a clue, have a look at the Comedi mailing list, there are plenty
of mails starting with Is there a Comedi driver for ...
- Should I still be confident that there will be contributions of
drivers ?
  
   If your point is about mentioning that OS implementers should provide a
   stable framework to build over, for others to implement drivers for
   their own devices, well, ok, I was vaguely aware of this; thanks for
   the heads up anyway. Problem is that you have just shot yourself in the
   foot, by tagging Comedi/RTDM as unstable and unusable in the context
   of developing drivers.
  
   If you tell people that you are about to rewrite the kernel side
   significantly before they had a chance to get their feet wet with it
   and consider basing their future work on it, you won't get any
   contribution, obviously. They will wait for your own dust to settle. Or
   they may not wait at all, and walk away.
  
   At any rate, deprecating the current Comedi/RTDM architecture now, only
   a few weeks before Xenomai 2.5 is rolled out, has pretty bad timing, to
   say the least.
  
3) The Comedi framework is too different from other well-known
frameworks. However, I consider that audio and multimedia cards are
not different from acquisition devices. All of them can be divided
into subdevices or components but neither v4l2 nor alsa did implement
subdevices configurations like Comedi did. v4l2 and alsa drivers are
working both with devices fops-like structures but Comedi is working
with per subdevices callbacks far from the Linux driver model. Etc.
- Should I leave our framework like it is ? Are we sure that the

Re: [Xenomai-core] [PATCH v3 9/9] nucleus: Include all heaps in statistics

2009-10-20 Thread Philippe Gerum
On Tue, 2009-10-20 at 13:37 +0200, Jan Kiszka wrote:
 This extends /proc/xenomai/heap with statistics about all currently used
 heaps. It takes care to flush nklock while iterating over this potentially
 long list.
 
 Signed-off-by: Jan Kiszka jan.kis...@siemens.com
 ---
 
  include/nucleus/heap.h|   12 +++-
  ksrc/drivers/ipc/iddp.c   |3 +
  ksrc/drivers/ipc/xddp.c   |6 ++
  ksrc/nucleus/heap.c   |  131 
 -
  ksrc/nucleus/module.c |2 -
  ksrc/nucleus/pod.c|5 +-
  ksrc/nucleus/shadow.c |5 +-
  ksrc/skins/native/heap.c  |6 +-
  ksrc/skins/native/pipe.c  |4 +
  ksrc/skins/native/queue.c |6 +-
  ksrc/skins/posix/shm.c|4 +
  ksrc/skins/psos+/rn.c |6 +-
  ksrc/skins/rtai/shm.c |7 ++
  ksrc/skins/vrtx/heap.c|6 +-
  ksrc/skins/vrtx/syscall.c |3 +
  15 files changed, 169 insertions(+), 37 deletions(-)
 
 diff --git a/include/nucleus/heap.h b/include/nucleus/heap.h
 index 44db738..f653cd7 100644
 --- a/include/nucleus/heap.h
 +++ b/include/nucleus/heap.h
 @@ -115,6 +115,10 @@ typedef struct xnheap {
  
   XNARCH_DECL_DISPLAY_CONTEXT();
  
 + xnholder_t stat_link;   /* Link in heapq */
 +
 + char name[48];

s,48,XNOBJECT_NAME_LEN

 +
  } xnheap_t;
  
  extern xnheap_t kheap;
 @@ -202,7 +206,8 @@ void xnheap_cleanup_proc(void);
  
  int xnheap_init_mapped(xnheap_t *heap,
  u_long heapsize,
 -int memflags);
 +int memflags,
 +const char *name, ...);
  

The va_list is handy, but this breaks the common pattern used throughout
the rest of the nucleus, based on passing pre-formatted labels. So
either we make all creation calls use va_lists (but xnthread would need
more work), or we make xnheap_init_mapped use the not-so-handy current
form.

Actually, providing xnheap_set_name() and a name parameter/va_list to
xnheap_init* is one too many; this clutters an inner interface
uselessly. The latter should go away, assuming that anon heaps may
still exist.
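
For reference, the naming scheme under discussion boils down to the usual
vsnprintf()-on-a-va_list idiom; here is a small, self-contained stand-in
(demo_heap and demo_heap_set_name are hypothetical, only the formatting
mirrors what the patch does with heap->name):

#include <stdarg.h>
#include <stdio.h>

struct demo_heap {
	char name[48];
};

static void demo_heap_set_name(struct demo_heap *heap, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(heap->name, sizeof(heap->name), fmt, args);
	va_end(args);
}

int main(void)
{
	struct demo_heap h;

	demo_heap_set_name(&h, "xddp: %d", 7);
	printf("%s\n", h.name); /* prints "xddp: 7" */
	return 0;
}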

  void xnheap_destroy_mapped(xnheap_t *heap,
  void (*release)(struct xnheap *heap),
 @@ -224,7 +229,10 @@ void xnheap_destroy_mapped(xnheap_t *heap,
  int xnheap_init(xnheap_t *heap,
   void *heapaddr,
   u_long heapsize,
 - u_long pagesize);
 + u_long pagesize,
 + const char *name, ...);
 +
 +void xnheap_set_name(xnheap_t *heap, const char *name, ...);
  
  void xnheap_destroy(xnheap_t *heap,
   void (*flushfn)(xnheap_t *heap,
 diff --git a/ksrc/drivers/ipc/iddp.c b/ksrc/drivers/ipc/iddp.c
 index a407946..b6382f1 100644
 --- a/ksrc/drivers/ipc/iddp.c
 +++ b/ksrc/drivers/ipc/iddp.c
 @@ -559,7 +559,8 @@ static int __iddp_bind_socket(struct rtipc_private *priv,
   }
  
 ret = xnheap_init(&sk->privpool,
 -   poolmem, poolsz, XNHEAP_PAGE_SIZE);
 +   poolmem, poolsz, XNHEAP_PAGE_SIZE,
 +   "ippd: %d", port);
   if (ret) {
   xnarch_free_host_mem(poolmem, poolsz);
   goto fail;
 diff --git a/ksrc/drivers/ipc/xddp.c b/ksrc/drivers/ipc/xddp.c
 index f62147a..a5dafef 100644
 --- a/ksrc/drivers/ipc/xddp.c
 +++ b/ksrc/drivers/ipc/xddp.c
 @@ -703,7 +703,7 @@ static int __xddp_bind_socket(struct rtipc_private *priv,
   }
  
 ret = xnheap_init(&sk->privpool,
 -   poolmem, poolsz, XNHEAP_PAGE_SIZE);
 +   poolmem, poolsz, XNHEAP_PAGE_SIZE, "");
   if (ret) {
   xnarch_free_host_mem(poolmem, poolsz);
   goto fail;
 @@ -746,6 +746,10 @@ static int __xddp_bind_socket(struct rtipc_private *priv,
 sk->minor = ret;
 sa->sipc_port = ret;
 sk->name = *sa;
 +
 + if (poolsz > 0)
 + xnheap_set_name(&sk->bufpool, "xddp: %d", sa->sipc_port);
 +
 /* Set default destination if unset at binding time. */
 if (sk->peer.sipc_port < 0)
 sk->peer = *sa;
 diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
 index 96c46f8..793d1c5 100644
 --- a/ksrc/nucleus/heap.c
 +++ b/ksrc/nucleus/heap.c
 @@ -76,6 +76,9 @@ EXPORT_SYMBOL_GPL(kheap);
  xnheap_t kstacks;/* Private stack pool */
  #endif
  
 +static DEFINE_XNQUEUE(heapq);/* Heap list for /proc reporting */
 +static unsigned long heapq_rev;
 +
  static void init_extent(xnheap_t *heap, xnextent_t *extent)
  {
   caddr_t freepage;
 @@ -108,7 +111,7 @@ static void init_extent(xnheap_t *heap, xnextent_t *extent)
   */
  
  /*!
 - * \fn xnheap_init(xnheap_t *heap,void *heapaddr,u_long heapsize,u_long pagesize)
 + * \fn xnheap_init(xnheap_t *heap,void *heapaddr,u_long heapsize,u_long pagesize,const char *name,...)
   * \brief Initialize a memory heap.
   *
   * Initializes a memory heap suitable