Module: xenomai-rpm
Branch: for-upstream
Commit: ada72009aa6a3734b65938b1c2abe00f5c278956
URL:    http://git.xenomai.org/?p=xenomai-rpm.git;a=commit;h=ada72009aa6a3734b65938b1c2abe00f5c278956

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Jun 13 21:49:01 2010 +0200

nucleus: fix heap mapping for nommu

Although the kernel sees a single backing device with direct mapping
capabilities on nommu targets (/dev/rtheap), we map different heaps
through it, so we want a brand new mapping region for each of
them. Therefore, we must make sure to request mappings over
non-overlapping areas.

To this end, we now request mappings from offsets representing the
start RAM address of the heap memory, instead of mapping from offset
0 as previously. Otherwise, the kernel could match the same region
for different heaps, for any mmap() request directed to /dev/rtheap
which appears to be a subset of a previous one, i.e. [offset=0,
length <= largest mapped size to date], which does happen most of the
time.

Basically, this also means that shared heap mapping on nommu systems
has been badly broken in all Xenomai releases prior to this
commit. Yeepeee.

For this reason, we do break the nommu ABI (i.e. blackfin and nios2)
to introduce this fix, simply because the previous implementation did
not work at all.
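
For reference, the user-space half of the scheme boils down to picking
the mmap() offset per build configuration. Below is a minimal,
illustrative sketch modeled on the patched xeno_map_heap() further
down; map_rtheap() and struct heap_desc are stand-in names rather than
part of the actual API, and error reporting is trimmed.

#include <sys/mman.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

struct heap_desc {            /* stand-in for struct xnheap_desc */
	unsigned long handle; /* kernel heap handle */
	unsigned int size;    /* heap extent size */
	unsigned long area;   /* start RAM address of the heap (nommu) */
};

static void *map_rtheap(const struct heap_desc *hd)
{
	unsigned long off;
	void *addr;
	int fd;

	fd = open("/dev/rtheap", O_RDWR);
	if (fd < 0)
		return MAP_FAILED;

	/* Bind this file instance to the target heap. */
	if (ioctl(fd, 0, hd->handle)) {
		close(fd);
		return MAP_FAILED;
	}

#ifdef CONFIG_MMU
	/* MMU case: 2.5.x ABI preserved, always map from offset 0. */
	off = 0;
#else
	/*
	 * nommu case: the offset is the heap's start RAM address, so
	 * every heap gets its own, non-overlapping region behind the
	 * single /dev/rtheap device.
	 */
	off = hd->area;
#endif
	addr = mmap(NULL, hd->size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, off);
	close(fd);

	return addr;
}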

---

 include/asm-blackfin/features.h |    2 +-
 include/asm-nios2/features.h    |    2 +-
 include/native/heap.h           |   14 +++------
 include/native/queue.h          |   14 +++------
 include/nucleus/heap.h          |   19 +++++++++++--
 include/vrtx/vrtx.h             |    8 ++---
 ksrc/nucleus/heap.c             |   53 +++++++++++++++++++++++++--------------
 ksrc/nucleus/shadow.c           |   13 ++++++---
 ksrc/skins/native/syscall.c     |    7 +++--
 ksrc/skins/psos+/syscall.c      |    2 +
 ksrc/skins/vrtx/syscall.c       |    4 +-
 src/skins/common/sem_heap.c     |   41 +++++++++++++++++------------
 src/skins/native/heap.c         |    9 +++++-
 src/skins/native/queue.c        |   10 ++++++-
 src/skins/psos+/rn.c            |    9 +++++-
 src/skins/rtai/shm.c            |    8 ++++-
 src/skins/vrtx/heap.c           |    8 ++++-
 src/skins/vrtx/pt.c             |    8 ++++-
 18 files changed, 145 insertions(+), 86 deletions(-)

diff --git a/include/asm-blackfin/features.h b/include/asm-blackfin/features.h
index 9dbee9c..c365c38 100644
--- a/include/asm-blackfin/features.h
+++ b/include/asm-blackfin/features.h
@@ -22,7 +22,7 @@
 #include <asm-generic/xenomai/features.h>
 
 /* The ABI revision level we use on this arch. */
-#define XENOMAI_ABI_REV   3UL
+#define XENOMAI_ABI_REV   4UL
 
 #define XENOMAI_FEAT_DEP  __xn_feat_generic_mask
 
diff --git a/include/asm-nios2/features.h b/include/asm-nios2/features.h
index eb4589a..534c052 100644
--- a/include/asm-nios2/features.h
+++ b/include/asm-nios2/features.h
@@ -36,7 +36,7 @@ static inline void collect_arch_features(struct xnfeatinfo *finfo)
 #endif
 
 /* The ABI revision level we use on this arch. */
-#define XENOMAI_ABI_REV   1UL
+#define XENOMAI_ABI_REV   2UL
 
 #define XENOMAI_FEAT_DEP  __xn_feat_generic_mask
 
diff --git a/include/native/heap.h b/include/native/heap.h
index 0c2a7a2..443fd82 100644
--- a/include/native/heap.h
+++ b/include/native/heap.h
@@ -58,15 +58,11 @@ typedef struct rt_heap_info {
 } RT_HEAP_INFO;
 
 typedef struct rt_heap_placeholder {
-
-    xnhandle_t opaque;
-
-    void *opaque2;
-
-    caddr_t mapbase;
-
-    size_t mapsize;
-
+       xnhandle_t opaque;
+       void *opaque2;
+       caddr_t mapbase;
+       size_t mapsize;
+       xnheap_area_decl();
 } RT_HEAP_PLACEHOLDER;
 
 #if defined(__KERNEL__) || defined(__XENO_SIM__)
diff --git a/include/native/queue.h b/include/native/queue.h
index 2951c42..b266c1d 100644
--- a/include/native/queue.h
+++ b/include/native/queue.h
@@ -58,15 +58,11 @@ typedef struct rt_queue_info {
 } RT_QUEUE_INFO;
 
 typedef struct rt_queue_placeholder {
-
-    xnhandle_t opaque;
-
-    void *opaque2;
-
-    caddr_t mapbase;
-
-    size_t mapsize;
-
+       xnhandle_t opaque;
+       void *opaque2;
+       caddr_t mapbase;
+       size_t mapsize;
+       xnheap_area_decl();
 } RT_QUEUE_PLACEHOLDER;
 
 #if defined(__KERNEL__) || defined(__XENO_SIM__)
diff --git a/include/nucleus/heap.h b/include/nucleus/heap.h
index f4ebe11..e837bf2 100644
--- a/include/nucleus/heap.h
+++ b/include/nucleus/heap.h
@@ -208,14 +208,17 @@ void xnheap_destroy_mapped(xnheap_t *heap,
                           void (*release)(struct xnheap *heap),
                           void __user *mapaddr);
 
+#define xnheap_base_memory(heap) \
+       ((caddr_t)(heap)->archdep.heapbase)
+
 #define xnheap_mapped_offset(heap,ptr) \
-(((caddr_t)(ptr)) - ((caddr_t)(heap)->archdep.heapbase))
+       (((caddr_t)(ptr)) - xnheap_base_memory(heap))
 
 #define xnheap_mapped_address(heap,off) \
-(((caddr_t)(heap)->archdep.heapbase) + (off))
+       (xnheap_base_memory(heap) + (off))
 
 #define xnheap_mapped_p(heap) \
-((heap)->archdep.heapbase != NULL)
+       (xnheap_base_memory(heap) != NULL)
 
 #endif /* __KERNEL__ */
 
@@ -279,9 +282,19 @@ int xnheap_check_block(xnheap_t *heap,
 
 #define XNHEAP_DEV_NAME  "/dev/rtheap"
 
+#ifdef CONFIG_MMU
+/* XXX: 2.5.x ABI preserved for MMU-enabled only. */
+#define xnheap_area_decl();
+#define xnheap_area_set(p, val)
+#else
+#define xnheap_area_decl()     unsigned long area
+#define xnheap_area_set(p, val)        (p)->area = (unsigned long)(val)
+#endif
+
 struct xnheap_desc {
        unsigned long handle;
        unsigned int size;
+       xnheap_area_decl();
 };
 
 #endif /* !_XENO_NUCLEUS_HEAP_H */
diff --git a/include/vrtx/vrtx.h b/include/vrtx/vrtx.h
index d43af35..a77d2d1 100644
--- a/include/vrtx/vrtx.h
+++ b/include/vrtx/vrtx.h
@@ -21,7 +21,7 @@
 #ifndef _XENO_VRTX_VRTX_H
 #define _XENO_VRTX_VRTX_H
 
-#include <nucleus/types.h>
+#include <nucleus/heap.h>
 
 #define VRTX_SKIN_MAGIC    0x56525458
 #define VRTX_SKIN_VERSION  6
@@ -67,19 +67,17 @@ typedef struct _TCB {
 } TCB;
 
 typedef struct _vrtx_hdesc {
-
        int hid;
        void *hcb;
        size_t hsize;
-
+       xnheap_area_decl();
 } vrtx_hdesc_t;
 
 typedef struct _vrtx_pdesc {
-
        int pid;
        void *ptcb;
        size_t ptsize;
-
+       xnheap_area_decl();
 } vrtx_pdesc_t;
 
 #if defined(__KERNEL__) || defined(__XENO_SIM__)
diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index afcc4f8..f1d2c7b 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -1136,8 +1136,8 @@ static int xnheap_ioctl(struct inode *inode,
 static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 {
        unsigned long offset, size, vaddr;
-       xnheap_t *heap;
-       int err;
+       struct xnheap *heap;
+       int ret;
 
        if (vma->vm_ops != NULL || file->private_data == NULL)
                /* Caller should mmap() once for a given file instance, after
@@ -1147,9 +1147,6 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
        if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
                return -EINVAL; /* COW unsupported. */
 
-       offset = vma->vm_pgoff << PAGE_SHIFT;
-       size = vma->vm_end - vma->vm_start;
-
        spin_lock(&kheapq_lock);
 
        heap = __validate_heap_addr(file->private_data);
@@ -1163,22 +1160,28 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
        spin_unlock(&kheapq_lock);
 
        vma->vm_private_data = file->private_data;
-
-       err = -ENXIO;
-       if (offset + size > xnheap_extentsize(heap))
-               goto deref_out;
+       vma->vm_ops = &xnheap_vmops;
+       size = vma->vm_end - vma->vm_start;
+       ret = -ENXIO;
 
        if (countq(&heap->extents) > 1)
                /* Cannot map multi-extent heaps, we need the memory
                   area we map from to be contiguous. */
                goto deref_out;
 
-       vma->vm_ops = &xnheap_vmops;
+       offset = vma->vm_pgoff << PAGE_SHIFT;
+       vaddr = (unsigned long)xnheap_base_memory(heap);
 
 #ifdef CONFIG_MMU
-       vaddr = (unsigned long)heap->archdep.heapbase + offset;
+       /*
+        * offset is actually an offset from the start of the heap
+        * memory.
+        */
+       if (offset + size > xnheap_extentsize(heap))
+               goto deref_out;
 
-       err = -EAGAIN;
+       vaddr += offset;
+       ret = -EAGAIN;
        if ((heap->archdep.kmflags & ~XNHEAP_GFP_NONCACHED) == 0) {
                unsigned long maddr = vma->vm_start;
 
@@ -1201,7 +1204,18 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 
        xnarch_fault_range(vma);
 #else /* !CONFIG_MMU */
-       (void)vaddr;
+       /*
+        * Despite the kernel sees a single backing device with direct
+        * mapping capabilities (/dev/rtheap), we do map different
+        * heaps through it, so we want a brand new mapping region for
+        * each of them. To this end, we must request mappings on
+        * non-overlapping areas. To make sure of this in the nommu
+        * case, we request mappings from offsets representing the
+        * start RAM address of the heap memory.
+        */
+       if (offset + size > vaddr + xnheap_extentsize(heap))
+               goto deref_out;
+
        if ((heap->archdep.kmflags & ~XNHEAP_GFP_NONCACHED) != 0 ||
            heap->archdep.kmflags == XNHEAP_GFP_NONCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1211,7 +1225,8 @@ static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 
 deref_out:
        xnheap_vmclose(vma);
-       return err;
+
+       return ret;
 }
 
 #ifndef CONFIG_MMU
@@ -1221,7 +1236,7 @@ static unsigned long xnheap_get_unmapped_area(struct file *file,
                                              unsigned long pgoff,
                                              unsigned long flags)
 {
-       unsigned long uaddr, offset;
+       unsigned long area, offset;
        struct xnheap *heap;
        int ret;
 
@@ -1232,15 +1247,15 @@ static unsigned long xnheap_get_unmapped_area(struct file *file,
        if (heap == NULL)
                goto fail;
 
+       area = (unsigned long)xnheap_base_memory(heap);
        offset = pgoff << PAGE_SHIFT;
-       if (offset + len > xnheap_extentsize(heap))
+       if (offset < area ||
+           offset + len > area + xnheap_extentsize(heap))
                goto fail;
 
-       uaddr = (unsigned long)heap->archdep.heapbase + offset;
-
        spin_unlock(&kheapq_lock);
 
-       return uaddr;
+       return offset;
 fail:
        spin_unlock(&kheapq_lock);
 
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index cc313cf..84f6e5c 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -1892,15 +1892,18 @@ static int xnshadow_sys_trace(struct pt_regs *regs)
 
 static int xnshadow_sys_sem_heap(struct pt_regs *regs)
 {
-       struct xnheap_desc hinfo, __user *us_hinfo;
+       struct xnheap_desc hd, __user *u_hd;
+       struct xnheap *heap;
        unsigned global;
 
        global = __xn_reg_arg2(regs);
-       us_hinfo = (struct xnheap_desc __user *) __xn_reg_arg1(regs);
-       hinfo.handle = (unsigned long)&xnsys_ppd_get(global)->sem_heap;
-       hinfo.size = xnheap_extentsize(&xnsys_ppd_get(global)->sem_heap);
+       u_hd = (struct xnheap_desc __user *)__xn_reg_arg1(regs);
+       heap = &xnsys_ppd_get(global)->sem_heap;
+       hd.handle = (unsigned long)heap;
+       hd.size = xnheap_extentsize(heap);
+       xnheap_area_set(&hd, xnheap_base_memory(heap));
 
-       return __xn_safe_copy_to_user(us_hinfo, &hinfo, sizeof(*us_hinfo));
+       return __xn_safe_copy_to_user(u_hd, &hd, sizeof(*u_hd));
 }
 
 static int xnshadow_sys_current(struct pt_regs *regs)
diff --git a/ksrc/skins/native/syscall.c b/ksrc/skins/native/syscall.c
index 320eaaf..56407b8 100644
--- a/ksrc/skins/native/syscall.c
+++ b/ksrc/skins/native/syscall.c
@@ -2060,7 +2060,7 @@ static int __rt_queue_create(struct pt_regs *regs)
        ph.opaque = q->handle;
        ph.opaque2 = &q->bufpool;
        ph.mapsize = xnheap_extentsize(&q->bufpool);
-
+       xnheap_area_set(&ph, xnheap_base_memory(&q->bufpool));
 	if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs), &ph, sizeof(ph)))
                return -EFAULT;
 
@@ -2098,7 +2098,7 @@ static int __rt_queue_bind(struct pt_regs *regs)
 
        ph.opaque2 = &q->bufpool;
        ph.mapsize = xnheap_extentsize(&q->bufpool);
-
+       xnheap_area_set(&ph, xnheap_base_memory(&q->bufpool));
        xnlock_put_irqrestore(&nklock, s);
 
 	if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs), &ph, sizeof(ph)))
@@ -2584,7 +2584,7 @@ static int __rt_heap_create(struct pt_regs *regs)
        ph.opaque = heap->handle;
        ph.opaque2 = &heap->heap_base;
        ph.mapsize = xnheap_extentsize(&heap->heap_base);
-
+       xnheap_area_set(&ph, xnheap_base_memory(&heap->heap_base));
 	if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs), &ph, sizeof(ph)))
                return -EFAULT;
 
@@ -2622,6 +2622,7 @@ static int __rt_heap_bind(struct pt_regs *regs)
 
        ph.opaque2 = &heap->heap_base;
        ph.mapsize = xnheap_extentsize(&heap->heap_base);
+       xnheap_area_set(&ph, xnheap_base_memory(&heap->heap_base));
 
        xnlock_put_irqrestore(&nklock, s);
 
diff --git a/ksrc/skins/psos+/syscall.c b/ksrc/skins/psos+/syscall.c
index 669e52b..ef20b48 100644
--- a/ksrc/skins/psos+/syscall.c
+++ b/ksrc/skins/psos+/syscall.c
@@ -1139,6 +1139,7 @@ static int __rn_create(struct pt_regs *regs)
                u_long allocsz;
                void *rncb;
                u_long mapsize;
+               xnheap_area_decl();
        } rninfo;
        psosrn_t *rn;
        u_long err;
@@ -1165,6 +1166,7 @@ static int __rn_create(struct pt_regs *regs)
                rninfo.rnid = rn->handle;
                rninfo.rncb = &rn->heapbase;
                rninfo.mapsize = xnheap_extentsize(&rn->heapbase);
+               xnheap_area_set(&rninfo, xnheap_base_memory(&rn->heapbase));
 		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg3(regs), &rninfo,
                                           sizeof(rninfo)))
                        err = -EFAULT;
diff --git a/ksrc/skins/vrtx/syscall.c b/ksrc/skins/vrtx/syscall.c
index 87849a7..ab3a8e1 100644
--- a/ksrc/skins/vrtx/syscall.c
+++ b/ksrc/skins/vrtx/syscall.c
@@ -867,7 +867,7 @@ static int __sc_hcreate(struct pt_regs *regs)
                hdesc.hid = hid;
                hdesc.hcb = &heap->sysheap;
                hdesc.hsize = xnheap_extentsize(&heap->sysheap);
-
+               xnheap_area_set(&hdesc, xnheap_base_memory(&heap->sysheap));
                xnlock_put_irqrestore(&nklock, s);
 
                if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg3(regs),
@@ -1076,7 +1076,7 @@ static int __sc_pcreate(struct pt_regs *regs)
                pdesc.pid = pid;
                pdesc.ptcb = ptheap;
                pdesc.ptsize = xnheap_extentsize(ptheap);
-
+               xnheap_area_set(&pdesc, xnheap_base_memory(ptheap));
                xnlock_put_irqrestore(&nklock, s);
 
 		return __xn_safe_copy_to_user((void __user *)__xn_reg_arg4(regs),
diff --git a/src/skins/common/sem_heap.c b/src/skins/common/sem_heap.c
index acb655b..0a66484 100644
--- a/src/skins/common/sem_heap.c
+++ b/src/skins/common/sem_heap.c
@@ -20,8 +20,9 @@ unsigned long xeno_sem_heap[2] = { 0, 0 };
 
 struct xnvdso *nkvdso;
 
-void *xeno_map_heap(unsigned long handle, unsigned int size)
+void *xeno_map_heap(struct xnheap_desc *hd)
 {
+       unsigned long area;
        int fd, ret;
        void *addr;
 
@@ -31,14 +32,20 @@ void *xeno_map_heap(unsigned long handle, unsigned int size)
                return MAP_FAILED;
        }
 
-       ret = ioctl(fd, 0, handle);
+       ret = ioctl(fd, 0, hd->handle);
        if (ret) {
                perror("Xenomai: ioctl");
                return MAP_FAILED;
        }
 
-       addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
-                   MAP_SHARED, fd, 0L);
+#ifdef CONFIG_MMU
+       /* XXX: 2.5.x ABI preserved for MMU-enabled only. */
+       area = 0;
+#else
+       area = hd->area;
+#endif
+       addr = mmap(NULL, hd->size, PROT_READ|PROT_WRITE,
+                   MAP_SHARED, fd, area);
 
        close(fd);
 
@@ -47,40 +54,40 @@ void *xeno_map_heap(unsigned long handle, unsigned int size)
 
 static void *map_sem_heap(unsigned int shared)
 {
-       struct xnheap_desc hinfo;
+       struct xnheap_desc hdesc;
        int ret;
 
-       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hinfo, shared);
+       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hdesc, shared);
        if (ret < 0) {
                errno = -ret;
                perror("Xenomai: sys_sem_heap");
                return MAP_FAILED;
        }
 
-       return xeno_map_heap(hinfo.handle, hinfo.size);
+       return xeno_map_heap(&hdesc);
 }
 
-static void unmap_sem_heap(unsigned long heap_addr, unsigned int shared)
+static void unmap_sem_heap(unsigned long addr, unsigned int shared)
 {
-       struct xnheap_desc hinfo;
+       struct xnheap_desc hdesc;
        int ret;
 
-       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hinfo, shared);
+       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hdesc, shared);
        if (ret < 0) {
                errno = -ret;
                perror("Xenomai: unmap sem_heap");
                return;
        }
 
-       munmap((void *)heap_addr, hinfo.size);
+       munmap((void *)addr, hdesc.size);
 }
 
 static void remap_on_fork(void)
 {
        unmap_sem_heap(xeno_sem_heap[0], 0);
 
-       xeno_sem_heap[0] = (unsigned long) map_sem_heap(0);
-       if (xeno_sem_heap[0] == (unsigned long) MAP_FAILED) {
+       xeno_sem_heap[0] = (unsigned long)map_sem_heap(0);
+       if (xeno_sem_heap[0] == (unsigned long)MAP_FAILED) {
                perror("Xenomai: mmap local sem heap");
                exit(EXIT_FAILURE);
        }
@@ -105,15 +112,15 @@ static void xeno_init_vdso(void)
 
 static void xeno_init_sem_heaps_inner(void)
 {
-       xeno_sem_heap[0] = (unsigned long) map_sem_heap(0);
-       if (xeno_sem_heap[0] == (unsigned long) MAP_FAILED) {
+       xeno_sem_heap[0] = (unsigned long)map_sem_heap(0);
+       if (xeno_sem_heap[0] == (unsigned long)MAP_FAILED) {
                perror("Xenomai: mmap local sem heap");
                exit(EXIT_FAILURE);
        }
        pthread_atfork(NULL, NULL, remap_on_fork);
 
-       xeno_sem_heap[1] = (unsigned long) map_sem_heap(1);
-       if (xeno_sem_heap[1] == (unsigned long) MAP_FAILED) {
+       xeno_sem_heap[1] = (unsigned long)map_sem_heap(1);
+       if (xeno_sem_heap[1] == (unsigned long)MAP_FAILED) {
                perror("Xenomai: mmap global sem heap");
                exit(EXIT_FAILURE);
        }
diff --git a/src/skins/native/heap.c b/src/skins/native/heap.c
index 2ed929d..29b1329 100644
--- a/src/skins/native/heap.c
+++ b/src/skins/native/heap.c
@@ -28,11 +28,16 @@
 
 extern int __native_muxid;
 
-void *xeno_map_heap(unsigned long handle, unsigned int size);
+void *xeno_map_heap(struct xnheap_desc *hd);
 
 static int __map_heap_memory(RT_HEAP *heap, RT_HEAP_PLACEHOLDER *php)
 {
-       php->mapbase = xeno_map_heap((unsigned long)php->opaque2, php->mapsize);
+       struct xnheap_desc hd;
+
+       hd.handle = (unsigned long)php->opaque2;
+       hd.size = php->mapsize;
+       xnheap_area_set(&hd, php->area);
+       php->mapbase = xeno_map_heap(&hd);
        if (php->mapbase == MAP_FAILED)
                return -errno;
 
diff --git a/src/skins/native/queue.c b/src/skins/native/queue.c
index abf6ab4..d104bd0 100644
--- a/src/skins/native/queue.c
+++ b/src/skins/native/queue.c
@@ -28,11 +28,16 @@
 
 extern int __native_muxid;
 
-void *xeno_map_heap(unsigned long handle, unsigned int size);
+void *xeno_map_heap(struct xnheap_desc *hd);
 
 static int __map_queue_memory(RT_QUEUE *q, RT_QUEUE_PLACEHOLDER *php)
 {
-  php->mapbase = xeno_map_heap((unsigned long)php->opaque2, php->mapsize);
+       struct xnheap_desc hd;
+
+       hd.handle = (unsigned long)php->opaque2;
+       hd.size = php->mapsize;
+       xnheap_area_set(&hd, php->area);
+       php->mapbase = xeno_map_heap(&hd);
        if (php->mapbase == MAP_FAILED)
                return -errno;
 
@@ -59,6 +64,7 @@ int rt_queue_create(RT_QUEUE *q,
                /* If the mapping fails, make sure we don't leave a dandling
                   queue in kernel space -- remove it. */
                XENOMAI_SKINCALL1(__native_muxid, __native_queue_delete, &ph);
+
        return err;
 }
 
diff --git a/src/skins/psos+/rn.c b/src/skins/psos+/rn.c
index 35736ad..f2de162 100644
--- a/src/skins/psos+/rn.c
+++ b/src/skins/psos+/rn.c
@@ -32,15 +32,20 @@ struct rninfo {
        u_long allocsz;
        void *rncb;
        u_long mapsize;
+       xnheap_area_decl();
 };
 
-void *xeno_map_heap(unsigned long handle, unsigned int size);
+void *xeno_map_heap(struct xnheap_desc *hd);
 
 static int __map_heap_memory(const struct rninfo *rnip)
 {
+       struct xnheap_desc hd;
        caddr_t mapbase;
 
-       mapbase = xeno_map_heap((unsigned long)rnip->rncb, rnip->mapsize);
+       hd.handle = (unsigned long)rnip->rncb;
+       hd.size = rnip->mapsize;
+       xnheap_area_set(&hd, rnip->area);
+       mapbase = xeno_map_heap(&hd);
        if (mapbase == MAP_FAILED)
                return -errno;
 
diff --git a/src/skins/rtai/shm.c b/src/skins/rtai/shm.c
index 407ace4..0982e11 100644
--- a/src/skins/rtai/shm.c
+++ b/src/skins/rtai/shm.c
@@ -28,13 +28,17 @@
 
 extern int __rtai_muxid;
 
-void *xeno_map_heap(unsigned long handle, unsigned int size);
+void *xeno_map_heap(struct xnheap_desc *hd);
 
 static void *__map_shm_heap_memory(unsigned long opaque, int mapsize)
 {
+       struct xnheap_desc hd;
        void *mapbase;
 
-       mapbase = xeno_map_heap(opaque, mapsize);
+       hd.handle = opaque;
+       hd.size = mapsize;
+       xnheap_area_set(&hd, 0);
+       mapbase = xeno_map_heap(&hd);
        if (mapbase == MAP_FAILED)
                return NULL;
 
diff --git a/src/skins/vrtx/heap.c b/src/skins/vrtx/heap.c
index ceaa45a..53f387b 100644
--- a/src/skins/vrtx/heap.c
+++ b/src/skins/vrtx/heap.c
@@ -27,13 +27,17 @@
 
 extern int __vrtx_muxid;
 
-void *xeno_map_heap(unsigned long handle, unsigned int size);
+void *xeno_map_heap(struct xnheap_desc *hd);
 
 static int __map_heap_memory(const vrtx_hdesc_t *hdesc)
 {
+       struct xnheap_desc hd;
        caddr_t mapbase;
 
-       mapbase = xeno_map_heap((unsigned long)hdesc->hcb, hdesc->hsize);
+       hd.handle = (unsigned long)hdesc->hcb;
+       hd.size = hdesc->hsize;
+       xnheap_area_set(&hd, hdesc->area);
+       mapbase = xeno_map_heap(&hd);
        if (mapbase == MAP_FAILED)
                return -errno;
 
diff --git a/src/skins/vrtx/pt.c b/src/skins/vrtx/pt.c
index 1e729a9..835299c 100644
--- a/src/skins/vrtx/pt.c
+++ b/src/skins/vrtx/pt.c
@@ -27,13 +27,17 @@
 
 extern int __vrtx_muxid;
 
-void *xeno_map_heap(unsigned long handle, unsigned int size);
+void *xeno_map_heap(struct xnheap_desc *hd);
 
 static int __map_pt_memory(const vrtx_pdesc_t *pdesc)
 {
+       struct xnheap_desc hd;
        caddr_t mapbase;
 
-       mapbase = xeno_map_heap((unsigned long)pdesc->ptcb, pdesc->ptsize);
+       hd.handle = (unsigned long)pdesc->ptcb;
+       hd.size = pdesc->ptsize;
+       xnheap_area_set(&hd, pdesc->area);
+       mapbase = xeno_map_heap(&hd);
        if (mapbase == MAP_FAILED)
                return -errno;
 

