Module: xenomai-rpm
Branch: for-upstream
Commit: 11221fefff6ceddca51d70a48d3fae67e0103d3c
URL:    
http://git.xenomai.org/?p=xenomai-rpm.git;a=commit;h=11221fefff6ceddca51d70a48d3fae67e0103d3c

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Jun 13 21:49:01 2010 +0200

nucleus: fix heap mapping for nommu

Although the kernel sees a single backing device with direct mapping
capabilities on nommu targets (/dev/rtheap), we do map different heaps
through it, so we want a brand new mapping region for each of
them. Therefore, we must make sure to request mappings on
non-overlapping areas.

To this end, we request mappings from offsets representing the start
RAM address of the heap memory instead of mapping from offset 0 as we
did previously.  Otherwise, the kernel could match the same region for
different heaps, for all mmap() requests directed to /dev/rtheap which
appear to be a subset of a previous one, i.e. [offset=0, length <=
largest mapped size to date] — which does happen most of the time.

Basically, this also means that shared heap mapping on nommu systems
has always been badly broken on all Xenomai releases prior to this
commit. Yeepeee.

For this reason, we do break the nommu ABI (i.e. on blackfin and
nios2) to introduce this fix, simply because the previous
implementation did not work at all.

---

 include/asm-blackfin/features.h |    2 +-
 include/asm-nios2/features.h    |    2 +-
 include/nucleus/heap.h          |    1 +
 ksrc/nucleus/heap.c             |   53 +++++++++++++++++++++++++--------------
 ksrc/nucleus/shadow.c           |   17 +++++++++---
 src/skins/common/sem_heap.c     |   45 ++++++++++++++++++++------------
 6 files changed, 77 insertions(+), 43 deletions(-)

diff --git a/include/asm-blackfin/features.h b/include/asm-blackfin/features.h
index 9dbee9c..c365c38 100644
--- a/include/asm-blackfin/features.h
+++ b/include/asm-blackfin/features.h
@@ -22,7 +22,7 @@
 #include <asm-generic/xenomai/features.h>
 
 /* The ABI revision level we use on this arch. */
-#define XENOMAI_ABI_REV   3UL
+#define XENOMAI_ABI_REV   4UL
 
 #define XENOMAI_FEAT_DEP  __xn_feat_generic_mask
 
diff --git a/include/asm-nios2/features.h b/include/asm-nios2/features.h
index eb4589a..534c052 100644
--- a/include/asm-nios2/features.h
+++ b/include/asm-nios2/features.h
@@ -36,7 +36,7 @@ static inline void collect_arch_features(struct xnfeatinfo 
*finfo)
 #endif
 
 /* The ABI revision level we use on this arch. */
-#define XENOMAI_ABI_REV   1UL
+#define XENOMAI_ABI_REV   2UL
 
 #define XENOMAI_FEAT_DEP  __xn_feat_generic_mask
 
diff --git a/include/nucleus/heap.h b/include/nucleus/heap.h
index f4ebe11..da5732e 100644
--- a/include/nucleus/heap.h
+++ b/include/nucleus/heap.h
@@ -282,6 +282,7 @@ int xnheap_check_block(xnheap_t *heap,
 struct xnheap_desc {
        unsigned long handle;
        unsigned int size;
+       unsigned long area;
 };
 
 #endif /* !_XENO_NUCLEUS_HEAP_H */
diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index afcc4f8..a152567 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -1136,8 +1136,8 @@ static int xnheap_ioctl(struct inode *inode,
 static int xnheap_mmap(struct file *file, struct vm_area_struct *vma)
 {
        unsigned long offset, size, vaddr;
-       xnheap_t *heap;
-       int err;
+       struct xnheap *heap;
+       int ret;
 
        if (vma->vm_ops != NULL || file->private_data == NULL)
                /* Caller should mmap() once for a given file instance, after
@@ -1147,9 +1147,6 @@ static int xnheap_mmap(struct file *file, struct 
vm_area_struct *vma)
        if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
                return -EINVAL; /* COW unsupported. */
 
-       offset = vma->vm_pgoff << PAGE_SHIFT;
-       size = vma->vm_end - vma->vm_start;
-
        spin_lock(&kheapq_lock);
 
        heap = __validate_heap_addr(file->private_data);
@@ -1163,22 +1160,28 @@ static int xnheap_mmap(struct file *file, struct 
vm_area_struct *vma)
        spin_unlock(&kheapq_lock);
 
        vma->vm_private_data = file->private_data;
-
-       err = -ENXIO;
-       if (offset + size > xnheap_extentsize(heap))
-               goto deref_out;
+       vma->vm_ops = &xnheap_vmops;
+       size = vma->vm_end - vma->vm_start;
+       ret = -ENXIO;
 
        if (countq(&heap->extents) > 1)
                /* Cannot map multi-extent heaps, we need the memory
                   area we map from to be contiguous. */
                goto deref_out;
 
-       vma->vm_ops = &xnheap_vmops;
+       offset = vma->vm_pgoff << PAGE_SHIFT;
+       vaddr = (unsigned long)heap->archdep.heapbase;
 
 #ifdef CONFIG_MMU
-       vaddr = (unsigned long)heap->archdep.heapbase + offset;
+       /*
+        * offset is actually an offset from the start of the heap
+        * memory.
+        */
+       if (offset + size > xnheap_extentsize(heap))
+               goto deref_out;
 
-       err = -EAGAIN;
+       vaddr += offset;
+       ret = -EAGAIN;
        if ((heap->archdep.kmflags & ~XNHEAP_GFP_NONCACHED) == 0) {
                unsigned long maddr = vma->vm_start;
 
@@ -1201,7 +1204,18 @@ static int xnheap_mmap(struct file *file, struct 
vm_area_struct *vma)
 
        xnarch_fault_range(vma);
 #else /* !CONFIG_MMU */
-       (void)vaddr;
+       /*
+        * Despite the kernel sees a single backing device with direct
+        * mapping capabilities (/dev/rtheap), we do map different
+        * heaps through it, so we want a brand new mapping region for
+        * each of them. To this end, we must request mappings on
+        * non-overlapping areas. To make sure of this in the nommu
+        * case, we request mappings from offsets representing the
+        * start RAM address of the heap memory.
+        */
+       if (offset + size > vaddr + xnheap_extentsize(heap))
+               goto deref_out;
+
        if ((heap->archdep.kmflags & ~XNHEAP_GFP_NONCACHED) != 0 ||
            heap->archdep.kmflags == XNHEAP_GFP_NONCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1211,7 +1225,8 @@ static int xnheap_mmap(struct file *file, struct 
vm_area_struct *vma)
 
 deref_out:
        xnheap_vmclose(vma);
-       return err;
+
+       return ret;
 }
 
 #ifndef CONFIG_MMU
@@ -1221,7 +1236,7 @@ static unsigned long xnheap_get_unmapped_area(struct file 
*file,
                                              unsigned long pgoff,
                                              unsigned long flags)
 {
-       unsigned long uaddr, offset;
+       unsigned long area, offset;
        struct xnheap *heap;
        int ret;
 
@@ -1232,15 +1247,15 @@ static unsigned long xnheap_get_unmapped_area(struct 
file *file,
        if (heap == NULL)
                goto fail;
 
+       area = (unsigned long)heap->archdep.heapbase;
        offset = pgoff << PAGE_SHIFT;
-       if (offset + len > xnheap_extentsize(heap))
+       if (offset < area ||
+           offset + len > area + xnheap_extentsize(heap))
                goto fail;
 
-       uaddr = (unsigned long)heap->archdep.heapbase + offset;
-
        spin_unlock(&kheapq_lock);
 
-       return uaddr;
+       return offset;
 fail:
        spin_unlock(&kheapq_lock);
 
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index cc313cf..e684844 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -1892,15 +1892,22 @@ static int xnshadow_sys_trace(struct pt_regs *regs)
 
 static int xnshadow_sys_sem_heap(struct pt_regs *regs)
 {
-       struct xnheap_desc hinfo, __user *us_hinfo;
+       struct xnheap_desc hd, __user *u_hd;
+       struct xnheap *heap;
        unsigned global;
 
        global = __xn_reg_arg2(regs);
-       us_hinfo = (struct xnheap_desc __user *) __xn_reg_arg1(regs);
-       hinfo.handle = (unsigned long)&xnsys_ppd_get(global)->sem_heap;
-       hinfo.size = xnheap_extentsize(&xnsys_ppd_get(global)->sem_heap);
+       u_hd = (struct xnheap_desc __user *)__xn_reg_arg1(regs);
+       heap = &xnsys_ppd_get(global)->sem_heap;
+       hd.handle = (unsigned long)heap;
+       hd.size = xnheap_extentsize(heap);
+#ifdef CONFIG_MMU
+       hd.area = 0L;   /* Not used yet to preserve the 2.5.x ABI. */
+#else
+       hd.area = (unsigned long)heap->archdep.heapbase;
+#endif
 
-       return __xn_safe_copy_to_user(us_hinfo, &hinfo, sizeof(*us_hinfo));
+       return __xn_safe_copy_to_user(u_hd, &hd, sizeof(*u_hd));
 }
 
 static int xnshadow_sys_current(struct pt_regs *regs)
diff --git a/src/skins/common/sem_heap.c b/src/skins/common/sem_heap.c
index acb655b..6536966 100644
--- a/src/skins/common/sem_heap.c
+++ b/src/skins/common/sem_heap.c
@@ -20,7 +20,7 @@ unsigned long xeno_sem_heap[2] = { 0, 0 };
 
 struct xnvdso *nkvdso;
 
-void *xeno_map_heap(unsigned long handle, unsigned int size)
+void *xeno_map_heap(struct xnheap_desc *hd)
 {
        int fd, ret;
        void *addr;
@@ -31,14 +31,14 @@ void *xeno_map_heap(unsigned long handle, unsigned int size)
                return MAP_FAILED;
        }
 
-       ret = ioctl(fd, 0, handle);
+       ret = ioctl(fd, 0, hd->handle);
        if (ret) {
                perror("Xenomai: ioctl");
                return MAP_FAILED;
        }
 
-       addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
-                   MAP_SHARED, fd, 0L);
+       addr = mmap(NULL, hd->size, PROT_READ|PROT_WRITE,
+                   MAP_SHARED, fd, hd->area);
 
        close(fd);
 
@@ -47,40 +47,51 @@ void *xeno_map_heap(unsigned long handle, unsigned int size)
 
 static void *map_sem_heap(unsigned int shared)
 {
-       struct xnheap_desc hinfo;
+       struct xnheap_desc hdesc;
        int ret;
 
-       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hinfo, shared);
+#ifdef CONFIG_MMU
+       /*
+        * XXX: We preserve the 2.5.x ABI for MMU-enabled targets;
+        * pre-2.5.4 releases won't fill in the area information,
+        * others will just clear it. So let's iron the case.  NOMMU
+        * systems will always fill in the proper information, since
+        * we must break the ABI for them, so that heap mapping works,
+        * eventually.
+        */
+       hdesc.area = 0;
+#endif
+       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hdesc, shared);
        if (ret < 0) {
                errno = -ret;
                perror("Xenomai: sys_sem_heap");
                return MAP_FAILED;
        }
 
-       return xeno_map_heap(hinfo.handle, hinfo.size);
+       return xeno_map_heap(&hdesc);
 }
 
-static void unmap_sem_heap(unsigned long heap_addr, unsigned int shared)
+static void unmap_sem_heap(unsigned long addr, unsigned int shared)
 {
-       struct xnheap_desc hinfo;
+       struct xnheap_desc hdesc;
        int ret;
 
-       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hinfo, shared);
+       ret = XENOMAI_SYSCALL2(__xn_sys_sem_heap, &hdesc, shared);
        if (ret < 0) {
                errno = -ret;
                perror("Xenomai: unmap sem_heap");
                return;
        }
 
-       munmap((void *)heap_addr, hinfo.size);
+       munmap((void *)addr, hdesc.size);
 }
 
 static void remap_on_fork(void)
 {
        unmap_sem_heap(xeno_sem_heap[0], 0);
 
-       xeno_sem_heap[0] = (unsigned long) map_sem_heap(0);
-       if (xeno_sem_heap[0] == (unsigned long) MAP_FAILED) {
+       xeno_sem_heap[0] = (unsigned long)map_sem_heap(0);
+       if (xeno_sem_heap[0] == (unsigned long)MAP_FAILED) {
                perror("Xenomai: mmap local sem heap");
                exit(EXIT_FAILURE);
        }
@@ -105,15 +116,15 @@ static void xeno_init_vdso(void)
 
 static void xeno_init_sem_heaps_inner(void)
 {
-       xeno_sem_heap[0] = (unsigned long) map_sem_heap(0);
-       if (xeno_sem_heap[0] == (unsigned long) MAP_FAILED) {
+       xeno_sem_heap[0] = (unsigned long)map_sem_heap(0);
+       if (xeno_sem_heap[0] == (unsigned long)MAP_FAILED) {
                perror("Xenomai: mmap local sem heap");
                exit(EXIT_FAILURE);
        }
        pthread_atfork(NULL, NULL, remap_on_fork);
 
-       xeno_sem_heap[1] = (unsigned long) map_sem_heap(1);
-       if (xeno_sem_heap[1] == (unsigned long) MAP_FAILED) {
+       xeno_sem_heap[1] = (unsigned long)map_sem_heap(1);
+       if (xeno_sem_heap[1] == (unsigned long)MAP_FAILED) {
                perror("Xenomai: mmap global sem heap");
                exit(EXIT_FAILURE);
        }


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to