Module: xenomai-forge
Branch: master
Commit: 113ed21e7ab1b5959c647ca7cb5c0f9df833af6a
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=113ed21e7ab1b5959c647ca7cb5c0f9df833af6a

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Nov 17 10:48:22 2011 +0100

copperplate/heapobj-shared: sanitize heap creation, fix extension

We still do not allow the kernel to remap a memory mapping to a
different base address, so the heap extension feature is likely to be
unreliable because of random failure to remap at the same place. So
either we allow MREMAP_MAYMOVE, which would not be a trivial task in
terms of multi-thread/multi-process synchronization, or we just get
rid of the feature.

This needs more thought.

---

 lib/copperplate/heapobj-pshared.c |  243 +++++++++++++++++++------------------
 1 files changed, 126 insertions(+), 117 deletions(-)

diff --git a/lib/copperplate/heapobj-pshared.c 
b/lib/copperplate/heapobj-pshared.c
index 9387807..992aee6 100644
--- a/lib/copperplate/heapobj-pshared.c
+++ b/lib/copperplate/heapobj-pshared.c
@@ -553,101 +553,6 @@ out:
        return ret;
 }
 
-static int create_heap(struct heapobj *hobj, const char *session,
-                      const char *name, size_t size, int flags)
-{
-       struct heap *heap;
-       struct stat sbuf;
-       memoff_t len;
-       int ret, fd;
-
-       /*
-        * A storage page should be obviously larger than an extent
-        * header, but we still make sure of this in debug mode, so
-        * that we can rely on __align_to() for rounding to the
-        * minimum size in production builds, without any further
-        * test (e.g. like size >= sizeof(struct heap_extent)).
-        */
-       assert(HOBJ_PAGE_SIZE > sizeof(struct heap_extent));
-
-       size = __align_to(size, HOBJ_PAGE_SIZE);
-       if (size > HOBJ_MAXEXTSZ)
-               return __bt(-EINVAL);
-
-       if (size - sizeof(struct heap_extent) < HOBJ_PAGE_SIZE * 2)
-               size += HOBJ_PAGE_SIZE * 2;
-
-       len = size + sizeof(*heap);
-
-       if (flags & HOBJ_SHARABLE) {
-               heap = alloc_block(&main_heap, len);
-               if (heap == NULL)
-                       return __bt(-ENOMEM);
-               fd = -1;
-               heap->maplen = len;
-               init_heap(heap, (caddr_t)heap + sizeof(*heap), size);
-               goto finish;
-       }
-
-       if (name)
-               snprintf(hobj->name, sizeof(hobj->name), "%s:%s", session, 
name);
-       else
-               snprintf(hobj->name, sizeof(hobj->name), "%s:%p", session, 
hobj);
-
-       snprintf(hobj->fsname, sizeof(hobj->fsname), "/xeno:%s", hobj->name);
-
-       if (flags & HOBJ_FORCE)
-               __STD(shm_unlink(hobj->fsname));
-
-       fd = __STD(shm_open(hobj->fsname, O_RDWR|O_CREAT, 0600));
-       if (fd < 0)
-               return __bt(-errno);
-
-       ret = flock(fd, LOCK_EX);
-       if (ret) {
-               __STD(close(fd));
-               return __bt(-errno);
-       }
-
-       ret = fstat(fd, &sbuf);
-       if (ret) {
-               __STD(close(fd));
-               return __bt(-errno);
-       }
-
-       if (sbuf.st_size == 0) {
-               ret = __STD(ftruncate(fd, len));
-               if (ret) {
-                       __STD(close(fd));
-                       __STD(shm_unlink(hobj->fsname));
-                       return __bt(-errno);
-               }
-       } else if (sbuf.st_size != len) {
-               __STD(close(fd));
-               return __bt(-EEXIST);
-       }
-
-       heap = __STD(mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0));
-       if (heap == MAP_FAILED) {
-               __STD(close(fd));
-               return __bt(-errno);
-       }
-
-       if (sbuf.st_size == 0 || (heap->cpid && kill(heap->cpid, 0))) {
-               heap->maplen = len;
-               init_heap(heap, (caddr_t)heap + sizeof(*heap), size);
-       }
-
-       flock(fd, LOCK_UN);
-finish:
-       hobj->pool = heap;
-       hobj->size = size;
-       hobj->fd = fd;
-       hobj->flags = flags;
-
-       return 0;
-}
-
 static void pshared_destroy(struct heapobj *hobj)
 {
        struct heap *heap = hobj->pool;
@@ -655,7 +560,7 @@ static void pshared_destroy(struct heapobj *hobj)
 
        __RT(pthread_mutex_destroy(&heap->lock));
 
-       if (hobj->flags & HOBJ_SHARABLE) {
+       if (hobj->flags & HOBJ_SHAREABLE) {
                free_block(&main_heap, heap);
                return;
        }
@@ -676,9 +581,9 @@ static int pshared_extend(struct heapobj *hobj, size_t 
size, void *mem)
        int ret, state;
        caddr_t p;
 
-       if (hobj == &main_pool) /* Cannot extend the main pool. */
+       if (hobj == &main_pool) /* Can't extend the main pool. */
                return __bt(-EINVAL);
-
+               
        if (size <= HOBJ_PAGE_SIZE * 2)
                return __bt(-EINVAL);
 
@@ -689,17 +594,19 @@ static int pshared_extend(struct heapobj *hobj, size_t 
size, void *mem)
                ret = __bt(-errno);
                goto out;
        }
+
        /*
         * We do not allow the kernel to move the mapping address, so
         * it is safe referring to the heap contents while extending
         * it.
         */
-       p = mremap(heap, hobj->size + sizeof(*heap), newsize, 0);
+       p = mremap(heap, heap->maplen, newsize, 0);
        if (p == MAP_FAILED) {
                ret = __bt(-errno);
                goto out;
        }
 
+       heap->maplen = newsize;
        extent = (struct heap_extent *)(p + hobj->size + sizeof(*heap));
        init_extent(heap, extent);
        __list_append(heap, &extent->link, &heap->extents);
@@ -707,7 +614,7 @@ static int pshared_extend(struct heapobj *hobj, size_t 
size, void *mem)
 out:
        write_unlock_safe(&heap->lock, state);
 
-       return __bt(ret);
+       return ret;
 }
 
 static void *pshared_alloc(struct heapobj *hobj, size_t size)
@@ -731,6 +638,109 @@ static size_t pshared_inquire(struct heapobj *hobj)
        return heap->ubytes;
 }
 
+static int create_heap(struct heapobj *hobj, const char *session,
+                      const char *name, size_t size, int flags)
+{
+       struct heap *heap;
+       struct stat sbuf;
+       memoff_t len;
+       int ret, fd;
+
+       /*
+        * A storage page should be obviously larger than an extent
+        * header, but we still make sure of this in debug mode, so
+        * that we can rely on __align_to() for rounding to the
+        * minimum size in production builds, without any further
+        * test (e.g. like size >= sizeof(struct heap_extent)).
+        */
+       assert(HOBJ_PAGE_SIZE > sizeof(struct heap_extent));
+
+       size += internal_overhead(size);
+       size = __align_to(size, HOBJ_PAGE_SIZE);
+       if (size > HOBJ_MAXEXTSZ)
+               return __bt(-EINVAL);
+
+       if (size - sizeof(struct heap_extent) < HOBJ_PAGE_SIZE * 2)
+               size += HOBJ_PAGE_SIZE * 2;
+
+       len = size + sizeof(*heap);
+
+       if (flags & HOBJ_SHAREABLE) {
+               heap = alloc_block(&main_heap, len);
+               if (heap == NULL)
+                       return __bt(-ENOMEM);
+               fd = -1;
+               heap->maplen = len;
+               init_heap(heap, (caddr_t)heap + sizeof(*heap), size);
+               goto finish;
+       }
+
+       if (name)
+               snprintf(hobj->name, sizeof(hobj->name), "%s:%s", session, 
name);
+       else
+               snprintf(hobj->name, sizeof(hobj->name), "%s:%p", session, 
hobj);
+
+       snprintf(hobj->fsname, sizeof(hobj->fsname), "/xeno:%s", hobj->name);
+
+       if (flags & HOBJ_FORCE)
+               __STD(shm_unlink(hobj->fsname));
+
+       fd = __STD(shm_open(hobj->fsname, O_RDWR|O_CREAT, 0600));
+       if (fd < 0)
+               return __bt(-errno);
+
+       ret = flock(fd, LOCK_EX);
+       if (ret)
+               goto errno_fail;
+
+       ret = fstat(fd, &sbuf);
+       if (ret)
+               goto errno_fail;
+
+       if (sbuf.st_size > 0) {
+               heap = __STD(mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, 
fd, 0));
+               if (heap == MAP_FAILED)
+                       goto errno_fail;
+               if (heap->cpid && kill(heap->cpid, 0) == 0) {
+                       if (heap->maplen == len)
+                               goto done;
+                       __STD(close(fd));
+                       return __bt(-EEXIST);
+               }
+               __STD(munmap(heap, len));
+       }
+
+       ret = __STD(ftruncate(fd, len));
+       if (ret)
+               goto unlink_fail;
+
+       heap = __STD(mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0));
+       if (heap == MAP_FAILED)
+               goto unlink_fail;
+
+       heap->maplen = len;
+       init_heap(heap, (caddr_t)heap + sizeof(*heap), size);
+done:
+       flock(fd, LOCK_UN);
+finish:
+       hobj->pool = heap;
+       hobj->size = size;
+       hobj->fd = fd;
+       hobj->flags = flags;
+
+       return 0;
+unlink_fail:
+       ret = __bt(-errno);
+       __STD(shm_unlink(hobj->fsname));
+       goto close_fail;
+errno_fail:
+       ret = __bt(-errno);
+close_fail:
+       __STD(close(fd));
+
+       return ret;
+}
+
 static struct heapobj_ops pshared_ops = {
        .destroy = pshared_destroy,
        .extend = pshared_extend,
@@ -765,7 +775,7 @@ int heapobj_init_shareable(struct heapobj *hobj, const char 
*name,
        hobj->ops = &pshared_ops;
 
        return __bt(create_heap(hobj, __this_node.session_label, name,
-                               size, flags | HOBJ_SHARABLE));
+                               size, flags | HOBJ_SHAREABLE));
 }
 
 int heapobj_init_array(struct heapobj *hobj, const char *name,
@@ -808,23 +818,22 @@ int heapobj_pkg_init_shared(void)
        int ret;
 
        ret = heapobj_init(&main_pool, "main", __this_node.mem_pool, NULL);
-       if (ret) {
-               if (ret == -EEXIST) {
-                       if (__this_node.reset_session)
-                               /* Init failed despite override. */
-                               warning("active session %s is conflicting\n",
-                                       __this_node.session_label);
-                       else
-                               warning("non-matching session %s already 
exists,\n"
-                                       "pass --reset to override.",
-                                       __this_node.session_label);
-               }
-
-               return __bt(ret);
+       if (ret == 0) {
+               __pshared_heap = main_pool.pool;
+               __pshared_catalog = &main_heap.catalog;
+               return 0;
        }
 
-       __pshared_heap = main_pool.pool;
-       __pshared_catalog = &main_heap.catalog;
+       if (ret == -EEXIST) {
+               if (__this_node.reset_session)
+                       /* Init failed despite override. */
+                       warning("active session %s is conflicting\n",
+                               __this_node.session_label);
+               else
+                       warning("non-matching session %s already exists,\n"
+                               "pass --reset to override.",
+                               __this_node.session_label);
+       }
 
-       return 0;
+       return __bt(ret);
 }


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to