Module: xenomai-abe
Branch: analogy
Commit: 8d9040be1c0bb723313936f990515dcd82ac666b
URL:    
http://git.xenomai.org/?p=xenomai-abe.git;a=commit;h=8d9040be1c0bb723313936f990515dcd82ac666b

Author: Jan Kiszka <jan.kis...@siemens.com>
Date:   Sat Oct 24 19:09:43 2009 +0200

nucleus: Avoid returning errors from xnheap_destroy_mapped

Allowing xnheap_delete_mapped to return an error and then attempting
to recover from it does not work out very well: Corner cases are racy,
opaque to the user, and proper error handling imposes a lot of
complexity on the caller - if it actually bothers to check the return
value...

Fortunately, there is no reason for this function to fail: If the heap
is still mapped, just install the provided cleanup handler and switch
to deferred removal. If the unmapping fails, we either raced with some
other caller of unmap or user space provided a bogus address, or
something else is wrong. In any case, leaving the cleanup callback
behind is the best we can do anyway.

Removing the return value immediately allows to simplify the callers,
namely rt_queue_delete and rt_heap_delete.

Note: This is still not 100% waterproof. If we issue
xnheap_destroy_mapped from module cleanup passing a release handler
that belongs to the module text, deferred release will cause a crash.
But this corner case is no new regression, so let's keep the head in
the sand.

Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>

---

 include/nucleus/heap.h    |    6 ++--
 ksrc/nucleus/heap.c       |   47 +++++++++++++++++++++++++++-----------------
 ksrc/skins/native/heap.c  |   27 ++++++++-----------------
 ksrc/skins/native/queue.c |   27 ++++++++-----------------
 ksrc/skins/rtai/shm.c     |    6 +----
 5 files changed, 51 insertions(+), 62 deletions(-)

diff --git a/include/nucleus/heap.h b/include/nucleus/heap.h
index ca691bf..44db738 100644
--- a/include/nucleus/heap.h
+++ b/include/nucleus/heap.h
@@ -204,9 +204,9 @@ int xnheap_init_mapped(xnheap_t *heap,
                       u_long heapsize,
                       int memflags);
 
-int xnheap_destroy_mapped(xnheap_t *heap,
-                         void (*release)(struct xnheap *heap),
-                         void __user *mapaddr);
+void xnheap_destroy_mapped(xnheap_t *heap,
+                          void (*release)(struct xnheap *heap),
+                          void __user *mapaddr);
 
 #define xnheap_mapped_offset(heap,ptr) \
 (((caddr_t)(ptr)) - ((caddr_t)(heap)->archdep.heapbase))
diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index 4958daa..15db9cd 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -990,6 +990,7 @@ static inline void *__alloc_and_reserve_heap(size_t size, 
int kmflags)
                        SetPageReserved(virt_to_page(vaddr));
        }
 
+       printk("%s: ptr %p\n", __FUNCTION__, ptr);
        return ptr;
 }
 
@@ -1000,6 +1001,7 @@ static void __unreserve_and_free_heap(void *ptr, size_t 
size, int kmflags)
        /* Size must be page-aligned. */
 
        vabase = (unsigned long)ptr;
+       printk("%s: ptr %p\n", __FUNCTION__, ptr);
 
        if ((kmflags & ~XNHEAP_GFP_NONCACHED) == 0) {
                for (vaddr = vabase; vaddr < vabase + size; vaddr += PAGE_SIZE)
@@ -1173,42 +1175,51 @@ int xnheap_init_mapped(xnheap_t *heap, u_long heapsize, 
int memflags)
        return 0;
 }
 
-int xnheap_destroy_mapped(xnheap_t *heap, void (*release)(struct xnheap *heap),
-                         void __user *mapaddr)
+void xnheap_destroy_mapped(xnheap_t *heap,
+                          void (*release)(struct xnheap *heap),
+                          void __user *mapaddr)
 {
-       int ret = 0, ccheck;
        unsigned long len;
 
-       ccheck = mapaddr ? 1 : 0;
+       /*
+        * Trying to unmap user memory without providing a release
+        * handler for deferred cleanup is a bug.
+        */
+       XENO_ASSERT(NUCLEUS, mapaddr == NULL || release, /* nop */);
 
        spin_lock(&kheapq_lock);
 
-       if (heap->archdep.numaps > ccheck) {
-               heap->archdep.release = release;
-               spin_unlock(&kheapq_lock);
-               return -EBUSY;
-       }
-
        removeq(&kheapq, &heap->link); /* Prevent further mapping. */
+
+       heap->archdep.release = release;
+
+       if (heap->archdep.numaps == 0)
+               mapaddr = NULL; /* nothing left to unmap */
+       else
+               release = NULL; /* will be called by Linux on unmap */
+
        spin_unlock(&kheapq_lock);
 
        len = xnheap_extentsize(heap);
 
        if (mapaddr) {
                down_write(&current->mm->mmap_sem);
-               heap->archdep.release = NULL;
-               ret = do_munmap(current->mm, (unsigned long)mapaddr, len);
+               do_munmap(current->mm, (unsigned long)mapaddr, len);
                up_write(&current->mm->mmap_sem);
        }
 
-       if (ret == 0) {
+       if (heap->archdep.numaps > 0) {
+               /* The release handler is supposed to clean up the rest. */
+               XENO_ASSERT(NUCLEUS, heap->archdep.release, /* nop */);
+               return;
+       }
+
+       if (mapaddr == NULL) {
                __unreserve_and_free_heap(heap->archdep.heapbase, len,
                                          heap->archdep.kmflags);
                if (release)
                        release(heap);
        }
-
-       return ret;
 }
 
 static struct file_operations xnheap_fops = {
@@ -1260,11 +1271,11 @@ int xnheap_init_mapped(xnheap_t *heap, u_long heapsize, 
int memflags)
        return -ENOMEM;
 }
 
-int xnheap_destroy_mapped(xnheap_t *heap, void (*release)(struct xnheap *heap),
-                         void __user *mapaddr)
+void xnheap_destroy_mapped(xnheap_t *heap,
+                          void (*release)(struct xnheap *heap),
+                          void __user *mapaddr)
 {
        xnheap_destroy(heap, &xnheap_free_extent, NULL);
-       return 0;
 }
 #endif /* !CONFIG_XENO_OPT_PERVASIVE */
 
diff --git a/ksrc/skins/native/heap.c b/ksrc/skins/native/heap.c
index 1abe0ff..eacb3f0 100644
--- a/ksrc/skins/native/heap.c
+++ b/ksrc/skins/native/heap.c
@@ -362,9 +362,6 @@ static void __heap_post_release(struct xnheap *h)
  *
  * @return 0 is returned upon success. Otherwise:
  *
- * - -EBUSY is returned if @a heap is in use by another process and the
- * descriptor is not destroyed.
- *
  * - -EINVAL is returned if @a heap is not a heap descriptor.
  *
  * - -EIDRM is returned if @a heap is a deleted heap descriptor.
@@ -384,7 +381,7 @@ static void __heap_post_release(struct xnheap *h)
 
 int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
 {
-       int err = 0;
+       int err;
        spl_t s;
 
        if (!xnpod_root_p())
@@ -410,29 +407,23 @@ int rt_heap_delete_inner(RT_HEAP *heap, void __user 
*mapaddr)
 
        /*
         * The heap descriptor has been marked as deleted before we
-        * released the superlock thus preventing any sucessful
-        * subsequent calls of rt_heap_delete(), so now we can
-        * actually destroy it safely.
+        * released the superlock thus preventing any subsequent call
+        * to rt_heap_delete() to succeed, so now we can actually
+        * destroy it safely.
         */
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
        if (heap->mode & H_MAPPABLE)
-               err = xnheap_destroy_mapped(&heap->heap_base,
-                                           __heap_post_release, mapaddr);
+               xnheap_destroy_mapped(&heap->heap_base,
+                                     __heap_post_release, mapaddr);
        else
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
+       {
                xnheap_destroy(&heap->heap_base, &__heap_flush_private, NULL);
-
-       xnlock_get_irqsave(&nklock, s);
-
-       if (err)
-               heap->magic = XENO_HEAP_MAGIC;
-       else if (!(heap->mode & H_MAPPABLE))
                __heap_post_release(&heap->heap_base);
+       }
 
-       xnlock_put_irqrestore(&nklock, s);
-
-       return err;
+       return 0;
 }
 
 int rt_heap_delete(RT_HEAP *heap)
diff --git a/ksrc/skins/native/queue.c b/ksrc/skins/native/queue.c
index 845a1b6..211925f 100644
--- a/ksrc/skins/native/queue.c
+++ b/ksrc/skins/native/queue.c
@@ -331,9 +331,6 @@ static void __queue_post_release(struct xnheap *heap)
  * - -EPERM is returned if this service was called from an
  * asynchronous context.
  *
- * - -EBUSY is returned if an attempt is made to delete a shared queue
- * which is still bound to a process.
- *
  * Environments:
  *
  * This service can be called from:
@@ -346,7 +343,7 @@ static void __queue_post_release(struct xnheap *heap)
 
 int rt_queue_delete_inner(RT_QUEUE *q, void __user *mapaddr)
 {
-       int err = 0;
+       int err;
        spl_t s;
 
        if (xnpod_asynch_p())
@@ -371,29 +368,23 @@ int rt_queue_delete_inner(RT_QUEUE *q, void __user 
*mapaddr)
 
        /*
         * The queue descriptor has been marked as deleted before we
-        * released the superlock thus preventing any sucessful
-        * subsequent calls of rt_queue_delete(), so now we can
-        * actually destroy the associated heap safely.
+        * released the superlock thus preventing any subsequent call
+        * to rt_queue_delete() to succeed, so now we can actually
+        * destroy the associated heap safely.
         */
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
        if (q->mode & Q_SHARED)
-               err = xnheap_destroy_mapped(&q->bufpool,
-                                           __queue_post_release, mapaddr);
+               xnheap_destroy_mapped(&q->bufpool,
+                                     __queue_post_release, mapaddr);
        else
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
+       {
                xnheap_destroy(&q->bufpool, &__queue_flush_private, NULL);
-
-       xnlock_get_irqsave(&nklock, s);
-
-       if (err)
-               q->magic = XENO_QUEUE_MAGIC;
-       else if (!(q->mode & Q_SHARED))
                __queue_post_release(&q->bufpool);
+       }
 
-       xnlock_put_irqrestore(&nklock, s);
-
-       return err;
+       return 0;
 }
 
 int rt_queue_delete(RT_QUEUE *q)
diff --git a/ksrc/skins/rtai/shm.c b/ksrc/skins/rtai/shm.c
index fddf455..4c56495 100644
--- a/ksrc/skins/rtai/shm.c
+++ b/ksrc/skins/rtai/shm.c
@@ -293,13 +293,11 @@ static int _shm_free(unsigned long name)
                                 * [YES!]
                                 */
 #ifdef CONFIG_XENO_OPT_PERVASIVE
-                               ret = xnheap_destroy_mapped(p->heap, NULL, 
NULL);
+                               xnheap_destroy_mapped(p->heap, NULL, NULL);
 #else /* !CONFIG_XENO_OPT_PERVASIVE */
                                xnheap_destroy(p->heap,
                                               &__heap_flush_private, NULL);
 #endif /* !CONFIG_XENO_OPT_PERVASIVE */
-                               if (ret)
-                                       goto unlock_and_exit;
                                xnheap_free(&kheap, p->heap);
                        }
                        removeq(&xnshm_allocq, &p->link);
@@ -311,8 +309,6 @@ static int _shm_free(unsigned long name)
                holder = nextq(&xnshm_allocq, holder);
        }
 
-      unlock_and_exit:
-
        xnlock_put_irqrestore(&nklock, s);
 
        return ret;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to