Module: xenomai-head
Branch: master
Commit: 8d9040be1c0bb723313936f990515dcd82ac666b
URL:
http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=8d9040be1c0bb723313936f990515dcd82ac666b
Author: Jan Kiszka jan.kis...@siemens.com
Date: Sat Oct 24 19:09:43 2009 +0200
nucleus: Avoid returning errors from xnheap_destroy_mapped
Allowing xnheap_delete_mapped to return an error and then attempting
to recover from it does not work out very well: Corner cases are racy,
intransparent to the user, and proper error handling imposes a lot of
complexity on the caller - if it actually bothers to check the return
value...
Fortunately, there is no reason for this function to fail: If the heap
is still mapped, just install the provided cleanup handler and switch
to deferred removal. If the unmapping fails, we either raced with some
other caller of unmap or user space provided a bogus address, or
something else is wrong. In any case, leaving the cleanup callback
behind is the best we can do anyway.
Removing the return value immediately allows to simplify the callers,
namely rt_queue_delete and rt_heap_delete.
Note: This is still not 100% waterproof. If we issue
xnheap_destroy_mapped from module cleanup passing a release handler
that belongs to the module text, deferred release will cause a crash.
But this corner case is no new regression, so let's keep the head in
the sand.
Signed-off-by: Jan Kiszka jan.kis...@siemens.com
---
include/nucleus/heap.h|6 ++--
ksrc/nucleus/heap.c | 47 +++-
ksrc/skins/native/heap.c | 27 -
ksrc/skins/native/queue.c | 27 -
ksrc/skins/rtai/shm.c |6 +
5 files changed, 51 insertions(+), 62 deletions(-)
diff --git a/include/nucleus/heap.h b/include/nucleus/heap.h
index ca691bf..44db738 100644
--- a/include/nucleus/heap.h
+++ b/include/nucleus/heap.h
@@ -204,9 +204,9 @@ int xnheap_init_mapped(xnheap_t *heap,
u_long heapsize,
int memflags);
-int xnheap_destroy_mapped(xnheap_t *heap,
- void (*release)(struct xnheap *heap),
- void __user *mapaddr);
+void xnheap_destroy_mapped(xnheap_t *heap,
+ void (*release)(struct xnheap *heap),
+ void __user *mapaddr);
#define xnheap_mapped_offset(heap,ptr) \
(((caddr_t)(ptr)) - ((caddr_t)(heap)->archdep.heapbase))
diff --git a/ksrc/nucleus/heap.c b/ksrc/nucleus/heap.c
index 4958daa..15db9cd 100644
--- a/ksrc/nucleus/heap.c
+++ b/ksrc/nucleus/heap.c
@@ -990,6 +990,7 @@ static inline void *__alloc_and_reserve_heap(size_t size,
int kmflags)
SetPageReserved(virt_to_page(vaddr));
}
+ printk("%s: ptr %p\n", __FUNCTION__, ptr);
return ptr;
}
@@ -1000,6 +1001,7 @@ static void __unreserve_and_free_heap(void *ptr, size_t
size, int kmflags)
/* Size must be page-aligned. */
vabase = (unsigned long)ptr;
+ printk("%s: ptr %p\n", __FUNCTION__, ptr);
if ((kmflags & ~XNHEAP_GFP_NONCACHED) == 0) {
for (vaddr = vabase; vaddr < vabase + size; vaddr += PAGE_SIZE)
@@ -1173,42 +1175,51 @@ int xnheap_init_mapped(xnheap_t *heap, u_long heapsize,
int memflags)
return 0;
}
-int xnheap_destroy_mapped(xnheap_t *heap, void (*release)(struct xnheap *heap),
- void __user *mapaddr)
+void xnheap_destroy_mapped(xnheap_t *heap,
+ void (*release)(struct xnheap *heap),
+ void __user *mapaddr)
{
- int ret = 0, ccheck;
unsigned long len;
- ccheck = mapaddr ? 1 : 0;
+ /*
+* Trying to unmap user memory without providing a release
+* handler for deferred cleanup is a bug.
+*/
+ XENO_ASSERT(NUCLEUS, mapaddr == NULL || release, /* nop */);
spin_lock(&kheapq_lock);
- if (heap->archdep.numaps > ccheck) {
- heap->archdep.release = release;
- spin_unlock(&kheapq_lock);
- return -EBUSY;
- }
-
removeq(&kheapq, &heap->link); /* Prevent further mapping. */
+
+ heap->archdep.release = release;
+
+ if (heap->archdep.numaps == 0)
+ mapaddr = NULL; /* nothing left to unmap */
+ else
+ release = NULL; /* will be called by Linux on unmap */
+
spin_unlock(&kheapq_lock);
len = xnheap_extentsize(heap);
if (mapaddr) {
down_write(&current->mm->mmap_sem);
- heap->archdep.release = NULL;
- ret = do_munmap(current->mm, (unsigned long)mapaddr, len);
+ do_munmap(current->mm, (unsigned long)mapaddr, len);
up_write(&current->mm->mmap_sem);
}
- if (ret == 0) {
+ if (heap->archdep.numaps > 0) {
+ /* The release handler is supposed to clean up the rest. */
+