Module: xenomai-2.4
Branch: master
Commit: dfeec7d0af43ac67e3a004bfc16024d774240eb0
URL:    http://git.xenomai.org/?p=xenomai-2.4.git;a=commit;h=dfeec7d0af43ac67e3a004bfc16024d774240eb0

Author: Jan Kiszka <jan.kis...@siemens.com>
Date:   Tue Oct 20 13:15:00 2009 +0200

native: Fix memory leak on heap/queue auto-deletion

We currently leak user-space heap/queue objects when the owner
terminates without deleting them first. Fix this by releasing the
objects in the corresponding cleanup callbacks, which are also called
on owner termination.

Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>

---

 ksrc/skins/native/heap.c    |    7 +++++++
 ksrc/skins/native/queue.c   |    7 +++++++
 ksrc/skins/native/syscall.c |   25 ++++++-------------------
 3 files changed, 20 insertions(+), 19 deletions(-)
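
A minimal C sketch of the pattern this patch switches to (illustrative
names only, not the actual Xenomai API): the object memory is freed in
the cleanup/post-release callback, which runs both on explicit deletion
and when the owning process terminates, so neither path can leak it.

#include <stdlib.h>

struct object {
	int cpid;			/* non-zero if created from user space */
	void (*post_release)(struct object *obj);
};

/* Cleanup callback: also invoked when the owning process terminates. */
static void object_post_release(struct object *obj)
{
	/* ...wake up waiters, unregister the object, etc... */
	if (obj->cpid)
		free(obj);	/* user-space object: release its memory here */
}

/* Explicit delete path: triggers the release, but no longer frees obj
   itself, since the callback now owns that responsibility. */
static int object_delete(struct object *obj)
{
	obj->post_release(obj);
	return 0;
}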

diff --git a/ksrc/skins/native/heap.c b/ksrc/skins/native/heap.c
index a5dfce9..b78b968 100644
--- a/ksrc/skins/native/heap.c
+++ b/ksrc/skins/native/heap.c
@@ -349,6 +349,13 @@ static void __heap_post_release(struct xnheap *h) /* nklock held, IRQs off */
                 * deletion: reschedule now.
                 */
                xnpod_schedule();
+
+       xnlock_put_irqrestore(&nklock, s);
+
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+       if (heap->cpid)
+               xnfree(heap);
+#endif
 }
 
 /**
diff --git a/ksrc/skins/native/queue.c b/ksrc/skins/native/queue.c
index f5788ba..4b85786 100644
--- a/ksrc/skins/native/queue.c
+++ b/ksrc/skins/native/queue.c
@@ -312,6 +312,13 @@ static void __queue_post_release(struct xnheap *heap) /* nklock held, IRQs off */
                 * the deletion: reschedule now.
                 */
                xnpod_schedule();
+
+       xnlock_put_irqrestore(&nklock, s);
+
+#ifdef CONFIG_XENO_OPT_PERVASIVE
+       if (q->cpid)
+               xnfree(q);
+#endif
 }
 
 /**
diff --git a/ksrc/skins/native/syscall.c b/ksrc/skins/native/syscall.c
index e5101da..6286bd9 100644
--- a/ksrc/skins/native/syscall.c
+++ b/ksrc/skins/native/syscall.c
@@ -2127,7 +2127,6 @@ static int __rt_queue_delete(struct task_struct *curr, struct pt_regs *regs)
 {
        RT_QUEUE_PLACEHOLDER ph;
        RT_QUEUE *q;
-       int err;
 
        if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(ph)))
                return -EFAULT;
@@ -2136,17 +2135,11 @@ static int __rt_queue_delete(struct task_struct *curr, struct pt_regs *regs)
                            sizeof(ph));
 
        q = (RT_QUEUE *)xnregistry_fetch(ph.opaque);
-
        if (!q)
-               err = -ESRCH;
-       else {
-               /* Callee will check the queue descriptor for validity again. */
-               err = rt_queue_delete_inner(q, (void __user *)ph.mapbase);
-               if (!err && q->cpid)
-                       xnfree(q);
-       }
+               return -ESRCH;
 
-       return err;
+       /* Callee will check the queue descriptor for validity again. */
+       return rt_queue_delete_inner(q, (void __user *)ph.mapbase);
 }
 
 /*
@@ -2660,7 +2653,6 @@ static int __rt_heap_delete(struct task_struct *curr, struct pt_regs *regs)
 {
        RT_HEAP_PLACEHOLDER ph;
        RT_HEAP *heap;
-       int err;
 
        if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(ph)))
                return -EFAULT;
@@ -2671,15 +2663,10 @@ static int __rt_heap_delete(struct task_struct *curr, struct pt_regs *regs)
        heap = (RT_HEAP *)xnregistry_fetch(ph.opaque);
 
        if (!heap)
-               err = -ESRCH;
-       else {
-               /* Callee will check the heap descriptor for validity again. */
-               err = rt_heap_delete_inner(heap, (void __user *)ph.mapbase);
-               if (!err && heap->cpid)
-                       xnfree(heap);
-       }
+               return -ESRCH;
 
-       return err;
+       /* Callee will check the heap descriptor for validity again. */
+       return rt_heap_delete_inner(heap, (void __user *)ph.mapbase);
 }
 
 /*

