Module: xenomai-forge
Branch: master
Commit: be7f28fd7e0e531ba99f9dad7b24effd14436b34
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=be7f28fd7e0e531ba99f9dad7b24effd14436b34

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed Jul 31 16:08:08 2013 +0200

cobalt/heap: drop delayed memory release mechanism

We have no more clients for xnheap_schedule_free(). Drop this ugly
service, which used to paper over a design issue in the thread
deletion path; that issue has since been fixed.

Since this unfortunate code was running while holding the nklock all
the way, removing it can only help decrease latency.

---

 include/cobalt/kernel/heap.h |   22 -------------
 kernel/cobalt/heap.c         |   68 ------------------------------------------
 kernel/cobalt/pod.c          |    4 +--
 3 files changed, 1 insertions(+), 93 deletions(-)

diff --git a/include/cobalt/kernel/heap.h b/include/cobalt/kernel/heap.h
index 8dcbd0e..263dd6a 100644
--- a/include/cobalt/kernel/heap.h
+++ b/include/cobalt/kernel/heap.h
@@ -102,8 +102,6 @@ struct xnheap {
                int fcount;
        } buckets[XNHEAP_NBUCKETS];
 
-       struct list_head *idleq[NR_CPUS];
-
        /* # of active user-space mappings. */
        unsigned long numaps;
        /* Kernel memory flags (0 if vmalloc()). */
@@ -158,7 +156,6 @@ static inline size_t xnheap_internal_overhead(size_t hsize, size_t psize)
 
 #define xnmalloc(size)     xnheap_alloc(&kheap,size)
 #define xnfree(ptr)        xnheap_free(&kheap,ptr)
-#define xnfreesync()       xnheap_finalize_free(&kheap)
 
 static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
 {
@@ -241,25 +238,6 @@ int xnheap_test_and_free(struct xnheap *heap,
 int xnheap_free(struct xnheap *heap,
                void *block);
 
-void xnheap_schedule_free(struct xnheap *heap,
-                         void *block,
-                         struct list_head *link);
-
-void xnheap_finalize_free_inner(struct xnheap *heap,
-                               int cpu);
-
-static inline void xnheap_finalize_free(struct xnheap *heap)
-{
-       int cpu = ipipe_processor_id();
-
-       XENO_ASSERT(NUCLEUS,
-                   spltest() != 0,
-                   xnpod_fatal("%s called in unsafe context", __FUNCTION__));
-
-       if (heap->idleq[cpu])
-               xnheap_finalize_free_inner(heap, cpu);
-}
-
 int xnheap_check_block(struct xnheap *heap,
                       void *block);
 
diff --git a/kernel/cobalt/heap.c b/kernel/cobalt/heap.c
index 3b9a51c..478fbff 100644
--- a/kernel/cobalt/heap.c
+++ b/kernel/cobalt/heap.c
@@ -270,7 +270,6 @@ int xnheap_init(struct xnheap *heap,
 {
        unsigned long hdrsize, shiftsize, pageshift;
        struct xnextent *extent;
-       unsigned int cpu;
        spl_t s;
 
        /*
@@ -326,9 +325,6 @@ int xnheap_init(struct xnheap *heap,
        heap->ubytes = 0;
        heap->maxcont = heap->npages * pagesize;
 
-       for_each_online_cpu(cpu)
-               heap->idleq[cpu] = NULL;
-
        INIT_LIST_HEAD(&heap->extents);
        heap->nrextents = 1;
        xnlock_init(&heap->lock);
@@ -993,70 +989,6 @@ int xnheap_extend(struct xnheap *heap, void *extaddr, unsigned long extsize)
 }
 EXPORT_SYMBOL_GPL(xnheap_extend);
 
-/*!
- * \fn int xnheap_schedule_free(struct xnheap *heap, void *block, struct list_head *link)
- * \brief Schedule a memory block for release.
- *
- * This routine schedules a block for release by
- * xnheap_finalize_free(). This service is useful to lazily free
- * blocks of heap memory when immediate release is not an option,
- * e.g. when active references are still pending on the object for a
- * short time after the call. xnheap_finalize_free() is expected to be
- * eventually called by the client code at some point in the future
- * when actually freeing the idle objects is deemed safe.
- *
- * @param heap The descriptor address of the heap to release memory
- * to.
- *
- * @param block The address of the region to be returned to the heap.
- *
- * @param link The address of a link member, likely but not
- * necessarily within the released object, which will be used by the
- * heap manager to hold the block in the queue of idle objects.
- *
- * Environments:
- *
- * This service can be called from:
- *
- * - Kernel module initialization/cleanup code
- * - Interrupt service routine
- * - Kernel-based task
- * - User-space task
- *
- * Rescheduling: never.
- */
-
-void xnheap_schedule_free(struct xnheap *heap, void *block, struct list_head *link)
-{
-       unsigned int cpu;
-       spl_t s;
-
-       xnlock_get_irqsave(&heap->lock, s);
-       /*
-        * NOTE: we only need a one-way linked list for remembering
-        * the idle objects through the 'next' field, so the 'last'
-        * field of the link is used to point at the beginning of the
-        * freed memory.
-        */
-       cpu = ipipe_processor_id();
-       link->prev = block;
-       link->next = heap->idleq[cpu];
-       heap->idleq[cpu] = link;
-       xnlock_put_irqrestore(&heap->lock, s);
-}
-EXPORT_SYMBOL_GPL(xnheap_schedule_free);
-
-void xnheap_finalize_free_inner(struct xnheap *heap, int cpu)
-{
-       struct list_head *link;
-
-       while ((link = heap->idleq[cpu]) != NULL) {
-               heap->idleq[cpu] = link->next;
-               xnheap_free(heap, link->prev);
-       }
-}
-EXPORT_SYMBOL_GPL(xnheap_finalize_free_inner);
-
 int xnheap_check_block(struct xnheap *heap, void *block)
 {
        unsigned long pagenum, boffset;
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index d90a2f6..9b948b2 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -1783,10 +1783,8 @@ reschedule:
                   prev, xnthread_name(prev),
                   next, xnthread_name(next));
 
-       if (xnthread_test_state(next, XNROOT)) {
+       if (xnthread_test_state(next, XNROOT))
                xnsched_reset_watchdog(sched);
-               xnfreesync();
-       }
 
        sched->curr = next;
        shadow = 1;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to