Module: xenomai-forge
Branch: master
Commit: 48d3faa4d26424acbe0ae4076277c41f46162ab7
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=48d3faa4d26424acbe0ae4076277c41f46162ab7

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Apr 19 10:15:04 2013 +0200

copperplate/heapobj: sanitize shared heap management

Simplify heap management when --enable-pshared is in effect:

1. create a separate backing store file in tmpfs only for the main
heap, for sharing between processes in the same session. All other
shared heaps are nested in the main heap.

2. Get rid of *_shareable() call forms from the API, assuming that
heapobj_init() is implicitly asking for shared memory when
--enable-pshared is in effect. Only *_private() call forms are
required to force a private heap allocation, even when
--enable-pshared is set.

3. Introduce __heapobj_init() and __heapobj_init_private() for accepting
a user-defined heap memory area (when applicable). Conversely, regular
heapobj_init() and heapobj_init_private() calls allocate the storage area
internally.

4. Introduce heapobj_bind_session() for obtaining a mapping on the
main heap of a given session.

---

 include/copperplate/hash.h        |    2 +-
 include/copperplate/heapobj.h     |  155 +++++++++-----
 include/copperplate/reference.h   |    4 +-
 include/copperplate/shared-list.h |   65 ++++---
 include/copperplate/threadobj.h   |   10 +-
 lib/alchemy/heap.c                |    2 +-
 lib/alchemy/queue.c               |    8 +-
 lib/copperplate/heapobj-malloc.c  |    6 +-
 lib/copperplate/heapobj-pshared.c |  407 +++++++++++++++++++++++--------------
 lib/copperplate/heapobj-tlsf.c    |    6 +-
 lib/copperplate/internal.h        |   26 +++
 lib/copperplate/threadobj.c       |   21 ++-
 lib/psos/rn.c                     |    2 +-
 lib/vxworks/memPartLib.c          |    2 +-
 lib/vxworks/msgQLib.c             |    4 +-
 15 files changed, 468 insertions(+), 252 deletions(-)

diff --git a/include/copperplate/hash.h b/include/copperplate/hash.h
index 1ad712f..d43cfb4 100644
--- a/include/copperplate/hash.h
+++ b/include/copperplate/hash.h
@@ -74,7 +74,7 @@ int __hash_enter(struct hash_table *t,
 
 static inline void hash_init(struct hash_table *t)
 {
-       __hash_init(__pshared_heap, t);
+       __hash_init(__main_heap, t);
 }
 
 void hash_destroy(struct hash_table *t);
diff --git a/include/copperplate/heapobj.h b/include/copperplate/heapobj.h
index ed8e202..2dad005 100644
--- a/include/copperplate/heapobj.h
+++ b/include/copperplate/heapobj.h
@@ -27,40 +27,36 @@
 #include <pthread.h>
 #include <xeno_config.h>
 #include <copperplate/reference.h>
+#include <copperplate/lock.h>
+#include <copperplate/list.h>
 #include <copperplate/wrappers.h>
 #include <copperplate/debug.h>
 
-struct heapobj;
-
-#ifdef CONFIG_XENO_PSHARED
-
 struct heapobj {
        void *pool;
        size_t size;
-       char name[64];
-       char fsname[64];
-       int fd;
-       int flags;
+       char name[32];
+#ifdef CONFIG_XENO_PSHARED
+       char fsname[256];
+#endif
 };
 
-#else /* !CONFIG_XENO_PSHARED */
-
-struct heapobj {
-       void *pool;
-       size_t size;
-       char name[64];
+struct sysgroup {
+       int thread_count;
+       struct list thread_list;
+       int heap_count;
+       struct list heap_list;
+       pthread_mutex_t lock;
 };
 
-#endif /* !CONFIG_XENO_PSHARED */
-
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 int heapobj_pkg_init_private(void);
 
-int heapobj_init_private(struct heapobj *hobj, const char *name,
-                        size_t size, void *mem);
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+                          size_t size, void *mem);
 
 int heapobj_init_array_private(struct heapobj *hobj, const char *name,
                               size_t size, int elems);
@@ -210,26 +206,25 @@ static inline char *pvstrdup(const char *ptr)
 
 #ifdef CONFIG_XENO_PSHARED
 
-/*
- * The heap control block is always heading the shared memory segment,
- * so that any process can access this information right after the
- * segment is mmapped. This also ensures that offset 0 will never
- * refer to a valid page or block.
- */
-extern void *__pshared_heap;
-#define main_heap              (*((struct heap *)__pshared_heap))
+extern void *__main_heap;
+
+extern struct hash_table *__main_catalog;
+#define main_catalog   (*((struct hash_table *)__main_catalog))
 
-extern struct hash_table *__pshared_catalog;
-#define main_catalog           (*((struct hash_table *)__pshared_catalog))
+extern struct sysgroup *__main_sysgroup;
+
+struct sysgroup_memspec {
+       struct holder next;
+};
 
 static inline void *mainheap_ptr(memoff_t off)
 {
-       return off ? (void *)__memptr(__pshared_heap, off) : NULL;
+       return off ? (void *)__memptr(__main_heap, off) : NULL;
 }
 
 static inline memoff_t mainheap_off(void *addr)
 {
-       return addr ? (memoff_t)__memoff(__pshared_heap, addr) : 0;
+       return addr ? (memoff_t)__memoff(__main_heap, addr) : 0;
 }
 
 /*
@@ -244,7 +239,7 @@ static inline memoff_t mainheap_off(void *addr)
                type handle;                                            \
                assert(__builtin_types_compatible_p(typeof(type), unsigned 
long) || \
                       __builtin_types_compatible_p(typeof(type), uintptr_t)); \
-               assert(ptr == NULL || __memchk(__pshared_heap, ptr));   \
+               assert(ptr == NULL || __memchk(__main_heap, ptr));      \
                handle = (type)mainheap_off(ptr);                       \
                handle|1;                                               \
        })
@@ -262,20 +257,62 @@ static inline memoff_t mainheap_off(void *addr)
                ptr;                                                    \
        })
 
+static inline void
+__sysgroup_add(struct sysgroup_memspec *obj, struct list *q, int *countp)
+{
+       write_lock_nocancel(&__main_sysgroup->lock);
+       (*countp)++;
+       list_append(&obj->next, q);
+       write_unlock(&__main_sysgroup->lock);
+}
+
+#define sysgroup_add(__group, __obj)   \
+       __sysgroup_add(__obj, &(__main_sysgroup->__group ## _list),     \
+                      &(__main_sysgroup->__group ## _count))
+
+static inline void
+__sysgroup_remove(struct sysgroup_memspec *obj, int *countp)
+{
+       write_lock_nocancel(&__main_sysgroup->lock);
+       (*countp)--;
+       list_remove(&obj->next);
+       write_unlock(&__main_sysgroup->lock);
+}
+
+#define sysgroup_remove(__group, __obj)        \
+       __sysgroup_remove(__obj, &(__main_sysgroup->__group ## _count))
+
+static inline void sysgroup_lock(void)
+{
+       read_lock_nocancel(&__main_sysgroup->lock);
+}
+
+static inline void sysgroup_unlock(void)
+{
+       read_unlock(&__main_sysgroup->lock);
+}
+
+#define sysgroup_count(__group)        \
+       (__main_sysgroup->__group ## _count)
+
+#define for_each_sysgroup(__obj, __group)      \
+       list_for_each_entry(__obj, &(__main_sysgroup->__group ## _list), next)
+
 int heapobj_pkg_init_shared(void);
 
 int heapobj_init(struct heapobj *hobj, const char *name,
-                size_t size, void *mem);
+                size_t size);
+
+static inline int __heapobj_init(struct heapobj *hobj, const char *name,
+                                size_t size, void *unused)
+{
+       /* Can't work on user-defined memory in shared mode. */
+       return heapobj_init(hobj, name, size);
+}
 
 int heapobj_init_array(struct heapobj *hobj, const char *name,
                       size_t size, int elems);
 
-int heapobj_init_shareable(struct heapobj *hobj, const char *name,
-                          size_t size);
-
-int heapobj_init_array_shareable(struct heapobj *hobj, const char *name,
-                                size_t size, int elems);
-
 void heapobj_destroy(struct heapobj *hobj);
 
 int heapobj_extend(struct heapobj *hobj,
@@ -292,6 +329,10 @@ size_t heapobj_validate(struct heapobj *hobj,
 
 size_t heapobj_inquire(struct heapobj *hobj);
 
+int heapobj_bind_session(const char *session);
+
+void heapobj_unbind_session(void);
+
 void *xnmalloc(size_t size);
 
 void xnfree(void *ptr);
@@ -300,6 +341,9 @@ char *xnstrdup(const char *ptr);
 
 #else /* !CONFIG_XENO_PSHARED */
 
+struct sysgroup_memspec {
+};
+
 /*
  * Whether an object is laid in some shared heap. Never if pshared
  * mode is disabled.
@@ -314,7 +358,7 @@ static inline int pshared_check(void *heap, void *addr)
                type handle;                                            \
                assert(__builtin_types_compatible_p(typeof(type), unsigned 
long) || \
                       __builtin_types_compatible_p(typeof(type), uintptr_t)); \
-               assert(ptr == NULL || __memchk(__pshared_heap, ptr));   \
+               assert(ptr == NULL || __memchk(__main_heap, ptr));      \
                handle = (type)ptr;                                     \
                handle;                                                 \
        })
@@ -327,15 +371,24 @@ static inline int pshared_check(void *heap, void *addr)
                ptr;                                                    \
        })
 
+#define sysgroup_add(__group, __obj)   do { } while (0)
+#define sysgroup_remove(__group, __obj)        do { } while (0)
+
 static inline int heapobj_pkg_init_shared(void)
 {
        return 0;
 }
 
+static inline int __heapobj_init(struct heapobj *hobj, const char *name,
+                                size_t size, void *mem)
+{
+       return __heapobj_init_private(hobj, name, size, mem);
+}
+
 static inline int heapobj_init(struct heapobj *hobj, const char *name,
-                              size_t size, void *mem)
+                              size_t size)
 {
-       return heapobj_init_private(hobj, name, size, mem);
+       return __heapobj_init_private(hobj, name, size, NULL);
 }
 
 static inline int heapobj_init_array(struct heapobj *hobj, const char *name,
@@ -344,19 +397,6 @@ static inline int heapobj_init_array(struct heapobj *hobj, 
const char *name,
        return heapobj_init_array_private(hobj, name, size, elems);
 }
 
-static inline int heapobj_init_shareable(struct heapobj *hobj,
-                                        const char *name, size_t size)
-{
-       return heapobj_init(hobj, name, size, NULL);
-}
-
-static inline int heapobj_init_array_shareable(struct heapobj *hobj,
-                                              const char *name,
-                                              size_t size, int elems)
-{
-       return heapobj_init_array(hobj, name, size, elems);
-}
-
 static inline void heapobj_destroy(struct heapobj *hobj)
 {
        pvheapobj_destroy(hobj);
@@ -391,6 +431,13 @@ static inline size_t heapobj_inquire(struct heapobj *hobj)
        return pvheapobj_inquire(hobj);
 }
 
+static inline int heapobj_bind_session(const char *session)
+{
+       return -ENOSYS;
+}
+
+static inline void heapobj_unbind_session(void) { }
+
 static inline void *xnmalloc(size_t size)
 {
        return pvmalloc(size);
diff --git a/include/copperplate/reference.h b/include/copperplate/reference.h
index b817ac5..05dbde4 100644
--- a/include/copperplate/reference.h
+++ b/include/copperplate/reference.h
@@ -89,7 +89,7 @@ static inline int __fnref_nofn(void *fnaddr)
        }
 #define fnref_declare(l, s)    extern int __refvar(l, s)
 
-extern void *__pshared_heap;
+extern void *__main_heap;
 
 int pshared_check(void *heap, void *addr);
 
@@ -122,7 +122,7 @@ int __fnref_register(const char *libname,
 #define fnref_register(l, s)
 #define fnref_declare(l, s)
 
-#define __pshared_heap NULL
+#define __main_heap    NULL
 
 #define __memoff(base, addr)   (addr)
 #define __memptr(base, off)    (off)
diff --git a/include/copperplate/shared-list.h 
b/include/copperplate/shared-list.h
index 623bc30..6f65ad6 100644
--- a/include/copperplate/shared-list.h
+++ b/include/copperplate/shared-list.h
@@ -36,16 +36,21 @@ struct list {
        struct holder head;
 };
 
-static inline void __inith(void *heap, struct holder *holder)
+static inline void __inith_nocheck(void *heap, struct holder *holder)
 {
-       assert(__hchk(heap, holder));
        holder->next = __hoff(heap, holder);
        holder->prev = __hoff(heap, holder);
 }
 
+static inline void __inith(void *heap, struct holder *holder)
+{
+       assert(__hchk(heap, holder));
+       __inith_nocheck(heap, holder);
+}
+
 static inline void inith(struct holder *holder)
 {
-       __inith(__pshared_heap, holder);
+       __inith(__main_heap, holder);
 }
 
 static inline void __ath(void *heap, struct holder *head,
@@ -60,7 +65,7 @@ static inline void __ath(void *heap, struct holder *head,
 
 static inline void ath(struct holder *head, struct holder *holder)
 {
-       __ath(__pshared_heap, head, holder);
+       __ath(__main_heap, head, holder);
 }
 
 static inline void __dth(void *heap, struct holder *holder)
@@ -71,7 +76,7 @@ static inline void __dth(void *heap, struct holder *holder)
 
 static inline void dth(struct holder *holder)
 {
-       __dth(__pshared_heap, holder);
+       __dth(__main_heap, holder);
 }
 
 static inline void __list_init(void *heap, struct list *list)
@@ -79,9 +84,14 @@ static inline void __list_init(void *heap, struct list *list)
        __inith(heap, &list->head);
 }
 
+static inline void __list_init_nocheck(void *heap, struct list *list)
+{
+       __inith_nocheck(heap, &list->head);
+}
+
 static inline void list_init(struct list *list)
 {
-       __list_init(__pshared_heap, list);
+       __list_init(__main_heap, list);
 }
 
 static inline void __holder_init(void *heap, struct holder *holder)
@@ -89,6 +99,11 @@ static inline void __holder_init(void *heap, struct holder 
*holder)
        __inith(heap, holder);
 }
 
+static inline void __holder_init_nocheck(void *heap, struct holder *holder)
+{
+       __inith_nocheck(heap, holder);
+}
+
 static inline void holder_init(struct holder *holder)
 {
        inith(holder);
@@ -106,7 +121,7 @@ static inline int __holder_linked(void *heap, const struct 
holder *holder)
  */
 static inline int holder_linked(const struct holder *holder)
 {
-       return __holder_linked(__pshared_heap, holder);
+       return __holder_linked(__main_heap, holder);
 }
 
 static inline void __list_prepend(void *heap, struct holder *holder,
@@ -117,7 +132,7 @@ static inline void __list_prepend(void *heap, struct holder 
*holder,
 
 static inline void list_prepend(struct holder *holder, struct list *list)
 {
-       __list_prepend(__pshared_heap, holder, list);
+       __list_prepend(__main_heap, holder, list);
 }
 
 static inline void __list_append(void *heap, struct holder *holder,
@@ -128,7 +143,7 @@ static inline void __list_append(void *heap, struct holder 
*holder,
 
 static inline void list_append(struct holder *holder, struct list *list)
 {
-       __list_append(__pshared_heap, holder, list);
+       __list_append(__main_heap, holder, list);
 }
 
 static inline void __list_insert(void *heap, struct holder *next, struct 
holder *prev)
@@ -138,7 +153,7 @@ static inline void __list_insert(void *heap, struct holder 
*next, struct holder
 
 static inline void list_insert(struct holder *next, struct holder *prev)
 {
-       __list_insert(__pshared_heap, next, prev);
+       __list_insert(__main_heap, next, prev);
 }
 
 static inline void __list_join(void *heap, struct list *lsrc,
@@ -158,7 +173,7 @@ static inline void __list_join(void *heap, struct list 
*lsrc,
 
 static inline void list_join(struct list *lsrc, struct list *ldst)
 {
-       __list_join(__pshared_heap, lsrc, ldst);
+       __list_join(__main_heap, lsrc, ldst);
 }
 
 static inline void __list_remove(void *heap, struct holder *holder)
@@ -168,7 +183,7 @@ static inline void __list_remove(void *heap, struct holder 
*holder)
 
 static inline void list_remove(struct holder *holder)
 {
-       __list_remove(__pshared_heap, holder);
+       __list_remove(__main_heap, holder);
 }
 
 static inline void __list_remove_init(void *heap, struct holder *holder)
@@ -179,7 +194,7 @@ static inline void __list_remove_init(void *heap, struct 
holder *holder)
 
 static inline void list_remove_init(struct holder *holder)
 {
-       __list_remove_init(__pshared_heap, holder);
+       __list_remove_init(__main_heap, holder);
 }
 
 static inline int __list_empty(void *heap, const struct list *list)
@@ -189,7 +204,7 @@ static inline int __list_empty(void *heap, const struct 
list *list)
 
 static inline int list_empty(const struct list *list)
 {
-       return __list_empty(__pshared_heap, list);
+       return __list_empty(__main_heap, list);
 }
 
 static inline struct holder *__list_pop(void *heap, struct list *list)
@@ -201,7 +216,7 @@ static inline struct holder *__list_pop(void *heap, struct 
list *list)
 
 static inline struct holder *list_pop(struct list *list)
 {
-       return __list_pop(__pshared_heap, list);
+       return __list_pop(__main_heap, list);
 }
 
 static inline int __list_heading_p(void *heap, const struct holder *holder,
@@ -213,7 +228,7 @@ static inline int __list_heading_p(void *heap, const struct 
holder *holder,
 static inline int list_heading_p(const struct holder *holder,
                                 const struct list *list)
 {
-       return __list_heading_p(__pshared_heap, holder, list);
+       return __list_heading_p(__main_heap, holder, list);
 }
 
 #define list_entry(ptr, type, member)                          \
@@ -223,34 +238,34 @@ static inline int list_heading_p(const struct holder 
*holder,
        list_entry(__hptr((heap), (list)->head.next), type, member)
 
 #define list_first_entry(list, type, member)                   \
-       __list_first_entry(__pshared_heap, list, type, member)
+       __list_first_entry(__main_heap, list, type, member)
 
 #define __list_last_entry(heap, list, type, member)            \
        list_entry(__hptr((heap), (list)->head.prev), type, member)
 
 #define list_last_entry(list, type, member)                    \
-       __list_last_entry(__pshared_heap, list, type, member)
+       __list_last_entry(__main_heap, list, type, member)
 
 #define __list_pop_entry(heap, list, type, member) ({                  \
                        struct holder *__holder = __list_pop((heap), list); \
                        list_entry(__holder, type, member); })
 
 #define list_pop_entry(list, type, member)                             \
-       __list_pop_entry(__pshared_heap, list, type, member)
+       __list_pop_entry(__main_heap, list, type, member)
 
 #define __list_for_each(heap, pos, list)                               \
        for (pos = __hptr((heap), (list)->head.next);                   \
             pos != &(list)->head; pos = __hptr((heap), (pos)->next))
 
 #define list_for_each(pos, list)                                       \
-       __list_for_each(__pshared_heap, pos, list)
+       __list_for_each(__main_heap, pos, list)
 
 #define __list_for_each_reverse(heap, pos, list)                       \
        for (pos = __hptr((heap), (list)->head.prev);                   \
             pos != &(list)->head; pos = __hptr((heap), (pos)->prev))
 
 #define list_for_each_reverse(pos, list)                               \
-       __list_for_each_reverse(__pshared_heap, pos, list)
+       __list_for_each_reverse(__main_heap, pos, list)
 
 #define __list_for_each_safe(heap, pos, tmp, list)                     \
        for (pos = __hptr((heap), (list)->head.next),                   \
@@ -259,7 +274,7 @@ static inline int list_heading_p(const struct holder 
*holder,
             pos = tmp, tmp = __hptr((heap), (pos)->next))
 
 #define list_for_each_safe(pos, tmp, list)                             \
-       __list_for_each_safe(__pshared_heap, pos, tmp, list)
+       __list_for_each_safe(__main_heap, pos, tmp, list)
 
 #define __list_for_each_entry(heap, pos, list, member)                 \
        for (pos = list_entry(__hptr((heap), (list)->head.next),        \
@@ -269,7 +284,7 @@ static inline int list_heading_p(const struct holder 
*holder,
                              typeof(*pos), member))
 
 #define list_for_each_entry(pos, list, member)                         \
-       __list_for_each_entry(__pshared_heap, pos, list, member)
+       __list_for_each_entry(__main_heap, pos, list, member)
 
 #define __list_for_each_entry_safe(heap, pos, tmp, list, member)       \
        for (pos = list_entry(__hptr((heap), (list)->head.next),        \
@@ -281,7 +296,7 @@ static inline int list_heading_p(const struct holder 
*holder,
                                         typeof(*pos), member))
 
 #define list_for_each_entry_safe(pos, tmp, list, member)               \
-       __list_for_each_entry_safe(__pshared_heap, pos, tmp, list, member)
+       __list_for_each_entry_safe(__main_heap, pos, tmp, list, member)
 
 #define __list_for_each_entry_reverse(heap, pos, list, member)         \
        for (pos = list_entry(__hptr((heap), (list)->head.prev),        \
@@ -291,6 +306,6 @@ static inline int list_heading_p(const struct holder 
*holder,
                              typeof(*pos), member))
 
 #define list_for_each_entry_reverse(pos, list, member)                 \
-       __list_for_each_entry_reverse(__pshared_heap, pos, list, member)
+       __list_for_each_entry_reverse(__main_heap, pos, list, member)
 
 #endif /* !_COPPERPLATE_SHARED_LIST_H */
diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index 9f17035..a35f6ab 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -121,17 +121,18 @@ struct threadobj {
        pthread_t tid;
        pthread_mutex_t lock;
 
-       void (*finalizer)(struct threadobj *thobj);
-       void (*suspend_hook)(struct threadobj *thobj, int status);
-       int *errno_pointer;
        int schedlock_depth;
        int cancel_state;
        int status;
        int policy;
        int priority;
        pid_t cnode;
-       const char *name;
+       pid_t pid;
+       char name[32];
 
+       void (*finalizer)(struct threadobj *thobj);
+       void (*suspend_hook)(struct threadobj *thobj, int status);
+       int *errno_pointer;
        /* Those members belong exclusively to the syncobj code. */
        struct syncobj *wait_sobj;
        struct holder wait_link;
@@ -145,6 +146,7 @@ struct threadobj {
        struct timespec tslice;
        pthread_cond_t barrier;
        struct traceobj *tracer;
+       struct sysgroup_memspec memspec;
        struct backtrace_data btd;
 };
 
diff --git a/lib/alchemy/heap.c b/lib/alchemy/heap.c
index eb26d25..bf0d3ec 100644
--- a/lib/alchemy/heap.c
+++ b/lib/alchemy/heap.c
@@ -141,7 +141,7 @@ int rt_heap_create(RT_HEAP *heap,
         * The memory pool has to be part of the main heap for proper
         * sharing between processes.
         */
-       if (heapobj_init_shareable(&hcb->hobj, NULL, heapsz)) {
+       if (heapobj_init(&hcb->hobj, NULL, heapsz)) {
                xnfree(hcb);
                goto out;
        }
diff --git a/lib/alchemy/queue.c b/lib/alchemy/queue.c
index 1b06069..38691b1 100644
--- a/lib/alchemy/queue.c
+++ b/lib/alchemy/queue.c
@@ -132,12 +132,10 @@ int rt_queue_create(RT_QUEUE *queue, const char *name,
         * sharing between processes.
         */
        if (qlimit == Q_UNLIMITED)
-               ret = heapobj_init_shareable(&qcb->hobj, qcb->name,
-                                            poolsize);
+               ret = heapobj_init(&qcb->hobj, qcb->name, poolsize);
        else
-               ret = heapobj_init_array_shareable(&qcb->hobj, qcb->name,
-                                                  poolsize / qlimit,
-                                                  qlimit);
+               ret = heapobj_init_array(&qcb->hobj, qcb->name,
+                                        poolsize / qlimit, qlimit);
        if (ret) {
                xnfree(qcb);
                goto out;
diff --git a/lib/copperplate/heapobj-malloc.c b/lib/copperplate/heapobj-malloc.c
index a39abe4..96814b9 100644
--- a/lib/copperplate/heapobj-malloc.c
+++ b/lib/copperplate/heapobj-malloc.c
@@ -23,8 +23,8 @@
 #include "copperplate/heapobj.h"
 #include "copperplate/debug.h"
 
-int heapobj_init_private(struct heapobj *hobj, const char *name,
-                        size_t size, void *mem)
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+                          size_t size, void *mem)
 {
        /*
         * There is no local pool when working with malloc, we just
@@ -45,7 +45,7 @@ int heapobj_init_private(struct heapobj *hobj, const char 
*name,
 int heapobj_init_array_private(struct heapobj *hobj, const char *name,
                               size_t size, int elems)
 {
-       return __bt(heapobj_init_private(hobj, name, size * elems, NULL));
+       return __bt(__heapobj_init_private(hobj, name, size * elems, NULL));
 }
 
 int heapobj_pkg_init_private(void)
diff --git a/lib/copperplate/heapobj-pshared.c 
b/lib/copperplate/heapobj-pshared.c
index 5e05446..9b28b97 100644
--- a/lib/copperplate/heapobj-pshared.c
+++ b/lib/copperplate/heapobj-pshared.c
@@ -46,23 +46,9 @@
 #define HOBJ_PAGE_MASK         (~(HOBJ_PAGE_SIZE-1))
 #define HOBJ_PAGE_ALIGN(addr)  (((addr)+HOBJ_PAGE_SIZE-1)&HOBJ_PAGE_MASK)
 
-#define HOBJ_MINLOG2    3
-#define HOBJ_MAXLOG2    22     /* Must hold pagemap::bcount objects */
 #define HOBJ_MINALIGNSZ (1U << 4) /* i.e. 16 bytes */
-#define HOBJ_NBUCKETS   (HOBJ_MAXLOG2 - HOBJ_MINLOG2 + 2)
 #define HOBJ_MAXEXTSZ   (1U << 31) /* i.e. 2Gb */
 
-#define HOBJ_FORCE      0x1    /* Force cleanup */
-#define HOBJ_SHAREABLE  0x2    /* Allocate from main heap to allow sharing */
-
-/*
- * The base address of the shared memory segment, as seen by each
- * individual process.
- */
-void *__pshared_heap;
-
-struct hash_table *__pshared_catalog;
-
 enum {
        page_free =0,
        page_cont =1,
@@ -74,7 +60,7 @@ struct page_map {
        unsigned int bcount : 24; /* Number of active blocks. */
 };
 
-struct heap_extent {
+struct shared_extent {
        struct holder link;
        memoff_t membase;       /* Base address of the page array */
        memoff_t memlim;        /* Memory limit of page array */
@@ -83,26 +69,33 @@ struct heap_extent {
 };
 
 /*
- * The struct below has to live in shared memory; no direct reference
- * to process local memory in there.
+ * The main heap consists of a shared heap at its core, with
+ * additional session-wide information.
  */
-struct heap {
-       pthread_mutex_t lock;
-       struct list extents;
-       size_t extentsize;
-       size_t hdrsize;
-       size_t npages;
-       size_t ubytes;
-       size_t maxcont;
-       struct {
-               memoff_t freelist;
-               int fcount;
-       } buckets[HOBJ_NBUCKETS];
+struct session_heap {
+       struct shared_heap base;
        int cpid;
        memoff_t maplen;
        struct hash_table catalog;
+       struct sysgroup sysgroup;
 };
 
+/*
+ * The base address of the shared memory heap, as seen by each
+ * individual process. Its control block is always first, so that
+ * different processes can access this information right after the
+ * segment is mmapped. This also ensures that offset 0 will never
+ * refer to a valid page or block.
+ */
+void *__main_heap;
+#define main_heap      (*(struct session_heap *)__main_heap)
+
+/* A table of shared clusters for the session. */
+struct hash_table *__main_catalog;
+
+/* Pointer to the system list group. */
+struct sysgroup *__main_sysgroup;
+
 static struct heapobj main_pool;
 
 #define __moff(h, p)           ((caddr_t)(p) - (caddr_t)(h))
@@ -123,17 +116,17 @@ static inline size_t internal_overhead(size_t hsize)
           o * (p + m) = h * m + e * p
           o = (h * m + e *p) / (p + m)
        */
-       return __align_to((sizeof(struct heap_extent) * HOBJ_PAGE_SIZE
+       return __align_to((sizeof(struct shared_extent) * HOBJ_PAGE_SIZE
                           + sizeof(struct page_map) * hsize)
                          / (HOBJ_PAGE_SIZE + sizeof(struct page_map)), 
HOBJ_PAGE_SIZE);
 }
 
-static void init_extent(struct heap *heap, struct heap_extent *extent)
+static void init_extent(struct shared_heap *heap, struct shared_extent *extent)
 {
        caddr_t freepage;
        int n, lastpgnum;
 
-       __holder_init(heap, &extent->link);
+       __holder_init_nocheck(heap, &extent->link);
 
        /* The initial extent starts right after the header. */
        extent->membase = __moff(heap, extent) + heap->hdrsize;
@@ -156,11 +149,14 @@ static void init_extent(struct heap *heap, struct 
heap_extent *extent)
        extent->freelist = extent->membase;
 }
 
-static void init_heap(struct heap *heap, void *mem, size_t size)
+static void init_heap(struct shared_heap *heap, const char *name,
+                     void *mem, size_t size)
 {
-       struct heap_extent *extent;
+       struct shared_extent *extent;
        pthread_mutexattr_t mattr;
 
+       strncpy(heap->name, name, sizeof(heap->name) - 1);
+       heap->name[sizeof(heap->name) - 1] = '\0';
        heap->extentsize = size;
        heap->hdrsize = internal_overhead(size);
        heap->npages = (size - heap->hdrsize) >> HOBJ_PAGE_SHIFT;
@@ -172,10 +168,10 @@ static void init_heap(struct heap *heap, void *mem, 
size_t size)
         */
        assert(heap->npages >= 2);
 
-       heap->cpid = copperplate_get_tid();
        heap->ubytes = 0;
-       heap->maxcont = heap->npages * HOBJ_PAGE_SIZE;
-       __list_init(heap, &heap->extents);
+       heap->total = heap->npages * HOBJ_PAGE_SIZE;
+       heap->maxcont = heap->total;
+       __list_init_nocheck(heap, &heap->extents);
 
        __RT(pthread_mutexattr_init(&mattr));
        __RT(pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT));
@@ -186,16 +182,34 @@ static void init_heap(struct heap *heap, void *mem, 
size_t size)
        memset(heap->buckets, 0, sizeof(heap->buckets));
        extent = mem;
        init_extent(heap, extent);
-
-       __hash_init(heap, &heap->catalog);
        __list_append(heap, &extent->link, &heap->extents);
 }
 
-static caddr_t get_free_range(struct heap *heap, size_t bsize, int log2size)
+static void init_main_heap(struct session_heap *m_heap, void *mem, size_t size)
+{
+       pthread_mutexattr_t mattr;
+
+       init_heap(&m_heap->base, "main", mem, size);
+       m_heap->cpid = copperplate_get_tid();
+
+       __RT(pthread_mutexattr_init(&mattr));
+       __RT(pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT));
+       __RT(pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED));
+       __RT(pthread_mutex_init(&m_heap->sysgroup.lock, &mattr));
+       __RT(pthread_mutexattr_destroy(&mattr));
+
+       __hash_init(m_heap, &m_heap->catalog);
+       m_heap->sysgroup.thread_count = 0;
+       __list_init(m_heap, &m_heap->sysgroup.thread_list);
+       m_heap->sysgroup.heap_count = 0;
+       __list_init(m_heap, &m_heap->sysgroup.heap_list);
+}
+
+static caddr_t get_free_range(struct shared_heap *heap, size_t bsize, int 
log2size)
 {
        caddr_t block, eblock, freepage, lastpage, headpage, freehead = NULL;
+       struct shared_extent *extent;
        size_t pnum, pcont, fcont;
-       struct heap_extent *extent;
 
        __list_for_each_entry(heap, extent, &heap->extents, link) {
                freepage = __mref_check(heap, extent->freelist);
@@ -290,9 +304,9 @@ align_alloc_size(size_t size)
        return __align_to(size, HOBJ_MINALIGNSZ);
 }
 
-static void *alloc_block(struct heap *heap, size_t size)
+static void *alloc_block(struct shared_heap *heap, size_t size)
 {
-       struct heap_extent *extent;
+       struct shared_extent *extent;
        int log2size, ilog;
        size_t pnum, bsize;
        caddr_t block;
@@ -363,12 +377,12 @@ done:
        return block;
 }
 
-static int free_block(struct heap *heap, void *block)
+static int free_block(struct shared_heap *heap, void *block)
 {
        caddr_t freepage, lastpage, nextpage, tailpage, freeptr;
        int log2size, ret = 0, nblocks, xpage, ilog;
        size_t pnum, pcont, boffset, bsize, npages;
-       struct heap_extent *extent;
+       struct shared_extent *extent;
        memoff_t *tailptr;
 
        write_lock_nocancel(&heap->lock);
@@ -516,10 +530,10 @@ out:
        return __bt(ret);
 }
 
-static size_t check_block(struct heap *heap, void *block)
+static size_t check_block(struct shared_heap *heap, void *block)
 {
        size_t pnum, boffset, bsize, ret = 0;
-       struct heap_extent *extent;
+       struct shared_extent *extent;
        int ptype;
 
        read_lock_nocancel(&heap->lock);
@@ -553,10 +567,12 @@ out:
        return ret;
 }
 
-static int create_heap(struct heapobj *hobj, const char *session,
-                      const char *name, size_t size, int flags)
+static int create_main_heap(void)
 {
-       struct heap *heap;
+       const char *session = __node_info.session_label;
+       size_t size = __node_info.mem_pool;
+       struct heapobj *hobj = &main_pool;
+       struct session_heap *m_heap;
        struct stat sbuf;
        memoff_t len;
        int ret, fd;
@@ -566,38 +582,24 @@ static int create_heap(struct heapobj *hobj, const char *session,
         * header, but we still make sure of this in debug mode, so
         * that we can rely on __align_to() for rounding to the
         * minimum size in production builds, without any further
-        * test (e.g. like size >= sizeof(struct heap_extent)).
+        * test (e.g. like size >= sizeof(struct shared_extent)).
         */
-       assert(HOBJ_PAGE_SIZE > sizeof(struct heap_extent));
+       assert(HOBJ_PAGE_SIZE > sizeof(struct shared_extent));
 
        size += internal_overhead(size);
        size = __align_to(size, HOBJ_PAGE_SIZE);
        if (size > HOBJ_MAXEXTSZ)
                return __bt(-EINVAL);
 
-       if (size - sizeof(struct heap_extent) < HOBJ_PAGE_SIZE * 2)
+       if (size - sizeof(struct shared_extent) < HOBJ_PAGE_SIZE * 2)
                size += HOBJ_PAGE_SIZE * 2;
 
-       len = size + sizeof(*heap);
-
-       if (flags & HOBJ_SHAREABLE) {
-               heap = alloc_block(&main_heap, len);
-               if (heap == NULL)
-                       return __bt(-ENOMEM);
-               fd = -1;
-               heap->maplen = len;
-               init_heap(heap, (caddr_t)heap + sizeof(*heap), size);
-               goto finish;
-       }
-
-       if (name)
-               snprintf(hobj->name, sizeof(hobj->name), "%s:%s", session, name);
-       else
-               snprintf(hobj->name, sizeof(hobj->name), "%s:%p", session, hobj);
+       len = size + sizeof(*m_heap);
 
+       snprintf(hobj->name, sizeof(hobj->name), "%s.main-heap", session);
        snprintf(hobj->fsname, sizeof(hobj->fsname), "/xeno:%s", hobj->name);
 
-       if (flags & HOBJ_FORCE)
+       if (__node_info.reset_session)
                shm_unlink(hobj->fsname);
 
        fd = shm_open(hobj->fsname, O_RDWR|O_CREAT, 0600);
@@ -613,35 +615,43 @@ static int create_heap(struct heapobj *hobj, const char 
*session,
                goto errno_fail;
 
        if (sbuf.st_size > 0) {
-               heap = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
-               if (heap == MAP_FAILED)
+               m_heap = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+               if (m_heap == MAP_FAILED)
                        goto errno_fail;
-               if (heap->cpid && kill(heap->cpid, 0) == 0) {
-                       if (heap->maplen == len)
+               if (m_heap->cpid && kill(m_heap->cpid, 0) == 0) {
+                       if (m_heap->maplen == len) {
+                               hobj->pool = &m_heap->base;
+                               __main_heap = m_heap;
+                               __main_sysgroup = &m_heap->sysgroup;
                                goto done;
+                       }
+                       munmap(m_heap, len);
                        __STD(close(fd));
                        return __bt(-EEXIST);
                }
-               munmap(heap, len);
+               munmap(m_heap, len);
        }
 
        ret = ftruncate(fd, len);
        if (ret)
                goto unlink_fail;
 
-       heap = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
-       if (heap == MAP_FAILED)
+       m_heap = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+       if (m_heap == MAP_FAILED)
                goto unlink_fail;
 
-       heap->maplen = len;
-       init_heap(heap, (caddr_t)heap + sizeof(*heap), size);
+       m_heap->maplen = len;
+       hobj->pool = &m_heap->base; /* Must be set prior to calling init_main_heap() */
+       init_main_heap(m_heap, (caddr_t)m_heap + sizeof(*m_heap), size);
+       /* We need these globals set up before updating a sysgroup. */
+       __main_heap = m_heap;
+       __main_sysgroup = &m_heap->sysgroup;
+       sysgroup_add(heap, &m_heap->base.memspec);
 done:
        flock(fd, LOCK_UN);
-finish:
-       hobj->pool = heap;
+       __STD(close(fd));
        hobj->size = size;
-       hobj->fd = fd;
-       hobj->flags = flags;
+       __main_catalog = &m_heap->catalog;
 
        return 0;
 unlink_fail:
@@ -656,71 +666,178 @@ close_fail:
        return ret;
 }
 
-int pshared_check(void *__heap, void *__addr)
+static int bind_main_heap(const char *session)
 {
-       struct heap *heap = __heap;
-       return __addr >= __heap && __addr < __heap + heap->maplen;
+       struct heapobj *hobj = &main_pool;
+       struct session_heap *m_heap;
+       struct stat sbuf;
+       memoff_t len;
+       int ret, fd;
+
+       /* No error tracking, this is for internal users. */
+
+       snprintf(hobj->name, sizeof(hobj->name), "%s.main-heap", session);
+       snprintf(hobj->fsname, sizeof(hobj->fsname), "/xeno:%s", hobj->name);
+
+       fd = shm_open(hobj->fsname, O_RDWR, 0400);
+       if (fd < 0)
+               return -errno;
+
+       ret = flock(fd, LOCK_EX);
+       if (ret)
+               goto errno_fail;
+
+       ret = fstat(fd, &sbuf);
+       if (ret)
+               goto errno_fail;
+
+       len = sbuf.st_size;
+       if (len < sizeof(*m_heap)) {
+               ret = -EINVAL;
+               goto fail;
+       }
+
+       m_heap = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+       if (m_heap == MAP_FAILED)
+               goto errno_fail;
+
+       __STD(close(fd));
+
+       if (m_heap->cpid == 0 || kill(m_heap->cpid, 0)) {
+               munmap(m_heap, len);
+               return -ENOENT;
+       }
+
+       hobj->pool = &m_heap->base;
+       hobj->size = len - sizeof(*m_heap);
+       __main_heap = m_heap;
+       __main_catalog = &m_heap->catalog;
+       __main_sysgroup = &m_heap->sysgroup;
+
+       return 0;
+
+errno_fail:
+       ret = -errno;
+fail:
+       __STD(close(fd));
+
+       return ret;
 }
 
-int heapobj_init(struct heapobj *hobj, const char *name,
-                size_t size, void *mem)
+int pshared_check(void *__heap, void *__addr)
 {
-       int flags = __node_info.reset_session ? HOBJ_FORCE : 0;
+       struct shared_heap *heap = __heap;
+       struct shared_extent *extent;
+       struct session_heap *m_heap;
+
+       /*
+        * Fast check for the main heap: we have a single extent for
+        * this one, so the address shall fall into the file-backed
+        * memory range.
+        */
+       if (heap == main_pool.pool) {
+               m_heap = container_of(heap, struct session_heap, base);
+               return __addr >= (void *)m_heap &&
+                       __addr < (void *)m_heap + m_heap->maplen;
+       }
+
+       /*
+        * Secondary (nested) heap: some refs may fall into the
+        * header, check for this first.
+        */
+       if (__addr >= __heap && __addr < __heap + sizeof(*heap))
+               return 1;
+
+       /*
+        * This address must be referring to some payload data within
+        * the nested heap, check that it falls into one of the heap
+        * extents.
+        */
+       assert(!list_empty(&heap->extents));
+
+       __list_for_each_entry(heap, extent, &heap->extents, link) {
+               if (__moff(heap, __addr) >= extent->membase &&
+                   __moff(heap, __addr) < extent->memlim)
+                       return 1;
+       }
 
-       return __bt(create_heap(hobj, __node_info.session_label, name,
-                               size, flags));
+       return 0;
 }
 
-int heapobj_init_shareable(struct heapobj *hobj, const char *name,
-                          size_t size)
+int heapobj_init(struct heapobj *hobj, const char *name, size_t size)
 {
-       int flags = __node_info.reset_session ? HOBJ_FORCE : 0;
+       const char *session = __node_info.session_label;
+       struct shared_heap *heap;
+       size_t len;
+
+       size += internal_overhead(size);
+       size = __align_to(size, HOBJ_PAGE_SIZE);
+       if (size > HOBJ_MAXEXTSZ)
+               return __bt(-EINVAL);
+
+       if (size - sizeof(struct shared_extent) < HOBJ_PAGE_SIZE * 2)
+               size += HOBJ_PAGE_SIZE * 2;
 
-       return __bt(create_heap(hobj, __node_info.session_label, name,
-                               size, flags | HOBJ_SHAREABLE));
+       len = size + sizeof(*heap);
+
+       /*
+        * Create a heap nested in the main shared heap to hold data
+        * we can share among processes which belong to the same
+        * session.
+        */
+       heap = alloc_block(&main_heap.base, len);
+       if (heap == NULL) {
+               warning("%s() failed for %Zu bytes, raise --mem-pool-size?",
+                       __func__, len);
+               return __bt(-ENOMEM);
+       }
+
+       if (name)
+               snprintf(hobj->name, sizeof(hobj->name), "%s.%s", session, name);
+       else
+               snprintf(hobj->name, sizeof(hobj->name), "%s.%p", session, hobj);
+
+       hobj->pool = heap;
+       hobj->size = size;
+       init_heap(heap, hobj->name, (caddr_t)heap + sizeof(*heap), size);
+       sysgroup_add(heap, &heap->memspec);
+
+       return 0;
 }
 
 int heapobj_init_array(struct heapobj *hobj, const char *name,
                       size_t size, int elems)
 {
        size = align_alloc_size(size);
-       return __bt(heapobj_init(hobj, name, size * elems, NULL));
-}
-
-int heapobj_init_array_shareable(struct heapobj *hobj, const char *name,
-                                size_t size, int elems)
-{
-       size = align_alloc_size(size);
-       return __bt(heapobj_init_shareable(hobj, name, size * elems));
+       return __bt(heapobj_init(hobj, name, size * elems));
 }
 
 void heapobj_destroy(struct heapobj *hobj)
 {
-       struct heap *heap = hobj->pool;
+       struct shared_heap *heap = hobj->pool;
        int cpid;
 
        __RT(pthread_mutex_destroy(&heap->lock));
 
-       if (hobj->flags & HOBJ_SHAREABLE) {
-               free_block(&main_heap, heap);
+       if (hobj != &main_pool) {
+               sysgroup_remove(heap, &heap->memspec);
+               free_block(&main_heap.base, heap);
                return;
        }
 
-       cpid = heap->cpid;
-       munmap(heap, hobj->size + sizeof(*heap));
-       __STD(close(hobj->fd));
+       __RT(pthread_mutex_destroy(&main_heap.sysgroup.lock));
+       cpid = main_heap.cpid;
+       munmap(&main_heap, main_heap.maplen);
 
        if (cpid == copperplate_get_tid() || (cpid && kill(cpid, 0)))
                shm_unlink(hobj->fsname);
 }
 
-int heapobj_extend(struct heapobj *hobj, size_t size, void *mem)
+int heapobj_extend(struct heapobj *hobj, size_t size, void *unused)
 {
-       struct heap *heap = hobj->pool;
-       struct heap_extent *extent;
-       size_t newsize;
-       int ret, state;
-       caddr_t p;
+       struct shared_heap *heap = hobj->pool;
+       struct shared_extent *extent;
+       int state;
 
        if (hobj == &main_pool) /* Can't extend the main pool. */
                return __bt(-EINVAL);
@@ -728,34 +845,19 @@ int heapobj_extend(struct heapobj *hobj, size_t size, void *mem)
        if (size <= HOBJ_PAGE_SIZE * 2)
                return __bt(-EINVAL);
 
+       size = align_alloc_size(size);
        write_lock_safe(&heap->lock, state);
-       newsize = size + hobj->size + sizeof(*heap) + sizeof(*extent);
-       ret = ftruncate(hobj->fd, newsize);
-       if (ret) {
-               ret = __bt(-errno);
-               goto out;
-       }
-
-       /*
-        * We do not allow the kernel to move the mapping address, so
-        * it is safe referring to the heap contents while extending
-        * it.
-        */
-       p = mremap(heap, heap->maplen, newsize, 0);
-       if (p == MAP_FAILED) {
-               ret = __bt(-errno);
-               goto out;
-       }
-
-       heap->maplen = newsize;
-       extent = (struct heap_extent *)(p + hobj->size + sizeof(*heap));
+       extent = alloc_block(&main_heap.base, size + sizeof(*extent));
+       if (extent == NULL) {
+               write_unlock_safe(&heap->lock, state);
+               return __bt(-ENOMEM);
+       }
        init_extent(heap, extent);
        __list_append(heap, &extent->link, &heap->extents);
-       hobj->size = newsize - sizeof(*heap);
-out:
+       if (size > heap->maxcont)
+               heap->maxcont = size;
+       heap->total += size;
+       hobj->size += size;
+
        write_unlock_safe(&heap->lock, state);
 
-       return ret;
+       return 0;
 }
 
 void *heapobj_alloc(struct heapobj *hobj, size_t size)
@@ -775,18 +877,18 @@ size_t heapobj_validate(struct heapobj *hobj, void *ptr)
 
 size_t heapobj_inquire(struct heapobj *hobj)
 {
-       struct heap *heap = hobj->pool;
+       struct shared_heap *heap = hobj->pool;
        return heap->ubytes;
 }
 
 void *xnmalloc(size_t size)
 {
-       return alloc_block(&main_heap, size);
+       return alloc_block(&main_heap.base, size);
 }
 
 void xnfree(void *ptr)
 {
-       free_block(&main_heap, ptr);
+       free_block(&main_heap.base, ptr);
 }
 
 char *xnstrdup(const char *ptr)
@@ -804,13 +906,7 @@ int heapobj_pkg_init_shared(void)
 {
        int ret;
 
-       ret = heapobj_init(&main_pool, "main", __node_info.mem_pool, NULL);
-       if (ret == 0) {
-               __pshared_heap = main_pool.pool;
-               __pshared_catalog = &main_heap.catalog;
-               return 0;
-       }
-
+       ret = create_main_heap();
        if (ret == -EEXIST) {
                if (__node_info.reset_session)
                        /* Init failed despite override. */
@@ -824,3 +920,16 @@ int heapobj_pkg_init_shared(void)
 
        return __bt(ret);
 }
+
+int heapobj_bind_session(const char *session)
+{
+       /* No error tracking, this is for internal users. */
+       return bind_main_heap(session);
+}
+
+void heapobj_unbind_session(void)
+{
+       size_t len = main_heap.maplen;
+
+       munmap(&main_heap, len);
+}
diff --git a/lib/copperplate/heapobj-tlsf.c b/lib/copperplate/heapobj-tlsf.c
index 598ef24..d824a81 100644
--- a/lib/copperplate/heapobj-tlsf.c
+++ b/lib/copperplate/heapobj-tlsf.c
@@ -32,8 +32,8 @@
 
 static int tlsf_pool_overhead;
 
-int heapobj_init_private(struct heapobj *hobj, const char *name,
-                        size_t size, void *mem)
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+                          size_t size, void *mem)
 {
        if (mem == NULL) {
                /*
@@ -69,7 +69,7 @@ int heapobj_init_array_private(struct heapobj *hobj, const 
char *name,
        poolsz = (size + TLSF_BLOCK_ALIGN - 1) & ~(TLSF_BLOCK_ALIGN - 1);
        poolsz *= elems;
 
-       return __bt(heapobj_init_private(hobj, name, poolsz, NULL));
+       return __bt(__heapobj_init_private(hobj, name, poolsz, NULL));
 }
 
 int heapobj_pkg_init_private(void)
diff --git a/lib/copperplate/internal.h b/lib/copperplate/internal.h
index 7b4e792..5d1a697 100644
--- a/lib/copperplate/internal.h
+++ b/lib/copperplate/internal.h
@@ -26,6 +26,7 @@
 #include <sched.h>
 #include <xeno_config.h>
 #include <copperplate/list.h>
+#include <copperplate/heapobj.h>
 
 #define sigev_notify_thread_id  _sigev_un._tid
 
@@ -41,6 +42,31 @@ struct coppernode {
        int silent_mode;
 };
 
+#define HOBJ_MINLOG2    3
+#define HOBJ_MAXLOG2    22     /* Must hold pagemap::bcount objects */
+#define HOBJ_NBUCKETS   (HOBJ_MAXLOG2 - HOBJ_MINLOG2 + 2)
+
+/*
+ * The struct below has to live in shared memory; no direct reference
+ * to process local memory in there.
+ */
+struct shared_heap {
+       char name[32];
+       pthread_mutex_t lock;
+       struct list extents;
+       size_t extentsize;
+       size_t hdrsize;
+       size_t npages;
+       size_t ubytes;
+       size_t total;
+       size_t maxcont;
+       struct sysgroup_memspec memspec;
+       struct {
+               memoff_t freelist;
+               int fcount;
+       } buckets[HOBJ_NBUCKETS];
+};
+
 extern pid_t __node_id;
 
 extern struct coppernode __node_info;
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index 8b622fe..a7a144d 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -36,6 +36,7 @@
 #include "copperplate/cluster.h"
 #include "copperplate/clockobj.h"
 #include "copperplate/eventobj.h"
+#include "copperplate/heapobj.h"
 #include "internal.h"
 
 union copperplate_wait_union {
@@ -835,6 +836,8 @@ void threadobj_init(struct threadobj *thobj,
        holder_init(&thobj->wait_link);
        thobj->suspend_hook = idata->suspend_hook;
        thobj->cnode = __node_id;
+       thobj->pid = 0;
+
        /*
         * CAUTION: wait_union and wait_size have been set in
         * __threadobj_alloc(), do not overwrite.
@@ -967,12 +970,18 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
                 * called.
                 */
                assert(current->magic == 0);
+               sysgroup_remove(thread, &current->memspec);
                threadobj_finalize(current);
                threadobj_free(current);
        } else
                pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
 
-       thobj->name = name;
+       if (name) {
+               strncpy(thobj->name, name, sizeof(thobj->name) - 1);
+               thobj->name[sizeof(thobj->name) - 1] = '\0';
+       } else
+               *thobj->name = '\0';
+
        thobj->errno_pointer = &errno;
        backtrace_init_context(&thobj->btd, name);
        ret = threadobj_setup_corespec(thobj);
@@ -980,6 +989,13 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
                return __bt(ret);
 
        threadobj_set_current(thobj);
+       thobj->pid = copperplate_get_tid();
+
+       /*
+        * Link the thread to the shared queue, so that sysregd can
+        * retrieve it. Nop if --disable-pshared.
+        */
+       sysgroup_add(thread, &thobj->memspec);
 
        threadobj_lock(thobj);
        thobj->status &= ~THREADOBJ_WARMUP;
@@ -1023,10 +1039,13 @@ static void threadobj_finalize(void *p) /* thobj->lock 
free */
 
        pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
        threadobj_set_current(p);
+       thobj->pid = 0;
 
        if (thobj->wait_sobj)
                __syncobj_cleanup_wait(thobj->wait_sobj, thobj);
 
+       sysgroup_remove(thread, &thobj->memspec);
+
        if (thobj->tracer)
                traceobj_unwind(thobj->tracer);
 
diff --git a/lib/psos/rn.c b/lib/psos/rn.c
index 5f6fe42..469c6e7 100644
--- a/lib/psos/rn.c
+++ b/lib/psos/rn.c
@@ -126,7 +126,7 @@ u_long rn_create(const char *name, void *saddr, u_long 
length,
                goto out;
        }
 
-       ret = heapobj_init(&rn->hobj, name, length, saddr);
+       ret = __heapobj_init(&rn->hobj, name, length, saddr);
        if (ret) {
                ret = ERR_TINYRN;
                xnfree(rn);
diff --git a/lib/vxworks/memPartLib.c b/lib/vxworks/memPartLib.c
index 95e61b6..f34c5e1 100644
--- a/lib/vxworks/memPartLib.c
+++ b/lib/vxworks/memPartLib.c
@@ -54,7 +54,7 @@ PART_ID memPartCreate(char *pPool, unsigned int poolSize)
        if (mp == NULL)
                goto fail;
 
-       if (heapobj_init(&mp->hobj, NULL, poolSize, pPool)) {
+       if (__heapobj_init(&mp->hobj, NULL, poolSize, pPool)) {
                xnfree(mp);
        fail:
                errno = S_memLib_NOT_ENOUGH_MEMORY;
diff --git a/lib/vxworks/msgQLib.c b/lib/vxworks/msgQLib.c
index e90f1f3..3abc326 100644
--- a/lib/vxworks/msgQLib.c
+++ b/lib/vxworks/msgQLib.c
@@ -96,8 +96,8 @@ MSG_Q_ID msgQCreate(int maxMsgs, int maxMsgLength, int 
options)
         * must share the same allocation base). Create the heap
         * object accordingly.
         */
-       if (heapobj_init_array_shareable(&mq->pool, NULL, maxMsgLength +
-                                        sizeof(struct msgholder), maxMsgs)) {
+       if (heapobj_init_array(&mq->pool, NULL, maxMsgLength +
+                              sizeof(struct msgholder), maxMsgs)) {
                xnfree(mq);
        no_mem:
                errno = S_memLib_NOT_ENOUGH_MEMORY;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to