Module: xenomai-3
Branch: wip/heapmem
Commit: ecdc9770b12be89517135c003e1611378f744b6c
URL:    
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=ecdc9770b12be89517135c003e1611378f744b6c

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Apr 22 18:20:04 2018 +0200

testsuite/smokey: add test suite for memory allocators

---

 configure.ac                                |    4 +
 testsuite/smokey/Makefile.am                |   17 +-
 testsuite/smokey/memcheck/Makefile.am       |    8 +
 testsuite/smokey/memcheck/memcheck.c        |  851 +++++++++++++++++++++++++++
 testsuite/smokey/memcheck/memcheck.h        |   65 ++
 testsuite/smokey/memory-heapmem/Makefile.am |    9 +
 testsuite/smokey/memory-heapmem/heapmem.c   |   51 ++
 testsuite/smokey/memory-pshared/Makefile.am |    9 +
 testsuite/smokey/memory-pshared/pshared.c   |  125 ++++
 testsuite/smokey/memory-tlsf/Makefile.am    |   10 +
 testsuite/smokey/memory-tlsf/tlsf.c         |  123 ++++
 11 files changed, 1271 insertions(+), 1 deletion(-)

diff --git a/configure.ac b/configure.ac
index 59ea776..61ebcbe 100644
--- a/configure.ac
+++ b/configure.ac
@@ -930,6 +930,10 @@ AC_CONFIG_FILES([ \
        testsuite/smokey/timerfd/Makefile \
        testsuite/smokey/tsc/Makefile \
        testsuite/smokey/leaks/Makefile \
+       testsuite/smokey/memcheck/Makefile \
+       testsuite/smokey/memory-heapmem/Makefile \
+       testsuite/smokey/memory-tlsf/Makefile \
+       testsuite/smokey/memory-pshared/Makefile \
        testsuite/smokey/fpu-stress/Makefile \
        testsuite/smokey/net_udp/Makefile \
        testsuite/smokey/net_packet_dgram/Makefile \
diff --git a/testsuite/smokey/Makefile.am b/testsuite/smokey/Makefile.am
index c6fe70b..d7a71fe 100644
--- a/testsuite/smokey/Makefile.am
+++ b/testsuite/smokey/Makefile.am
@@ -5,6 +5,10 @@ CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
 
 smokey_SOURCES = main.c
 
+# Make sure to list modules from the most dependent to the
+# least. e.g. net_common should appear after all net_* modules,
+# memcheck should appear after all memory-* modules.
+
 COBALT_SUBDIRS =       \
        arith           \
        bufp            \
@@ -12,6 +16,9 @@ COBALT_SUBDIRS =      \
        fpu-stress      \
        iddp            \
        leaks           \
+       memory-heapmem  \
+       memory-tlsf     \
+       memcheck        \
        net_packet_dgram\
        net_packet_raw  \
        net_udp         \
@@ -31,11 +38,19 @@ COBALT_SUBDIRS =    \
        vdso-access     \
        xddp
 
+if XENO_PSHARED
+COBALT_SUBDIRS += memory-pshared
+endif
+
 if CONFIG_XENO_LIBS_DLOPEN
 COBALT_SUBDIRS += dlopen
 endif
 
-MERCURY_SUBDIRS =
+MERCURY_SUBDIRS = memory-heapmem memory-tlsf
+if XENO_PSHARED
+MERCURY_SUBDIRS += memory-pshared
+endif
+MERCURY_SUBDIRS += memcheck
 
 DIST_SUBDIRS = $(COBALT_SUBDIRS) $(MERCURY_SUBDIRS)
 
diff --git a/testsuite/smokey/memcheck/Makefile.am 
b/testsuite/smokey/memcheck/Makefile.am
new file mode 100644
index 0000000..482314a
--- /dev/null
+++ b/testsuite/smokey/memcheck/Makefile.am
@@ -0,0 +1,8 @@
+noinst_LIBRARIES = libmemcheck.a
+noinst_HEADERS = memcheck.h
+
+AM_CPPFLAGS =                  \
+       @XENO_USER_CFLAGS@      \
+       -I$(top_srcdir)/include
+
+libmemcheck_a_SOURCES = memcheck.c
diff --git a/testsuite/smokey/memcheck/memcheck.c 
b/testsuite/smokey/memcheck/memcheck.c
new file mode 100644
index 0000000..9244fd8
--- /dev/null
+++ b/testsuite/smokey/memcheck/memcheck.c
@@ -0,0 +1,851 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <r...@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <time.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sched.h>
+#include <pthread.h>
+#include <boilerplate/time.h>
+#include "memcheck.h"
+
+enum pattern {
+       alphabet_series,
+       digit_series,
+       binary_series,
+};
+
+struct chunk {
+       void *ptr;
+       enum pattern pattern;
+};
+
+struct runstats {
+       size_t heap_size;
+       size_t user_size;
+       size_t block_size;
+       int nrblocks;
+       long alloc_avg_ns;
+       long alloc_max_ns;
+       long free_avg_ns;
+       long free_max_ns;
+       int flags;
+       double overhead;
+       double fragmentation;
+       struct runstats *next;
+};
+
+static struct runstats *statistics;
+
+static int nrstats;
+
+static int max_results = 4;
+
+static inline long diff_ts(struct timespec *left, struct timespec *right)
+{
+       return (long long)(left->tv_sec - right->tv_sec) * ONE_BILLION
+               + left->tv_nsec - right->tv_nsec;
+}
+
+static inline void swap(void *left, void *right, const size_t size)
+{
+       char trans[size];
+
+       memcpy(trans, left, size);
+       memcpy(left, right, size);
+       memcpy(right, trans, size);
+}
+
+static void random_shuffle(void *vbase, size_t nmemb, const size_t size)
+{
+       struct {
+               char x[size];
+       } __attribute__((packed)) *base = vbase;
+       unsigned int j, k;
+       double u;
+
+       for(j = nmemb; j > 0; j--) {
+               u = (double)random() / RAND_MAX;
+               k = (unsigned int)(j * u) + 1;
+               if (j == k)
+                       continue;
+               swap(&base[j - 1], &base[k - 1], size);
+       }
+}
+
+/* Reverse sort, high values first. */
+
+#define compare_values(l, r)                   \
+       ({                                      \
+               typeof(l) _l = (l);             \
+               typeof(r) _r = (r);             \
+               (_l > _r) - (_l < _r);          \
+       })
+
+static int sort_by_heap_size(const void *l, const void *r)
+{
+       const struct runstats *ls = l, *rs = r;
+
+       return compare_values(rs->heap_size, ls->heap_size);
+}
+
+static int sort_by_alloc_time(const void *l, const void *r)
+{
+       const struct runstats *ls = l, *rs = r;
+
+       return compare_values(rs->alloc_max_ns, ls->alloc_max_ns);
+}
+
+static int sort_by_free_time(const void *l, const void *r)
+{
+       const struct runstats *ls = l, *rs = r;
+
+       return compare_values(rs->free_max_ns, ls->free_max_ns);
+}
+
+static int sort_by_frag(const void *l, const void *r)
+{
+       const struct runstats *ls = l, *rs = r;
+
+       return compare_values(rs->fragmentation, ls->fragmentation);
+}
+
+static int sort_by_overhead(const void *l, const void *r)
+{
+       const struct runstats *ls = l, *rs = r;
+
+       return compare_values(rs->overhead, ls->overhead);
+}
+
+static inline const char *get_debug_state(void)
+{
+#if defined(CONFIG_XENO_DEBUG_FULL)
+       return "\n(CAUTION: full debug enabled)";
+#elif defined(CONFIG_XENO_DEBUG)
+       return "\n(debug partially enabled)";
+#else
+       return "";
+#endif
+}
+
+static inline void breathe(int loops)
+{
+#ifdef CONFIG_XENO_COBALT
+       struct timespec idle = {
+               .tv_sec = 0,
+               .tv_nsec = 5000000,
+       };
+
+       /*
+        * There is no rt throttling over Cobalt, so we may need to
+        * keep the host kernel breathing by napping during the test
+        * sequences.
+        */
+       if ((loops % 10000) == 0)
+               __RT(clock_nanosleep(CLOCK_MONOTONIC, 0, &idle, NULL));
+#endif
+}
+
+static void __dump_stats(struct memcheck_descriptor *md,
+                        struct runstats *stats,
+                        int (*sortfn)(const void *l, const void *r),
+                        int nr, const char *key)
+{
+       struct runstats *p;
+       int n;
+
+       qsort(stats, nrstats, sizeof(*p), sortfn);
+
+       smokey_trace("\nsorted by: max %s\n%8s  %7s  %7s  %5s  %5s  %5s  %5s   
%5s  %5s  %s",
+                    key, "HEAPSZ", "BLOCKSZ", "NRBLKS", "AVG-A",
+                    "AVG-F", "MAX-A", "MAX-F", "OVRH%", "FRAG%", "FLAGS");
+
+       for (n = 0; n < nr; n++) {
+               p = stats + n;
+               smokey_trace("%7zuk  %7zu%s  %6d  %5.1f  %5.1f  %5.1f  %5.1f   
%4.1f  %5.1f   %s%s%s",
+                            p->heap_size / 1024,
+                            p->block_size < 1024 ? p->block_size : 
p->block_size / 1024,
+                            p->block_size < 1024 ? " " : "k",
+                            p->nrblocks,
+                            (double)p->alloc_avg_ns/1000.0,
+                            (double)p->free_avg_ns/1000.0,
+                            (double)p->alloc_max_ns/1000.0,
+                            (double)p->free_max_ns/1000.0,
+                            p->overhead,
+                            p->fragmentation,
+                            p->alloc_avg_ns == 0 && p->free_avg_ns == 0 ? 
"FAILED " : "",
+                            p->flags & MEMCHECK_SHUFFLE ? "+shuffle " : "",
+                            p->flags & MEMCHECK_REALLOC ? "+realloc" : "");
+               breathe(n);
+       }
+
+       if (nr < nrstats)
+               smokey_trace("  ... (%d results following) ...", nrstats - nr);
+}
+
+static int dump_stats(struct memcheck_descriptor *md, const char *title)
+{
+       long worst_alloc_max = 0, worst_free_max = 0;
+       double overhead_sum = 0.0, frag_sum = 0.0;
+       long max_alloc_sum = 0, max_free_sum = 0;
+       long avg_alloc_sum = 0, avg_free_sum = 0;
+       struct runstats *stats, *p, *next;
+       int n;
+
+       stats = malloc(sizeof(*p) * nrstats);
+       if (stats == NULL) {
+               smokey_warning("failed allocating memory");
+               return -ENOMEM;
+       }
+
+       for (n = 0, p = statistics; n < nrstats; n++, p = p->next)
+               stats[n] = *p;
+
+       smokey_trace("\n[%s] ON '%s'%s\n",
+                    title, md->name, get_debug_state());
+
+       smokey_trace("HEAPSZ    test heap size");
+       smokey_trace("BLOCKSZ   tested block size");
+       smokey_trace("NRBLKS    number of blocks allocatable in heap");
+       smokey_trace("AVG-A     average time to allocate block (us)");
+       smokey_trace("AVG-F     average time to free block (us)");
+       smokey_trace("MAX-A     max time to allocate block (us)");
+       smokey_trace("MAX-F     max time to free block (us)");
+       smokey_trace("OVRH%%     overhead");
+       smokey_trace("FRAG%%     external fragmentation");
+       smokey_trace("FLAGS     +shuffle: randomized free");
+       smokey_trace("          +realloc: measure after initial alloc/free pass 
(hot heap)");
+
+       if (max_results > 0) {
+               if (max_results > nrstats)
+                       max_results = nrstats;
+               __dump_stats(md, stats, sort_by_alloc_time, max_results, "alloc 
time");
+               __dump_stats(md, stats, sort_by_free_time, max_results, "free 
time");
+               __dump_stats(md, stats, sort_by_overhead, max_results, 
"overhead");
+               __dump_stats(md, stats, sort_by_frag, max_results, 
"fragmentation");
+       } else
+               __dump_stats(md, stats, sort_by_heap_size, nrstats, "heap 
size");
+
+       __STD(free(stats));
+
+       for (p = statistics; p; p = next) {
+               max_alloc_sum += p->alloc_max_ns;
+               max_free_sum += p->free_max_ns;
+               avg_alloc_sum += p->alloc_avg_ns;
+               avg_free_sum += p->free_avg_ns;
+               overhead_sum += p->overhead;
+               frag_sum += p->fragmentation;
+               if (p->alloc_max_ns > worst_alloc_max)
+                       worst_alloc_max = p->alloc_max_ns;
+               if (p->free_max_ns > worst_free_max)
+                       worst_free_max = p->free_max_ns;
+               next = p->next;
+               __STD(free(p));
+       }
+
+       smokey_trace("\noverall:");
+       smokey_trace("  worst alloc time: %.1f (us)",
+                    (double)worst_alloc_max / 1000.0);
+       smokey_trace("  worst free time: %.1f (us)",
+                    (double)worst_free_max / 1000.0);
+       smokey_trace("  average of max. alloc times: %.1f (us)",
+                    (double)max_alloc_sum / nrstats / 1000.0);
+       smokey_trace("  average of max. free times: %.1f (us)",
+                    (double)max_free_sum / nrstats / 1000.0);
+       smokey_trace("  average alloc time: %.1f (us)",
+                    (double)avg_alloc_sum / nrstats / 1000.0);
+       smokey_trace("  average free time: %.1f (us)",
+                    (double)avg_free_sum / nrstats / 1000.0);
+       smokey_trace("  average overhead: %.1f%%",
+                    (double)overhead_sum / nrstats);
+       smokey_trace("  average fragmentation: %.1f%%",
+                    (double)frag_sum / nrstats);
+
+       statistics = NULL;
+       nrstats = 0;
+
+       return 0;
+}
+
+static void fill_pattern(char *p, size_t size, enum pattern pat)
+{
+       unsigned int val, count;
+
+       switch (pat) {
+       case alphabet_series:
+               val = 'a';
+               count = 26;
+               break;
+       case digit_series:
+               val = '0';
+               count = 10;
+               break;
+       default:
+               val = 0;
+               count = 255;
+               break;
+       }
+
+       while (size-- > 0) {
+               *p++ = (char)(val % count);
+               val++;
+       }
+}
+
+static int check_pattern(const char *p, size_t size, enum pattern pat)
+{
+       unsigned int val, count;
+
+       switch (pat) {
+       case alphabet_series:
+               val = 'a';
+               count = 26;
+               break;
+       case digit_series:
+               val = '0';
+               count = 10;
+               break;
+       default:
+               val = 0;
+               count = 255;
+               break;
+       }
+
+       while (size-- > 0) {
+               if (*p++ != (char)(val % count))
+                       return 0;
+               val++;
+       }
+
+       return 1;
+}
+
+static size_t find_largest_free(struct memcheck_descriptor *md,
+                               size_t free_size, size_t block_size)
+{
+       void *p;
+
+       for (;;) {
+               p = md->alloc(md->heap, free_size);
+               if (p) {
+                       md->free(md->heap, p);
+                       break;
+               }
+               if (free_size <= block_size)
+                       break;
+               free_size -= block_size;
+       }
+
+       return free_size;
+}
+
+static int test_seq(struct memcheck_descriptor *md,
+                   size_t heap_size, size_t block_size, int flags)
+{
+       long alloc_sum_ns, alloc_avg_ns, free_sum_ns, free_avg_ns,
+               alloc_max_ns, free_max_ns, d;
+       size_t arena_size, user_size, largest_free, freed;
+       int ret, n, k, maxblocks, nrblocks;
+       struct timespec start, end;
+       struct runstats *stats;
+       struct chunk *chunks;
+       bool done_frag;
+       void *mem, *p;
+       double frag;
+
+       arena_size = md->get_arena_size(heap_size);
+       if (arena_size == 0) {
+               smokey_trace("cannot get arena size for heap size %zu",
+                            heap_size);
+               return -ENOMEM;
+       }
+       
+       maxblocks = heap_size / block_size;
+
+       mem = __STD(malloc(arena_size));
+       if (mem == NULL)
+               return -ENOMEM;
+
+       ret = md->init(md->heap, mem, arena_size);
+       if (ret) {
+               smokey_trace("cannot init heap with arena size %zu",
+                            arena_size);
+               goto out;
+       }
+
+       chunks = calloc(sizeof(*chunks), maxblocks);
+       if (chunks == NULL) {
+               ret = -ENOMEM;
+               goto no_chunks;
+       }
+
+       if (md->get_usable_size(md->heap) != heap_size) {
+               smokey_trace("memory size inconsistency (%zu / %zu bytes)",
+                            heap_size, md->get_usable_size(md->heap));
+               goto bad;
+       }
+
+       user_size = 0;
+       alloc_avg_ns = 0;
+       free_avg_ns = 0;
+       alloc_max_ns = 0;
+       free_max_ns = 0;
+       frag = 0.0;
+
+       for (n = 0, alloc_sum_ns = 0; ; n++) {
+               __RT(clock_gettime(CLOCK_MONOTONIC, &start));
+               p = md->alloc(md->heap, block_size);
+               __RT(clock_gettime(CLOCK_MONOTONIC, &end));
+               d = diff_ts(&end, &start);
+               if (d > alloc_max_ns)
+                       alloc_max_ns = d;
+               alloc_sum_ns += d;
+               if (p == NULL)
+                       break;
+               user_size += block_size;
+               if (n >= maxblocks) {
+                       smokey_trace("too many blocks fetched"
+                                    " (heap=%zu, block=%zu, "
+                                    "got more than %d blocks)",
+                                    heap_size, block_size, maxblocks);
+                       goto bad;
+               }
+               chunks[n].ptr = p;
+               if (flags & MEMCHECK_PATTERN) {
+                       chunks[n].pattern = (enum pattern)(random() % 3);
+                       fill_pattern(chunks[n].ptr, block_size, 
chunks[n].pattern);
+               }
+               breathe(n);
+       }
+
+       nrblocks = n;
+       if (nrblocks == 0)
+               goto do_stats;
+
+       if ((flags & MEMCHECK_ZEROOVRD) && nrblocks != maxblocks) {
+               smokey_trace("too few blocks fetched, unexpected overhead"
+                            " (heap=%zu, block=%zu, "
+                            "got %d, less than %d blocks)",
+                            heap_size, block_size, nrblocks, maxblocks);
+               goto bad;
+       }
+
+       breathe(0);
+
+       /* Make sure we did not trash any busy block while allocating. */
+       if (flags & MEMCHECK_PATTERN) {
+               for (n = 0; n < nrblocks; n++) {
+                       if (!check_pattern(chunks[n].ptr, block_size,
+                                          chunks[n].pattern)) {
+                               smokey_trace("corrupted block #%d on alloc"
+                                            " sequence (pattern %d)",
+                                            n, chunks[n].pattern);
+                               goto bad;
+                       }
+                       breathe(n);
+               }
+       }
+       
+       if (flags & MEMCHECK_SHUFFLE)
+               random_shuffle(chunks, nrblocks, sizeof(*chunks));
+
+       /*
+        * Release all blocks.
+        */
+       for (n = 0, free_sum_ns = 0, freed = 0, done_frag = false;
+            n < nrblocks; n++) {
+               __RT(clock_gettime(CLOCK_MONOTONIC, &start));
+               ret = md->free(md->heap, chunks[n].ptr);
+               __RT(clock_gettime(CLOCK_MONOTONIC, &end));
+               if (ret) {
+                       smokey_trace("failed to free block %p "
+                                    "(heap=%zu, block=%zu)",
+                                    chunks[n].ptr, heap_size, block_size);
+                       goto bad;
+               }
+               d = diff_ts(&end, &start);
+               if (d > free_max_ns)
+                       free_max_ns = d;
+               free_sum_ns += d;
+               chunks[n].ptr = NULL;
+               /* Make sure we did not trash busy blocks while freeing. */
+               if (flags & MEMCHECK_PATTERN) {
+                       for (k = 0; k < nrblocks; k++) {
+                               if (chunks[k].ptr &&
+                                   !check_pattern(chunks[k].ptr, block_size,
+                                                  chunks[k].pattern)) {
+                                       smokey_trace("corrupted block #%d on 
release"
+                                                    " sequence (pattern %d)",
+                                                    k, chunks[k].pattern);
+                                       goto bad;
+                               }
+                       }
+               }
+               freed += block_size;
+               /*
+                * Get a sense of the fragmentation for the tested
+                * allocation pattern, heap and block sizes when half
+                * of the usable heap size should be available to us.
+                * NOTE: user_size excludes the overhead, this is
+                * actually what we managed to get from the current
+                * heap out of the allocation loop.
+                */
+               if (!done_frag && freed >= user_size / 2) {
+                       /* Calculate the external fragmentation. */
+                       largest_free = find_largest_free(md, freed, block_size);
+                       frag = (1.0 - ((double)largest_free / freed)) * 100.0;
+                       done_frag = true;
+               }
+               breathe(n);
+       }
+
+       /*
+        * If the deallocation mechanism is broken, we might not be
+        * able to reproduce the same allocation pattern with the same
+        * outcome, check this.
+        */
+       if (flags & MEMCHECK_REALLOC) {
+               for (n = 0, alloc_max_ns = alloc_sum_ns = 0; ; n++) {
+                       __RT(clock_gettime(CLOCK_MONOTONIC, &start));
+                       p = md->alloc(md->heap, block_size);
+                       __RT(clock_gettime(CLOCK_MONOTONIC, &end));
+                       d = diff_ts(&end, &start);
+                       if (d > alloc_max_ns)
+                               alloc_max_ns = d;
+                       alloc_sum_ns += d;
+                       if (p == NULL)
+                               break;
+                       if (n >= maxblocks) {
+                               smokey_trace("too many blocks fetched during 
realloc"
+                                            " (heap=%zu, block=%zu, "
+                                            "got more than %d blocks)",
+                                            heap_size, block_size, maxblocks);
+                               goto bad;
+                       }
+                       chunks[n].ptr = p;
+                       breathe(n);
+               }
+               if (n != nrblocks) {
+                       smokey_trace("inconsistent block count fetched during 
realloc"
+                                    " (heap=%zu, block=%zu, "
+                                    "got %d blocks vs %d during alloc)",
+                                    heap_size, block_size, n, nrblocks);
+                       goto bad;
+               }
+               for (n = 0, free_max_ns = free_sum_ns = 0; n < nrblocks; n++) {
+                       __RT(clock_gettime(CLOCK_MONOTONIC, &start));
+                       ret = md->free(md->heap, chunks[n].ptr);
+                       __RT(clock_gettime(CLOCK_MONOTONIC, &end));
+                       if (ret) {
+                               smokey_trace("failed to free block %p during 
realloc"
+                                            "(heap=%zu, block=%zu)",
+                                            chunks[n].ptr, heap_size, 
block_size);
+                               goto bad;
+                       }
+                       d = diff_ts(&end, &start);
+                       if (d > free_max_ns)
+                               free_max_ns = d;
+                       free_sum_ns += d;
+                       breathe(n);
+               }
+       }
+
+       alloc_avg_ns = alloc_sum_ns / nrblocks;
+       free_avg_ns = free_sum_ns / nrblocks;
+
+       if ((flags & MEMCHECK_ZEROOVRD) && heap_size != user_size) {
+               smokey_trace("unexpected overhead reported");
+               goto bad;
+       }
+
+       if (md->get_used_size(md->heap) > 0) {
+               smokey_trace("memory leakage reported: %zu bytes missing",
+                            md->get_used_size(md->heap));
+               goto bad;
+       }
+               
+       /*
+        * Don't report stats when running a pattern check, timings
+        * are affected.
+        */
+do_stats:
+       breathe(0);
+       ret = 0;
+       if (!(flags & MEMCHECK_PATTERN)) {
+               stats = malloc(sizeof(*stats));
+               if (stats == NULL) {
+                       smokey_warning("failed allocating memory");
+                       ret = -ENOMEM;
+                       goto oom;
+               }
+               stats->heap_size = heap_size;
+               stats->user_size = user_size;
+               stats->block_size = block_size;
+               stats->nrblocks = nrblocks;
+               stats->alloc_avg_ns = alloc_avg_ns;
+               stats->alloc_max_ns = alloc_max_ns;
+               stats->free_avg_ns = free_avg_ns;
+               stats->free_max_ns = free_max_ns;
+               stats->overhead = 100.0 - (user_size * 100.0 / heap_size);
+               stats->fragmentation = frag;
+               stats->flags = flags;
+               stats->next = statistics;
+               statistics = stats;
+               nrstats++;
+       }
+
+done:
+       free(chunks);
+no_chunks:
+       md->destroy(md->heap);
+out:
+       if (ret)
+               smokey_trace("** '%s' FAILED(overhead %s, %sshuffle, %scheck, 
%srealloc): heapsz=%zuk, "
+                            "blocksz=%zu, overhead=%zu (%.1f%%)",
+                            md->name,
+                            flags & MEMCHECK_ZEROOVRD ? "disallowed" : 
"allowed",
+                            flags & MEMCHECK_SHUFFLE ? "" : "no ",
+                            flags & MEMCHECK_PATTERN ? "" : "no ",
+                            flags & MEMCHECK_REALLOC ? "" : "no ",
+                            heap_size / 1024, block_size,
+                            arena_size - heap_size,
+                            (arena_size * 100.0 / heap_size) - 100.0);
+oom:
+       __STD(free(mem));
+
+       return ret;
+bad:
+       ret = -EPROTO;
+       goto done;
+}
+
+static inline int test_flags(struct memcheck_descriptor *md, int flags)
+{
+       return md->valid_flags & flags;
+}
+
+int memcheck_run(struct memcheck_descriptor *md,
+                struct smokey_test *t,
+                int argc, char *const argv[])
+{
+       size_t heap_size, block_size;
+       struct sched_param param;
+       cpu_set_t affinity;
+       unsigned long seed;
+       int ret, runs;
+       time_t now;
+
+       smokey_parse_args(t, argc, argv);
+       
+       if (smokey_arg_isset(t, "seq_heap_size"))
+               md->seq_max_heap_size = smokey_arg_size(t, "seq_heap_size");
+       
+       if (smokey_arg_isset(t, "random_alloc_rounds"))
+               md->random_rounds = smokey_arg_int(t, "random_alloc_rounds");
+       
+       if (smokey_arg_isset(t, "pattern_heap_size"))
+               md->pattern_heap_size = smokey_arg_size(t, "pattern_heap_size");
+       
+       if (smokey_arg_isset(t, "pattern_check_rounds"))
+               md->pattern_rounds = smokey_arg_int(t, "pattern_check_rounds");
+
+       if (smokey_arg_isset(t, "max_results"))
+               max_results = smokey_arg_int(t, "max_results");
+
+       now = time(NULL);
+       seed = (unsigned long)now * getpid();
+       srandom(seed);
+
+       smokey_trace("== memcheck started at %s", ctime(&now));
+       smokey_trace("     seq_heap_size=%zuk", md->seq_max_heap_size / 1024);
+       smokey_trace("     random_alloc_rounds=%d", md->random_rounds);
+       smokey_trace("     pattern_heap_size=%zuk", md->pattern_heap_size / 
1024);
+       smokey_trace("     pattern_check_rounds=%d", md->pattern_rounds);
+       
+       CPU_ZERO(&affinity);
+       CPU_SET(0, &affinity);
+       ret = sched_setaffinity(0, sizeof(affinity), &affinity);
+       if (ret) {
+               smokey_warning("failed setting CPU affinity");
+               return -ret;
+       }
+
+       /* This switches to real-time mode over Cobalt. */
+       param.sched_priority = 1;
+       pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
+       /*
+        * Create a series of heaps of increasing size, allocating
+        * then freeing all blocks sequentially from them, ^2 block
+        * sizes up to half of the heap size. Test multiple patterns:
+        *
+        * - alloc -> free_in_alloc_order
+        * - alloc -> free_in_alloc_order -> (re)alloc
+        * - alloc -> free_in_random_order
+        * - alloc -> free_in_random_order -> (re)alloc
+        */
+       for (heap_size = md->seq_min_heap_size;
+            heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+               for (block_size = 16;
+                    block_size < heap_size; block_size <<= 1) {
+                       ret = test_seq(md, heap_size, block_size,
+                                      test_flags(md, MEMCHECK_ZEROOVRD));
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (pow2)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+               for (block_size = 16;
+                    block_size < heap_size; block_size <<= 1) {
+                       ret = test_seq(md, heap_size, block_size,
+                              test_flags(md, 
MEMCHECK_ZEROOVRD|MEMCHECK_REALLOC));
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (pow2, realloc)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+               for (block_size = 16;
+                    block_size < heap_size; block_size <<= 1) {
+                       ret = test_seq(md, heap_size, block_size,
+                              test_flags(md, 
MEMCHECK_ZEROOVRD|MEMCHECK_SHUFFLE));
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (pow2, shuffle)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+               for (block_size = 16;
+                    block_size < heap_size; block_size <<= 1) {
+                       ret = test_seq(md, heap_size, block_size,
+                              test_flags(md, 
MEMCHECK_ZEROOVRD|MEMCHECK_REALLOC|MEMCHECK_SHUFFLE));
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (pow2, shuffle, 
realloc)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+       }
+
+       ret = dump_stats(md, "SEQUENTIAL ALLOC->FREE, ^2 BLOCK SIZES");
+       if (ret)
+               return ret;
+       
+       /*
+        * Create a series of heaps of increasing size, allocating
+        * then freeing all blocks sequentially from them, random
+        * block sizes. Test multiple patterns as previously with ^2
+        * block sizes.
+        */
+       for (heap_size = md->seq_min_heap_size;
+            heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+               for (runs = 0; runs < md->random_rounds; runs++) {
+                       block_size = (random() % heap_size) ?: 1;
+                       ret = test_seq(md, heap_size, block_size, 0);
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (random)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+       }
+       
+       for (heap_size = md->seq_min_heap_size;
+            heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+               for (runs = 0; runs < md->random_rounds; runs++) {
+                       block_size = (random() % heap_size) ?: 1;
+                       ret = test_seq(md, heap_size, block_size,
+                                      test_flags(md, MEMCHECK_REALLOC));
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (random, realloc)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+       }
+       
+       for (heap_size = md->seq_min_heap_size;
+            heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+               for (runs = 0; runs < md->random_rounds; runs++) {
+                       block_size = (random() % heap_size) ?: 1;
+                       ret = test_seq(md, heap_size, block_size,
+                                      test_flags(md, MEMCHECK_SHUFFLE));
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (random, shuffle)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+       }
+       
+       for (heap_size = md->seq_min_heap_size;
+            heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+               for (runs = 0; runs < md->random_rounds; runs++) {
+                       block_size = (random() % heap_size) ?: 1;
+                       ret = test_seq(md, heap_size, block_size,
+                              test_flags(md, 
MEMCHECK_REALLOC|MEMCHECK_SHUFFLE));
+                       if (ret) {
+                               smokey_trace("failed with %zuk heap, "
+                                            "%zu-byte block (random, shuffle, 
realloc)",
+                                            heap_size / 1024, block_size);
+                               return ret;
+                       }
+               }
+       }
+
+       ret = dump_stats(md, "SEQUENTIAL ALLOC->FREE, RANDOM BLOCK SIZES");
+       if (ret)
+               return ret;
+       
+       smokey_trace("\n(running the pattern check test for '%s'"
+                    " -- this may take some time)", md->name);
+
+       for (runs = 0; runs < md->pattern_rounds; runs++) {
+               block_size = (random() % 1024 ?: 1);
+               ret = test_seq(md, md->pattern_heap_size, block_size,
+                              test_flags(md, 
MEMCHECK_SHUFFLE|MEMCHECK_PATTERN));
+               if (ret) {
+                       smokey_trace("failed with %zuk heap, "
+                                    "%zu-byte block (random, shuffle, check)",
+                                    md->pattern_heap_size / 1024, block_size);
+                       return ret;
+               }
+       }
+       
+       now = time(NULL);
+       smokey_trace("\n== memcheck finished at %s", ctime(&now));
+
+       return ret;
+}
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/tunables.h>
+
+static int memcheck_tune(void)
+{
+       set_config_tunable(print_buffer_size, 512 * 1024);
+
+       return 0;
+}
+
+static struct setup_descriptor memcheck_setup = {
+       .name = "memcheck",
+       .tune = memcheck_tune,
+};
+
+user_setup_call(memcheck_setup);
+
+#endif
diff --git a/testsuite/smokey/memcheck/memcheck.h 
b/testsuite/smokey/memcheck/memcheck.h
new file mode 100644
index 0000000..6e4e78c
--- /dev/null
+++ b/testsuite/smokey/memcheck/memcheck.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <r...@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef SMOKEY_MEMCHECK_H
+#define SMOKEY_MEMCHECK_H
+
+#include <sys/types.h>
+#include <boilerplate/ancillaries.h>
+#include <smokey/smokey.h>
+
+#define MEMCHECK_ZEROOVRD   1
+#define MEMCHECK_SHUFFLE    2
+#define MEMCHECK_PATTERN    4
+#define MEMCHECK_REALLOC    8
+#define MEMCHECK_ALL_FLAGS  0xf
+
/*
 * Describes one allocator under test: its operation handlers
 * (normalized to generic signatures via the HEAP_*_T casts below) and
 * the default test geometry used by memcheck_run().
 */
struct memcheck_descriptor {
	const char *name;	/* Allocator name, used in trace output. */
	int (*init)(void *heap, void *mem, size_t heap_size);
	void (*destroy)(void *heap);
	void *(*alloc)(void *heap, size_t size);
	int (*free)(void *heap, void *block);
	size_t (*get_used_size)(void *heap);
	size_t (*get_usable_size)(void *heap);
	/* Memory needed to host a heap of heap_size bytes, metadata included. */
	size_t (*get_arena_size)(size_t heap_size);
	size_t seq_min_heap_size;	/* Bounds for the sequential alloc tests. */
	size_t seq_max_heap_size;
	int random_rounds;		/* Rounds of random-size allocations. */
	size_t pattern_heap_size;	/* Heap size for the pattern check test. */
	int pattern_rounds;		/* Rounds of pattern check tests. */
	void *heap;			/* Allocator-specific heap state. */
	int valid_flags;		/* MEMCHECK_* modes this allocator supports. */
};
+
/*
 * Adapter casts mapping each allocator's native entry points onto the
 * generic handler signatures of struct memcheck_descriptor.
 *
 * NOTE(review): calling a function through a pointer of an
 * incompatible type is formally undefined behavior in C; this relies
 * on the target ABIs treating the pointer/size parameters uniformly —
 * confirm when porting to a new architecture.
 */
#define HEAP_INIT_T(__p)    ((int (*)(void *heap, void *mem, size_t size))(__p))
#define HEAP_DESTROY_T(__p) ((void (*)(void *heap))(__p))
#define HEAP_ALLOC_T(__p)   ((void *(*)(void *heap, size_t size))(__p))
#define HEAP_FREE_T(__p)    ((int (*)(void *heap, void *block))(__p))
#define HEAP_USED_T(__p)    ((size_t (*)(void *heap))(__p))
#define HEAP_USABLE_T(__p)  ((size_t (*)(void *heap))(__p))
+
/* Command-line arguments shared by every memcheck-based smokey plugin. */
#define MEMCHECK_ARGS                                  \
	SMOKEY_ARGLIST(                                 \
		SMOKEY_SIZE(seq_heap_size),             \
		SMOKEY_SIZE(pattern_heap_size),         \
		SMOKEY_INT(random_alloc_rounds),        \
		SMOKEY_INT(pattern_check_rounds),       \
		SMOKEY_INT(max_results),                \
	)

/* Help text matching MEMCHECK_ARGS, appended to each plugin description. */
#define MEMCHECK_HELP_STRINGS                                          \
	"\tseq_heap_size=<size[K|M|G]>\tmax. heap size for sequential alloc tests\n" \
	"\tpattern_heap_size=<size[K|M|G]>\tmax. heap size for pattern check test\n" \
	"\trandom_alloc_rounds=<N>\t\t# of rounds of random-size allocations\n" \
	"\tpattern_check_rounds=<N>\t# of rounds of pattern check tests\n" \
	"\tmax_results=<N>\t# of result lines (worst-case first, 0=all)\n" \
	"\tSet --verbose=2 for detailed runtime statistics.\n"
+
/*
 * Generic driver: runs the whole memcheck suite against the allocator
 * described by @md, parsing the smokey arguments from @argc/@argv.
 * Returns 0 on success, a non-zero error status otherwise.
 */
int memcheck_run(struct memcheck_descriptor *md,
		 struct smokey_test *t,
		 int argc, char *const argv[]);

#endif /* SMOKEY_MEMCHECK_H */
diff --git a/testsuite/smokey/memory-heapmem/Makefile.am 
b/testsuite/smokey/memory-heapmem/Makefile.am
new file mode 100644
index 0000000..35c4a94
--- /dev/null
+++ b/testsuite/smokey/memory-heapmem/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libmemory-heapmem.a
+
+libmemory_heapmem_a_SOURCES = heapmem.c
+
+libmemory_heapmem_a_CPPFLAGS =         \
+       @XENO_USER_CFLAGS@      \
+       -I$(srcdir)/..          \
+       -I$(top_srcdir)/include
diff --git a/testsuite/smokey/memory-heapmem/heapmem.c 
b/testsuite/smokey/memory-heapmem/heapmem.c
new file mode 100644
index 0000000..475056c
--- /dev/null
+++ b/testsuite/smokey/memory-heapmem/heapmem.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <r...@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <boilerplate/heapmem.h>
+#include "memcheck/memcheck.h"
+
/* Register the heapmem sanity test with the smokey framework. */
smokey_test_plugin(memory_heapmem,
		   MEMCHECK_ARGS,
		   "Check for the heapmem allocator sanity.\n"
		   MEMCHECK_HELP_STRINGS
	);

/* Default test geometry; may be overridden from the command line. */
#define MIN_HEAP_SIZE  8192
#define MAX_HEAP_SIZE  (1024 * 1024 * 1)
#define RANDOM_ROUNDS  128

#define PATTERN_HEAP_SIZE  (128*1024)
#define PATTERN_ROUNDS     32

/* Heap state passed to the heapmem_* calls via the descriptor. */
static struct heap_memory heap;
+
/*
 * Translate a nominal heap size into the amount of memory to reserve
 * for the arena, accounting for heapmem's metadata overhead.
 */
static size_t get_arena_size(size_t heap_size)
{
	return HEAPMEM_ARENA_SIZE(heap_size);
}
+
+static struct memcheck_descriptor heapmem_descriptor = {
+       .name = "heapmem",
+       .init = HEAP_INIT_T(heapmem_init),
+       .destroy = HEAP_DESTROY_T(heapmem_destroy),
+       .alloc = HEAP_ALLOC_T(heapmem_alloc),
+       .free = HEAP_FREE_T(heapmem_free),
+       .get_usable_size = HEAP_USABLE_T(heapmem_usable_size),
+       .get_used_size = HEAP_USED_T(heapmem_used_size),
+       .seq_min_heap_size = MIN_HEAP_SIZE,
+       .seq_max_heap_size = MAX_HEAP_SIZE,
+       .random_rounds = RANDOM_ROUNDS,
+       .pattern_heap_size = PATTERN_HEAP_SIZE,
+       .pattern_rounds = PATTERN_ROUNDS,
+       .heap = &heap,
+       .get_arena_size = get_arena_size,
+       .valid_flags = MEMCHECK_ALL_FLAGS,
+};
+
/* Smokey entry point: run the generic memcheck suite over heapmem. */
static int run_memory_heapmem(struct smokey_test *t,
			      int argc, char *const argv[])
{
	return memcheck_run(&heapmem_descriptor, t, argc, argv);
}
diff --git a/testsuite/smokey/memory-pshared/Makefile.am 
b/testsuite/smokey/memory-pshared/Makefile.am
new file mode 100644
index 0000000..59df783
--- /dev/null
+++ b/testsuite/smokey/memory-pshared/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libmemory-pshared.a
+
+libmemory_pshared_a_SOURCES = pshared.c
+
+libmemory_pshared_a_CPPFLAGS =                 \
+       @XENO_USER_CFLAGS@              \
+       -I$(srcdir)/..                  \
+       -I$(top_srcdir)/include
diff --git a/testsuite/smokey/memory-pshared/pshared.c 
b/testsuite/smokey/memory-pshared/pshared.c
new file mode 100644
index 0000000..fa93089
--- /dev/null
+++ b/testsuite/smokey/memory-pshared/pshared.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <r...@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <xenomai/init.h>
+#include <xenomai/tunables.h>
+#include <copperplate/heapobj.h>
+#include "memcheck/memcheck.h"
+
/* Register the pshared sanity test with the smokey framework. */
smokey_test_plugin(memory_pshared,
		   MEMCHECK_ARGS,
		   "Check for the pshared allocator sanity.\n"
		   MEMCHECK_HELP_STRINGS
	);

/*
 * This allocator is insanely slow for running some patterns. Use
 * small test values by default.
 */
#define MIN_HEAP_SIZE  4096
#define MAX_HEAP_SIZE  (1024 * 512)
#define RANDOM_ROUNDS  128

#define PATTERN_HEAP_SIZE  (128*1024)
#define PATTERN_ROUNDS     32

/* Heap state passed to the heapobj_* calls via the descriptor. */
static struct heapobj heap;
+
+static int do_pshared_init(void *heap, void *mem, size_t arena_size)
+{
+       /* mem is ignored, pshared uses its own memory. */
+       return heapobj_init(heap, "memcheck", arena_size);
+}
+
/* Release a heap previously set up by do_pshared_init(). */
static void do_pshared_destroy(void *heap)
{
	heapobj_destroy(heap);
}
+
/*
 * Allocate @size bytes from @heap; presumably returns NULL on
 * exhaustion — verify against the heapobj_alloc() contract.
 */
static void *do_pshared_alloc(void *heap, size_t size)
{
	return heapobj_alloc(heap, size);
}
+
/*
 * Release @block back to @heap. heapobj_free() reports no status
 * here, so success is assumed unconditionally.
 */
static int do_pshared_free(void *heap, void *block)
{
	heapobj_free(heap, block);

	return 0;	/* Hope for the best. */
}
+
/* Amount of memory currently drawn from @heap, per heapobj_inquire(). */
static size_t do_pshared_used_size(void *heap)
{
	return heapobj_inquire(heap);
}
+
/* Total allocatable space in @heap, per heapobj_get_size(). */
static size_t do_pshared_usable_size(void *heap)
{
	return heapobj_get_size(heap);
}
+
+static size_t do_pshared_arena_size(size_t heap_size)
+{
+       struct heapobj h;
+       size_t overhead;
+       int ret;
+
+       ret = heapobj_init(&h, "memcheck", heap_size);
+       if (ret)
+               return 0;
+
+       overhead = heap_size - heapobj_get_size(&h);
+       heapobj_destroy(&h);
+
+       /*
+        * pshared must have no external overhead, since
+        * heapobj_init() allocates the memory it needs.  Make sure
+        * this assumption is correct for any tested size.
+        */
+       return overhead == 0 ? heap_size : 0;
+}
+
/* Glue between the generic memcheck driver and the pshared allocator. */
static struct memcheck_descriptor pshared_descriptor = {
	.name = "pshared",
	.init = HEAP_INIT_T(do_pshared_init),
	.destroy = HEAP_DESTROY_T(do_pshared_destroy),
	.alloc = HEAP_ALLOC_T(do_pshared_alloc),
	.free = HEAP_FREE_T(do_pshared_free),
	.get_usable_size = HEAP_USABLE_T(do_pshared_usable_size),
	.get_used_size = HEAP_USED_T(do_pshared_used_size),
	.get_arena_size = do_pshared_arena_size,
	.seq_min_heap_size = MIN_HEAP_SIZE,
	.seq_max_heap_size = MAX_HEAP_SIZE,
	.random_rounds = RANDOM_ROUNDS,
	.pattern_heap_size = PATTERN_HEAP_SIZE,
	.pattern_rounds = PATTERN_ROUNDS,
	/* heapobj-pshared has overhead even for ^2 sizes, can't check for ZEROOVRD. */
	.valid_flags = MEMCHECK_ALL_FLAGS & ~MEMCHECK_ZEROOVRD,
	.heap = &heap,
};
+
/* Smokey entry point: run the generic memcheck suite over pshared. */
static int run_memory_pshared(struct smokey_test *t,
			      int argc, char *const argv[])
{
	return memcheck_run(&pshared_descriptor, t, argc, argv);
}
+
/* Tuning hook run by the Xenomai auto-init code before the test. */
static int memcheck_pshared_tune(void)
{
	/*
	 * We create test pools from the main one: make sure the
	 * latter is large enough.
	 */
	set_config_tunable(mem_pool_size, MAX_HEAP_SIZE + 1024 * 1024);

	return 0;
}

/* Register the tuning hook with the Xenomai auto-init machinery. */
static struct setup_descriptor memcheck_pshared_setup = {
	.name = "memcheck_pshared",
	.tune = memcheck_pshared_tune,
};

user_setup_call(memcheck_pshared_setup);
diff --git a/testsuite/smokey/memory-tlsf/Makefile.am 
b/testsuite/smokey/memory-tlsf/Makefile.am
new file mode 100644
index 0000000..901e6fd
--- /dev/null
+++ b/testsuite/smokey/memory-tlsf/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libmemory-tlsf.a
+
+libmemory_tlsf_a_SOURCES = tlsf.c
+
+libmemory_tlsf_a_CPPFLAGS =            \
+       @XENO_USER_CFLAGS@              \
+       -I$(top_srcdir)/lib/boilerplate \
+       -I$(srcdir)/..                  \
+       -I$(top_srcdir)/include
diff --git a/testsuite/smokey/memory-tlsf/tlsf.c 
b/testsuite/smokey/memory-tlsf/tlsf.c
new file mode 100644
index 0000000..114c00b
--- /dev/null
+++ b/testsuite/smokey/memory-tlsf/tlsf.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <r...@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <tlsf/tlsf.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "memcheck/memcheck.h"
+
/* Register the TLSF sanity test with the smokey framework. */
smokey_test_plugin(memory_tlsf,
		   MEMCHECK_ARGS,
		   "Check for the TLSF allocator sanity.\n"
		   MEMCHECK_HELP_STRINGS
	);

/* Default test geometry; may be overridden from the command line. */
#define MIN_HEAP_SIZE  8192
#define MAX_HEAP_SIZE  (1024 * 1024 * 1)
#define RANDOM_ROUNDS  128

#define PATTERN_HEAP_SIZE  (128*1024)
#define PATTERN_ROUNDS     32

static struct memcheck_descriptor tlsf_descriptor;

/*
 * NOTE(review): this lock serializes malloc_ex/free_ex, presumably
 * because the TLSF core is not reentrant — confirm against the TLSF
 * build options in use.
 */
static pthread_mutex_t tlsf_lock = PTHREAD_MUTEX_INITIALIZER;

/* TLSF header overhead, measured by do_tlsf_arena_size(). */
static size_t overhead;

static size_t test_pool_size; /* TLSF does not save this information. */
+
/*
 * Set up a TLSF pool inside @mem. The pool base address doubles as
 * the heap handle for all subsequent calls, so publish it into the
 * descriptor. init_memory_pool() reports failure by returning -1L.
 * @dummy (the descriptor's .heap slot) is ignored.
 */
static int do_tlsf_init(void *dummy, void *mem, size_t pool_size)
{
	tlsf_descriptor.heap = mem;
	return init_memory_pool(pool_size, mem) == -1L ? -ENOMEM : 0;
}
+
/* Tear down a pool previously set up by do_tlsf_init(). */
static void do_tlsf_destroy(void *pool)
{
	destroy_memory_pool(pool);
}
+
/* Allocate @size bytes from @pool, serialized by tlsf_lock. */
static void *do_tlsf_alloc(void *pool, size_t size)
{
	void *p;

	pthread_mutex_lock(&tlsf_lock);
	p = malloc_ex(size, pool);
	pthread_mutex_unlock(&tlsf_lock);

	return p;
}
+
/*
 * Release @block back to @pool, serialized by tlsf_lock. free_ex()
 * reports no status, so success is assumed unconditionally.
 */
static int do_tlsf_free(void *pool, void *block)
{
	pthread_mutex_lock(&tlsf_lock);
	free_ex(block, pool);
	pthread_mutex_unlock(&tlsf_lock);

	return 0;	/* Yeah, well... */
}
+
/*
 * Memory currently allocated from @pool, excluding the TLSF header.
 * NOTE(review): depends on 'overhead' having been measured by a prior
 * do_tlsf_arena_size() call — verify the driver's call ordering.
 */
static size_t do_tlsf_used_size(void *pool)
{
	/* Do not count the overhead memory for the TLSF header. */
	return get_used_size(pool) - overhead;
}
+
/*
 * Nominal pool size recorded at arena sizing time; TLSF does not
 * store this itself, so @pool is unused.
 */
static size_t do_tlsf_usable_size(void *pool)
{
	return test_pool_size;
}
+
+static size_t do_tlsf_arena_size(size_t pool_size)
+{
+       size_t available_size;
+       void *pool;
+
+       /*
+        * The area size is the total amount of memory some allocator
+        * may need for managing a heap, including its metadata. We
+        * need to figure out how much memory overhead TLSF has for a
+        * given pool size, which we add to the ideal pool_size for
+        * determining the arena size.
+        */
+       test_pool_size = pool_size;
+       pool = __STD(malloc(pool_size));
+       available_size = init_memory_pool(pool_size, pool);
+       if (available_size == (size_t)-1) {
+               __STD(free(pool));
+               return 0;
+       }
+
+       destroy_memory_pool(pool);
+       overhead = pool_size - available_size;
+       __STD(free(pool));
+
+       return pool_size + overhead;
+}
+
/*
 * Glue between the generic memcheck driver and the TLSF allocator.
 * .heap is left unset here; do_tlsf_init() fills it in with the pool
 * base address at run time.
 */
static struct memcheck_descriptor tlsf_descriptor = {
	.name = "tlsf",
	.init = HEAP_INIT_T(do_tlsf_init),
	.destroy = HEAP_DESTROY_T(do_tlsf_destroy),
	.alloc = HEAP_ALLOC_T(do_tlsf_alloc),
	.free = HEAP_FREE_T(do_tlsf_free),
	.get_usable_size = HEAP_USABLE_T(do_tlsf_usable_size),
	.get_used_size = HEAP_USED_T(do_tlsf_used_size),
	.get_arena_size = do_tlsf_arena_size,
	.seq_min_heap_size = MIN_HEAP_SIZE,
	.seq_max_heap_size = MAX_HEAP_SIZE,
	.random_rounds = RANDOM_ROUNDS,
	.pattern_heap_size = PATTERN_HEAP_SIZE,
	.pattern_rounds = PATTERN_ROUNDS,
	/* TLSF always has overhead, can't check for ZEROOVRD. */
	.valid_flags = MEMCHECK_ALL_FLAGS & ~MEMCHECK_ZEROOVRD,
};
+
/* Smokey entry point: run the generic memcheck suite over TLSF. */
static int run_memory_tlsf(struct smokey_test *t,
			   int argc, char *const argv[])
{
	return memcheck_run(&tlsf_descriptor, t, argc, argv);
}


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
https://xenomai.org/mailman/listinfo/xenomai-git

Reply via email to