Hi,

This diff adds better error reporting for write-after-free or the more
general write of free memory if malloc option D is active. Knowing the
place where allocations were done often helps to find out where the
overwrite happened.

If option D is active malloc now saves caller info in a separate page
instead of only doing that for chunk index 0. It also reports about
the preceding chunk if applicable. A report looks like this:

X(12489) in calloc(): write to free chunk 0x851ec6066d0[0..7]@16
allocated at /usr/X11R6/lib/modules/dri/radeonsi_dri.so 0x88c949
(preceding chunk 0x851ec6066c0 allocated at /usr/X11R6/bin/X 0x177374 (now 
free))

You can use addr2line -e to get the file and line number where the
allocation was done (if the object was compiled with debug info).

If D is not used only the first part is printed, as no caller info is
saved. So there is no extra overhead if D is not used.

Also: the leak report no longer contains unknown locations, as
each allocation gets its caller recorded if D is active.

        -Otto

Index: stdlib/malloc.3
===================================================================
RCS file: /home/cvs/src/lib/libc/stdlib/malloc.3,v
retrieving revision 1.137
diff -u -p -r1.137 malloc.3
--- stdlib/malloc.3     1 Jul 2023 18:35:14 -0000       1.137
+++ stdlib/malloc.3     10 Oct 2023 10:23:19 -0000
@@ -307,7 +307,7 @@ These malloc options imply
 .Cm D .
 .It Cm F
 .Dq Freecheck .
-Enable more extensive double free and use after free detection.
+Enable more extensive double free and write after free detection.
 All chunks in the delayed free list will be checked for double frees and
 write after frees.
 Unused pages on the freelist are read and write protected to
@@ -641,18 +641,34 @@ or
 reallocate an unallocated pointer was made.
 .It Dq double free
 There was an attempt to free an allocation that had already been freed.
-.It Dq write after free
-An allocation has been modified after it was freed.
+.It Dq write of free mem Va address Ns [ Va start Ns .. Ns Va end Ns ]@ Ns Va 
size
+An allocation has been modified after it was freed,
+or a chunk that was never allocated was written to.
+The
+.Va range
+at which corruption was detected is printed between [ and ].
+.Pp
+Enabling option
+.Cm D
+allows malloc to print information about where the allocation
+was done.
 .It Dq modified chunk-pointer
 The pointer passed to
 .Fn free
 or a reallocation function has been modified.
-.It Dq canary corrupted address offset@length
-A byte after the requested size has been overwritten,
+.It Dq canary corrupted Va address Ns [ Va offset Ns ]@ Ns Va length Ns / Ns 
Va size
+A byte after the requested
+.Va length has been overwritten,
 indicating a heap overflow.
-The offset at which corruption was detected is printed before the @,
-and the requested length of the allocation after the @.
-.It Dq recorded size oldsize inconsistent with size
+The
+.Va offset
+at which corruption was detected is printed between [ and ],
+the requested
+.Va length
+of the allocation is printed before the / and the
+.Va size
+of the allocation after the /.
+.It Dq recorded size Va oldsize No inconsistent with Va size
 .Fn recallocarray
 or
 .Fn freezero
@@ -676,7 +692,7 @@ functions nor utilize any other function
 (e.g.,
 .Xr stdio 3
 routines).
-.It Dq unknown char in MALLOC_OPTIONS
+.It Dq unknown char in Ev MALLOC_OPTIONS
 We found something we didn't understand.
 .It any other error
 .Fn malloc
Index: stdlib/malloc.c
===================================================================
RCS file: /home/cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.290
diff -u -p -r1.290 malloc.c
--- stdlib/malloc.c     9 Sep 2023 06:52:40 -0000       1.290
+++ stdlib/malloc.c     10 Oct 2023 10:23:19 -0000
@@ -112,7 +112,7 @@ struct region_info {
        void *p;                /* page; low bits used to mark chunks */
        uintptr_t size;         /* size for pages, or chunk_info pointer */
 #ifdef MALLOC_STATS
-       void *f;                /* where allocated from */
+       void **f;               /* where allocated from */
 #endif
 };
 
@@ -146,7 +146,7 @@ struct dir_info {
        size_t regions_total;           /* number of region slots */
        size_t regions_free;            /* number of free slots */
        size_t rbytesused;              /* random bytes used */
-       char *func;                     /* current function */
+       const char *func;               /* current function */
        int malloc_junk;                /* junk fill? */
        int mmap_flag;                  /* extra flag for mmap */
        int mutex;
@@ -166,6 +166,7 @@ struct dir_info {
        void *chunk_pages;
        size_t chunk_pages_used;
 #ifdef MALLOC_STATS
+       void *caller;
        size_t inserts;
        size_t insert_collisions;
        size_t finds;
@@ -183,12 +184,16 @@ struct dir_info {
 #define STATS_INC(x)   ((x)++)
 #define STATS_ZERO(x)  ((x) = 0)
 #define STATS_SETF(x,y)        ((x)->f = (y))
+#define STATS_SETFN(x,k,y)     ((x)->f[k] = (y))
+#define SET_CALLER(x,y)        if (DO_STATS) ((x)->caller = (y))
 #else
 #define STATS_ADD(x,y) /* nothing */
 #define STATS_SUB(x,y) /* nothing */
 #define STATS_INC(x)   /* nothing */
 #define STATS_ZERO(x)  /* nothing */
 #define STATS_SETF(x,y)        /* nothing */
+#define STATS_SETFN(x,k,y)     /* nothing */
+#define SET_CALLER(x,y)        /* nothing */
 #endif /* MALLOC_STATS */
        u_int32_t canary2;
 };
@@ -212,6 +217,8 @@ struct chunk_info {
        u_short bits[1];                /* which chunks are free */
 };
 
+#define CHUNK_FREE(i, n)       ((i)->bits[(n) / MALLOC_BITS] & (1U << ((n) % 
MALLOC_BITS)))
+
 struct malloc_readonly {
                                        /* Main bookkeeping information */
        struct dir_info *malloc_pool[_MALLOC_MUTEXES];
@@ -227,7 +234,7 @@ struct malloc_readonly {
        u_int   junk_loc;               /* variation in location of junk */
        size_t  malloc_guard;           /* use guard pages after allocations? */
 #ifdef MALLOC_STATS
-       int     malloc_stats;           /* dump leak report at end */
+       int     malloc_stats;           /* save callers, dump leak report at 
end */
        int     malloc_verbose;         /* dump verbose statistics at end */
 #define        DO_STATS        mopts.malloc_stats
 #else
@@ -254,6 +261,7 @@ static __dead void wrterror(struct dir_i
 void malloc_dump(void);
 PROTO_NORMAL(malloc_dump);
 static void malloc_exit(void);
+static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
 #endif
 
 #if defined(__aarch64__) || \
@@ -714,14 +722,14 @@ junk_free(int junk, void *p, size_t sz)
 }
 
 static inline void
-validate_junk(struct dir_info *pool, void *p, size_t sz)
+validate_junk(struct dir_info *pool, void *p, size_t argsz)
 {
-       size_t i, step = 1;
+       size_t i, sz, step = 1;
        uint64_t *lp = p;
 
-       if (pool->malloc_junk == 0 || sz == 0)
+       if (pool->malloc_junk == 0 || argsz == 0)
                return;
-       sz /= sizeof(uint64_t);
+       sz = argsz / sizeof(uint64_t);
        if (pool->malloc_junk == 1) {
                if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
                        sz = MALLOC_PAGESIZE / sizeof(uint64_t);
@@ -731,8 +739,17 @@ validate_junk(struct dir_info *pool, voi
        }
        /* see junk_free */
        for (i = mopts.junk_loc % step; i < sz; i += step) {
-               if (lp[i] != SOME_FREEJUNK_ULL)
-                       wrterror(pool, "write after free %p", p);
+               if (lp[i] != SOME_FREEJUNK_ULL) {
+#ifdef MALLOC_STATS
+                       if (DO_STATS && argsz <= MALLOC_MAXCHUNK)
+                               print_chunk_details(pool, lp, argsz, i);
+                       else
+#endif
+                               wrterror(pool,
+                                   "write to free mem %p[%zu..%zu]@%zu",
+                                   lp, i * sizeof(uint64_t),
+                                   (i + 1) * sizeof(uint64_t) - 1, argsz);
+               }
        }
 }
 
@@ -809,7 +826,7 @@ unmap(struct dir_info *d, void *p, size_
                i = getrbyte(d) & (cache->max - 1);
                r = cache->pages[i];
                fresh = (uintptr_t)r & 1;
-               *(uintptr_t*)&r &= ~1ULL;
+               *(uintptr_t*)&r &= ~1UL;
                if (!fresh && !mopts.malloc_freeunmap)
                        validate_junk(d, r, sz);
                if (munmap(r, sz))
@@ -995,11 +1012,18 @@ omalloc_make_chunks(struct dir_info *d, 
 {
        struct chunk_info *bp;
        void *pp;
+       void *ff = NULL;
 
        /* Allocate a new bucket */
        pp = map(d, MALLOC_PAGESIZE, 0);
        if (pp == MAP_FAILED)
                return NULL;
+       if (DO_STATS) {
+               ff = map(d, MALLOC_PAGESIZE, 0);
+               if (ff == MAP_FAILED)
+                       goto err;
+               memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / 
B2ALLOC(bucket));
+       }
 
        /* memory protect the page allocated in the malloc(0) case */
        if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
@@ -1011,7 +1035,7 @@ omalloc_make_chunks(struct dir_info *d, 
        bp->page = pp;
 
        if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
-           NULL))
+           ff))
                goto err;
        LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
 
@@ -1022,6 +1046,8 @@ omalloc_make_chunks(struct dir_info *d, 
 
 err:
        unmap(d, pp, MALLOC_PAGESIZE, 0);
+       if (ff != NULL && ff != MAP_FAILED)
+               unmap(d, ff, MALLOC_PAGESIZE, 0);
        return NULL;
 }
 
@@ -1101,7 +1127,7 @@ fill_canary(char *ptr, size_t sz, size_t
  * Allocate a chunk
  */
 static void *
-malloc_bytes(struct dir_info *d, size_t size, void *f)
+malloc_bytes(struct dir_info *d, size_t size)
 {
        u_int i, r, bucket, listnum;
        size_t k;
@@ -1153,11 +1179,6 @@ malloc_bytes(struct dir_info *d, size_t 
                }
        }
 found:
-       if (i == 0 && k == 0 && DO_STATS) {
-               struct region_info *r = find(d, bp->page);
-               STATS_SETF(r, f);
-       }
-
        *lp ^= 1 << k;
 
        /* If there are no more free, remove from free-list */
@@ -1170,6 +1191,11 @@ found:
        if (mopts.chunk_canaries && size > 0)
                bp->bits[bp->offset + k] = size;
 
+       if (DO_STATS) {
+               struct region_info *r = find(d, bp->page);
+               STATS_SETFN(r, k, d->caller);
+       }
+
        k *= B2ALLOC(bp->bucket);
 
        p = (char *)bp->page + k;
@@ -1194,8 +1220,8 @@ validate_canary(struct dir_info *d, u_ch
 
        while (p < q) {
                if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
-                       wrterror(d, "canary corrupted %p %#tx@%#zx%s",
-                           ptr, p - ptr, sz,
+                       wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
+                           ptr, p - ptr, sz, allocated,
                            *p == SOME_FREEJUNK ? " (double free?)" : "");
                }
                p++;
@@ -1215,8 +1241,7 @@ find_chunknum(struct dir_info *d, struct
 
        if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
                wrterror(d, "modified chunk-pointer %p", ptr);
-       if (info->bits[chunknum / MALLOC_BITS] &
-           (1U << (chunknum % MALLOC_BITS)))
+       if (CHUNK_FREE(info, chunknum))
                wrterror(d, "double free %p", ptr);
        if (check && info->bucket > 0) {
                validate_canary(d, ptr, info->bits[info->offset + chunknum],
@@ -1239,9 +1264,6 @@ free_bytes(struct dir_info *d, struct re
        info = (struct chunk_info *)r->size;
        chunknum = find_chunknum(d, info, ptr, 0);
 
-       if (chunknum == 0)
-               STATS_SETF(r, NULL);
-
        info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
        info->free++;
 
@@ -1261,18 +1283,22 @@ free_bytes(struct dir_info *d, struct re
        if (info->bucket == 0 && !mopts.malloc_freeunmap)
                mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
        unmap(d, info->page, MALLOC_PAGESIZE, 0);
+#ifdef MALLOC_STATS
+       if (r->f != NULL) {
+               unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
+               r->f = NULL;
+       }
+#endif
 
        delete(d, r);
        mp = &d->chunk_info_list[info->bucket];
        LIST_INSERT_HEAD(mp, info, entries);
 }
 
-
-
 static void *
-omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
+omalloc(struct dir_info *pool, size_t sz, int zero_fill)
 {
-       void *p;
+       void *p, *caller = NULL;
        size_t psz;
 
        if (sz > MALLOC_MAXCHUNK) {
@@ -1287,7 +1313,11 @@ omalloc(struct dir_info *pool, size_t sz
                        errno = ENOMEM;
                        return NULL;
                }
-               if (insert(pool, p, sz, f)) {
+#ifdef MALLOC_STATS
+               if (DO_STATS)
+                       caller = pool->caller;
+#endif
+               if (insert(pool, p, sz, caller)) {
                        unmap(pool, p, psz, 0);
                        errno = ENOMEM;
                        return NULL;
@@ -1324,7 +1354,7 @@ omalloc(struct dir_info *pool, size_t sz
 
        } else {
                /* takes care of SOME_JUNK */
-               p = malloc_bytes(pool, sz, f);
+               p = malloc_bytes(pool, sz);
                if (zero_fill && p != NULL && sz > 0)
                        memset(p, 0, sz);
        }
@@ -1473,7 +1503,8 @@ malloc(size_t size)
        int saved_errno = errno;
 
        PROLOGUE(getpool(), "malloc")
-       r = omalloc(d, size, 0, caller());
+       SET_CALLER(d, caller());
+       r = omalloc(d, size, 0);
        EPILOGUE()
        return r;
 }
@@ -1487,7 +1518,8 @@ malloc_conceal(size_t size)
        int saved_errno = errno;
 
        PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
-       r = omalloc(d, size, 0, caller());
+       SET_CALLER(d, caller());
+       r = omalloc(d, size, 0);
        EPILOGUE()
        return r;
 }
@@ -1495,7 +1527,7 @@ DEF_WEAK(malloc_conceal);
 
 static struct region_info *
 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
-    char **saved_function)
+    const char ** saved_function)
 {
        struct dir_info *pool = argpool;
        struct region_info *r = find(pool, p);
@@ -1533,7 +1565,7 @@ ofree(struct dir_info **argpool, void *p
 {
        struct region_info *r;
        struct dir_info *pool;
-       char *saved_function;
+       const char *saved_function;
        size_t sz;
 
        r = findpool(p, *argpool, &pool, &saved_function);
@@ -1721,11 +1753,11 @@ freezero(void *ptr, size_t sz)
 DEF_WEAK(freezero);
 
 static void *
-orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
+orealloc(struct dir_info **argpool, void *p, size_t newsz)
 {
        struct region_info *r;
        struct dir_info *pool;
-       char *saved_function;
+       const char *saved_function;
        struct chunk_info *info;
        size_t oldsz, goldsz, gnewsz;
        void *q, *ret;
@@ -1733,7 +1765,7 @@ orealloc(struct dir_info **argpool, void
        int forced;
 
        if (p == NULL)
-               return omalloc(*argpool, newsz, 0, f);
+               return omalloc(*argpool, newsz, 0);
 
        if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
                errno = ENOMEM;
@@ -1797,7 +1829,7 @@ orealloc(struct dir_info **argpool, void
                                        if (mopts.chunk_canaries)
                                                fill_canary(p, newsz,
                                                    PAGEROUND(newsz));
-                                       STATS_SETF(r, f);
+                                       STATS_SETF(r, (*argpool)->caller);
                                        STATS_INC(pool->cheap_reallocs);
                                        ret = p;
                                        goto done;
@@ -1822,7 +1854,7 @@ orealloc(struct dir_info **argpool, void
                                p = pp;
                        } else if (mopts.chunk_canaries)
                                fill_canary(p, newsz, PAGEROUND(newsz));
-                       STATS_SETF(r, f);
+                       STATS_SETF(r, (*argpool)->caller);
                        ret = p;
                        goto done;
                } else {
@@ -1844,7 +1876,7 @@ orealloc(struct dir_info **argpool, void
                                if (mopts.chunk_canaries)
                                        fill_canary(p, newsz, PAGEROUND(newsz));
                        }
-                       STATS_SETF(r, f);
+                       STATS_SETF(r, (*argpool)->caller);
                        ret = p;
                        goto done;
                }
@@ -1859,12 +1891,12 @@ orealloc(struct dir_info **argpool, void
                        info->bits[info->offset + chunknum] = newsz;
                        fill_canary(p, newsz, B2SIZE(info->bucket));
                }
-               if (DO_STATS && chunknum == 0)
-                       STATS_SETF(r, f);
+               if (DO_STATS)
+                       STATS_SETFN(r, chunknum, (*argpool)->caller);
                ret = p;
        } else if (newsz != oldsz || forced) {
                /* create new allocation */
-               q = omalloc(pool, newsz, 0, f);
+               q = omalloc(pool, newsz, 0);
                if (q == NULL) {
                        ret = NULL;
                        goto done;
@@ -1877,8 +1909,8 @@ orealloc(struct dir_info **argpool, void
                /* oldsz == newsz */
                if (newsz != 0)
                        wrterror(pool, "realloc internal inconsistency");
-               if (DO_STATS && chunknum == 0)
-                       STATS_SETF(r, f);
+               if (DO_STATS)
+                       STATS_SETFN(r, chunknum, (*argpool)->caller);
                ret = p;
        }
 done:
@@ -1897,7 +1929,8 @@ realloc(void *ptr, size_t size)
        int saved_errno = errno;
 
        PROLOGUE(getpool(), "realloc")
-       r = orealloc(&d, ptr, size, caller());
+       SET_CALLER(d, caller());
+       r = orealloc(&d, ptr, size);
        EPILOGUE()
        return r;
 }
@@ -1917,6 +1950,7 @@ calloc(size_t nmemb, size_t size)
        int saved_errno = errno;
 
        PROLOGUE(getpool(), "calloc")
+       SET_CALLER(d, caller());
        if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
            nmemb > 0 && SIZE_MAX / nmemb < size) {
                d->active--;
@@ -1928,7 +1962,7 @@ calloc(size_t nmemb, size_t size)
        }
 
        size *= nmemb;
-       r = omalloc(d, size, 1, caller());
+       r = omalloc(d, size, 1);
        EPILOGUE()
        return r;
 }
@@ -1942,6 +1976,7 @@ calloc_conceal(size_t nmemb, size_t size
        int saved_errno = errno;
 
        PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
+       SET_CALLER(d, caller());
        if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
            nmemb > 0 && SIZE_MAX / nmemb < size) {
                d->active--;
@@ -1953,7 +1988,7 @@ calloc_conceal(size_t nmemb, size_t size
        }
 
        size *= nmemb;
-       r = omalloc(d, size, 1, caller());
+       r = omalloc(d, size, 1);
        EPILOGUE()
        return r;
 }
@@ -1961,16 +1996,16 @@ DEF_WEAK(calloc_conceal);
 
 static void *
 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
-    size_t newsize, void *f)
+    size_t newsize)
 {
        struct region_info *r;
        struct dir_info *pool;
-       char *saved_function;
+       const char *saved_function;
        void *newptr;
        size_t sz;
 
        if (p == NULL)
-               return omalloc(*argpool, newsize, 1, f);
+               return omalloc(*argpool, newsize, 1);
 
        if (oldsize == newsize)
                return p;
@@ -2001,7 +2036,7 @@ orecallocarray(struct dir_info **argpool
                            sz - mopts.malloc_guard, oldsize);
        }
 
-       newptr = omalloc(pool, newsize, 0, f);
+       newptr = omalloc(pool, newsize, 0);
        if (newptr == NULL)
                goto done;
 
@@ -2086,6 +2121,7 @@ recallocarray(void *ptr, size_t oldnmemb
                return recallocarray_p(ptr, oldnmemb, newnmemb, size);
 
        PROLOGUE(getpool(), "recallocarray")
+       SET_CALLER(d, caller());
 
        if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
            newnmemb > 0 && SIZE_MAX / newnmemb < size) {
@@ -2109,7 +2145,7 @@ recallocarray(void *ptr, size_t oldnmemb
                oldsize = oldnmemb * size;
        }
 
-       r = orecallocarray(&d, ptr, oldsize, newsize, caller());
+       r = orecallocarray(&d, ptr, oldsize, newsize);
        EPILOGUE()
        return r;
 }
@@ -2150,11 +2186,10 @@ mapalign(struct dir_info *d, size_t alig
 }
 
 static void *
-omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
-    void *f)
+omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill)
 {
        size_t psz;
-       void *p;
+       void *p, *caller = NULL;
 
        /* If between half a page and a page, avoid MALLOC_MOVE. */
        if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
@@ -2174,7 +2209,7 @@ omemalign(struct dir_info *pool, size_t 
                                pof2 <<= 1;
                } else
                        pof2 = sz;
-               return omalloc(pool, pof2, zero_fill, f);
+               return omalloc(pool, pof2, zero_fill);
        }
 
        if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
@@ -2193,7 +2228,11 @@ omemalign(struct dir_info *pool, size_t 
                return NULL;
        }
 
-       if (insert(pool, p, sz, f)) {
+#ifdef MALLOC_STATS
+       if (DO_STATS)
+               caller = pool->caller;
+#endif
+       if (insert(pool, p, sz, caller)) {
                unmap(pool, p, psz, 0);
                errno = ENOMEM;
                return NULL;
@@ -2241,7 +2280,8 @@ posix_memalign(void **memptr, size_t ali
                malloc_recurse(d);
                goto err;
        }
-       r = omemalign(d, alignment, size, 0, caller());
+       SET_CALLER(d, caller());
+       r = omemalign(d, alignment, size, 0);
        d->active--;
        _MALLOC_UNLOCK(d->mutex);
        if (r == NULL) {
@@ -2279,7 +2319,8 @@ aligned_alloc(size_t alignment, size_t s
        }
 
        PROLOGUE(getpool(), "aligned_alloc")
-       r = omemalign(d, alignment, size, 0, caller());
+       SET_CALLER(d, caller());
+       r = omemalign(d, alignment, size, 0);
        EPILOGUE()
        return r;
 }
@@ -2288,6 +2329,45 @@ DEF_STRONG(aligned_alloc);
 #ifdef MALLOC_STATS
 
 static void
+print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t i)
+{
+       struct region_info *r;
+       struct chunk_info *chunkinfo;
+       uint32_t chunknum;
+       Dl_info info;
+       const char *caller, *pcaller = NULL;
+       const char *object = ".";
+       const char *pobject = ".";
+       const char *msg = "";
+
+       r = find(pool, p);
+       chunkinfo = (struct chunk_info *)r->size;
+       chunknum = find_chunknum(pool, chunkinfo, p, 0);
+       caller = r->f[chunknum];
+       if (dladdr(caller, &info) != 0) {
+               caller -= (uintptr_t)info.dli_fbase;
+               object = info.dli_fname;
+       }
+       if (chunknum > 0) {
+               chunknum--;
+               pcaller = r->f[chunknum];
+               if (dladdr(pcaller, &info) != 0) {
+                       pcaller -= (uintptr_t)info.dli_fbase;
+                       pobject = info.dli_fname;
+               }
+               if (CHUNK_FREE(chunkinfo, chunknum)) 
+                       msg = " (now free)";
+       }
+
+       wrterror(pool,
+           "write to free chunk %p[%zu..%zu]@%zu allocated at %s %p "
+           "(preceding chunk %p allocated at %s %p%s)",
+           p, i * sizeof(uint64_t),
+           (i + 1) * sizeof(uint64_t) - 1, sz, object, caller, p - sz,
+           pobject, pcaller, msg);
+}
+
+static void
 ulog(const char *format, ...)
 {
        va_list ap;
@@ -2413,23 +2493,19 @@ dump_leaks(struct leaktree *leaks)
 }
 
 static void
-dump_chunk(struct leaktree* leaks, struct chunk_info *p, void *f,
+dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
     int fromfreelist)
 {
        while (p != NULL) {
                if (mopts.malloc_verbose)
                        ulog("chunk %18p %18p %4zu %d/%d\n",
-                           p->page, ((p->bits[0] & 1) ? NULL : f),
+                           p->page, NULL,
                            B2SIZE(p->bucket), p->free, p->total);
                if (!fromfreelist) {
-                       size_t sz =  B2SIZE(p->bucket);
-                       if (p->bits[0] & 1)
-                               putleakinfo(leaks, NULL, sz, p->total -
-                                   p->free);
-                       else {
-                               putleakinfo(leaks, f, sz, 1);
-                               putleakinfo(leaks, NULL, sz,
-                                   p->total - p->free - 1);
+                       size_t i, sz =  B2SIZE(p->bucket);
+                       for (i = 0; i < p->total; i++) {
+                               if (!CHUNK_FREE(p, i))
+                                       putleakinfo(leaks, f[i], sz, 1);
                        }
                        break;
                }
@@ -2501,7 +2577,7 @@ malloc_dump1(int poolno, struct dir_info
 
        if (mopts.malloc_verbose) {
                ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
-               ulog("MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk,
+               ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
                    d->mmap_flag);
                ulog("Region slots free %zu/%zu\n",
                        d->regions_free, d->regions_total);
@@ -2589,7 +2665,7 @@ malloc_exit(void)
        int save_errno = errno;
 
        ulog("******** Start dump %s *******\n", __progname);
-       ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
+       ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
            "G=%zu\n",
            mopts.malloc_mutexes,
            mopts.internal_funcs, mopts.malloc_freecheck,



Reply via email to