On Wed, Sep 07, 2011 at 08:53:02AM +0200, Otto Moerbeek wrote:
Hi,
This makes struct chunk_info a variable sized struct, wasting less
space for meta data, by only allocating space actually needed for the
bitmap (modulo alignment requirements).
I have been running this on amd64 and loongson for a while.
Please review this and test on as many platforms as you can find.
Got some positive test reports and no negative. I would not mind a
review though. Anybody?
-Otto
Index: malloc.c
===================================================================
RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.139
diff -u -p -r1.139 malloc.c
--- malloc.c 12 Jul 2011 14:43:42 -0000 1.139
+++ malloc.c 7 Sep 2011 06:49:36 -0000
@@ -109,8 +109,8 @@ struct dir_info {
struct region_info *r; /* region slots */
size_t regions_total; /* number of region slots */
size_t regions_free;/* number of free slots */
- /* list of free chunk info structs */
- struct chunk_head chunk_info_list;
+ /* lists of free chunk info structs */
+ struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
/* lists of chunks with free slots */
struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1];
size_t free_regions_size; /* free pages cached */
@@ -156,7 +156,7 @@ struct chunk_info {
u_short free; /* how many free chunks */
u_short total; /* how many chunk */
/* which chunks are free */
- u_short bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
+ u_short bits[1];
};
struct malloc_readonly {
@@ -622,9 +622,10 @@ omalloc_init(struct dir_info **dp)
d->regions_total = 0;
return 1;
}
- LIST_INIT(&d->chunk_info_list);
- for (i = 0; i <= MALLOC_MAXSHIFT; i++)
+ for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
+ LIST_INIT(&d->chunk_info_list[i]);
LIST_INIT(&d->chunk_dir[i]);
+ }
malloc_used += regioninfo_size;
d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
d->canary2 = ~d->canary1;
@@ -689,22 +690,36 @@ omalloc_grow(struct dir_info *d)
}
static struct chunk_info *
-alloc_chunk_info(struct dir_info *d)
+alloc_chunk_info(struct dir_info *d, int bits)
{
struct chunk_info *p;
- int i;
-
- if (LIST_EMPTY(&d->chunk_info_list)) {
- p = MMAP(MALLOC_PAGESIZE);
- if (p == MAP_FAILED)
+ size_t size, count;
+
+ if (bits == 0)
+ count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
+ else
+ count = MALLOC_PAGESIZE >> bits;
+
+ size = howmany(count, MALLOC_BITS);
+ size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
+ size = ALIGN(size);
+
+ if (LIST_EMPTY(&d->chunk_info_list[bits])) {
+ void *q;
+ int i;
+
+ q = MMAP(MALLOC_PAGESIZE);
+ if (q == MAP_FAILED)
return NULL;
malloc_used += MALLOC_PAGESIZE;
- for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++)
- LIST_INSERT_HEAD(&d->chunk_info_list, &p[i], entries);
+ count = MALLOC_PAGESIZE / size;
+ for (i = 0; i < count; i++, q += size)
+ LIST_INSERT_HEAD(&d->chunk_info_list[bits],
+ (struct chunk_info *)q, entries);
}
- p = LIST_FIRST(&d->chunk_info_list);
+ p = LIST_FIRST(&d->chunk_info_list[bits]);
LIST_REMOVE(p, entries);
- memset(p, 0, sizeof *p);
+ memset(p, 0, size);
p->canary = d->canary1;
return p;
}
@@ -803,14 +818,14 @@ omalloc_make_chunks(struct dir_info *d,
{
struct chunk_info *bp;
void*pp;
- longi, k;
+ int i, k;
/* Allocate a new bucket */
pp = map(d, MALLOC_PAGESIZE, 0);
if (pp == MAP_FAILED)
return NULL;
- bp = alloc_chunk_info(d);
+ bp = alloc_chunk_info(d, bits);
if (bp == NULL) {
unmap(d, pp, MALLOC_PAGESIZE);
return NULL;
@@ -829,7 +844,7 @@ omalloc_make_chunks(struct dir_info *d,
k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
if (k < 0) {
unmap(d, pp, MALLOC_PAGESIZE);
- LIST_INSERT_HEAD(&d->chunk_info_list, bp, entries);
+ LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
return NULL;
}
} else {
@@ -956,7 +971,7 @@ free_bytes(struct dir_info *d, struct re
{
struct chunk_head *mp;
struct chunk_info *info;
- long i;
+ int i;
info = (struct chunk_info *)r->size;
if