malloc diff

2022-10-21 Thread Otto Moerbeek
Hi,

this diff was sent out earlier, but now that more of the
immutable parts are in, it is time to test it in this new environment.

Thanks,

-Otto

Index: stdlib/malloc.c
===================================================================
RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.274
diff -u -p -r1.274 malloc.c
--- stdlib/malloc.c 30 Jun 2022 17:15:48 -0000  1.274
+++ stdlib/malloc.c 29 Sep 2022 09:42:57 -0000
@@ -142,6 +142,7 @@ struct dir_info {
int malloc_junk;/* junk fill? */
int mmap_flag;  /* extra flag for mmap */
int mutex;
+   int malloc_mt;  /* multi-threaded mode? */
/* lists of free chunk info structs */
struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
/* lists of chunks with free slots */
@@ -181,8 +182,6 @@ struct dir_info {
 #endif /* MALLOC_STATS */
u_int32_t canary2;
 };
-#define DIR_INFO_RSZ   ((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
-   ~MALLOC_PAGEMASK)
 
 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
 
@@ -208,7 +207,6 @@ struct malloc_readonly {
/* Main bookkeeping information */
struct dir_info *malloc_pool[_MALLOC_MUTEXES];
u_int   malloc_mutexes; /* how much in actual use? */
-   int malloc_mt;  /* multi-threaded mode? */
int malloc_freecheck;   /* Extensive double free check */
int malloc_freeunmap;   /* mprotect free pages PROT_NONE? */
int def_malloc_junk;/* junk fill? */
@@ -257,7 +255,7 @@ static void malloc_exit(void);
 static inline void
 _MALLOC_LEAVE(struct dir_info *d)
 {
-   if (mopts.malloc_mt) {
+   if (d->malloc_mt) {
d->active--;
_MALLOC_UNLOCK(d->mutex);
}
@@ -266,7 +264,7 @@ _MALLOC_LEAVE(struct dir_info *d)
 static inline void
 _MALLOC_ENTER(struct dir_info *d)
 {
-   if (mopts.malloc_mt) {
+   if (d->malloc_mt) {
_MALLOC_LOCK(d->mutex);
d->active++;
}
@@ -291,7 +289,7 @@ hash(void *p)
 static inline struct dir_info *
 getpool(void)
 {
-   if (!mopts.malloc_mt)
+   if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
return mopts.malloc_pool[1];
else/* first one reserved for special pool */
return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
@@ -496,46 +494,22 @@ omalloc_init(void)
 }
 
 static void
-omalloc_poolinit(struct dir_info **dp, int mmap_flag)
+omalloc_poolinit(struct dir_info *d, int mmap_flag)
 {
-   char *p;
-   size_t d_avail, regioninfo_size;
-   struct dir_info *d;
int i, j;
 
-   /*
-* Allocate dir_info with a guard page on either side. Also
-* randomise offset inside the page at which the dir_info
-* lies (subject to alignment by 1 << MALLOC_MINSHIFT)
-*/
-   if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2), mmap_flag)) ==
-   MAP_FAILED)
-   wrterror(NULL, "malloc init mmap failed");
-   mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
-   d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
-   d = (struct dir_info *)(p + MALLOC_PAGESIZE +
-   (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
-
-   rbytes_init(d);
-   d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
-   regioninfo_size = d->regions_total * sizeof(struct region_info);
-   d->r = MMAP(regioninfo_size, mmap_flag);
-   if (d->r == MAP_FAILED) {
-   d->regions_total = 0;
-   wrterror(NULL, "malloc init mmap failed");
-   }
+   d->r = NULL;
+   d->rbytesused = sizeof(d->rbytes);
+   d->regions_free = d->regions_total = 0;
for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
LIST_INIT(&d->chunk_info_list[i]);
for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
LIST_INIT(&d->chunk_dir[i][j]);
}
-   STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
d->mmap_flag = mmap_flag;
d->malloc_junk = mopts.def_malloc_junk;
d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
d->canary2 = ~d->canary1;
-
-   *dp = d;
 }
 
 static int
@@ -550,7 +524,8 @@ omalloc_grow(struct dir_info *d)
if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
return 1;
 
-   newtotal = d->regions_total * 2;
+   newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
+   d->regions_total * 2;
newsize = PAGEROUND(newtotal * sizeof(struct region_info));
mask = newtotal - 1;
 
@@ -575,10 +550,12 @@ omalloc_grow(struct dir_info *d)
}
}
 
-   
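
Two things happen in this diff: the malloc_mt flag moves out of the read-only
mopts structure into each per-pool dir_info (getpool() now consults the first
pool instead of mopts), and omalloc_poolinit() no longer allocates the region
table up front; a pool starts with d->r == NULL and regions_total == 0, and
omalloc_grow() allocates MALLOC_INITIAL_REGIONS entries on first use. Below is
a minimal sketch of that lazy-growth pattern, with simplified stand-in types
and a plain copy instead of the rehash the real omalloc_grow() has to do,
since the real table is an open-addressing hash keyed on the region address.

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define INITIAL_REGIONS	512	/* stand-in for MALLOC_INITIAL_REGIONS */

struct region {
	void	*p;
	size_t	 size;
};

struct pool {
	struct region	*r;		/* region table, NULL until first grow */
	size_t		 regions_total;	/* 0 until first grow */
	size_t		 regions_free;
};

/*
 * Grow the region table, allocating it on first use.
 * Returns 0 on success, 1 on failure, mirroring omalloc_grow().
 */
static int
pool_grow(struct pool *d)
{
	size_t oldtotal = d->regions_total;
	size_t newtotal = oldtotal == 0 ? INITIAL_REGIONS : oldtotal * 2;
	struct region *newr;

	if ((newr = calloc(newtotal, sizeof(*newr))) == NULL)
		return 1;
	if (d->r != NULL) {
		/* simplified: the real table is a hash and must be rehashed */
		memcpy(newr, d->r, oldtotal * sizeof(*newr));
		free(d->r);
	}
	d->r = newr;
	d->regions_free += newtotal - oldtotal;
	d->regions_total = newtotal;
	return 0;
}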

Re: small malloc diff

2021-04-08 Thread Otto Moerbeek
On Fri, Apr 09, 2021 at 07:39:05AM +0200, Theo Buehler wrote:

> On Fri, Apr 09, 2021 at 07:36:35AM +0200, Otto Moerbeek wrote:
> > On Thu, Apr 01, 2021 at 11:23:58AM +0200, Otto Moerbeek wrote:
> > 
> > > Hi,
> > > 
> > > here's a small malloc diff. Most important part is an extra internal
> > > consistency check. I have been running this for a few weeks already.
> > 
> > ping?
> 
> Been running this since you posted it on several busy boxes.
> 
> ok tb

Thanks, will commit soon.

-Otto

> 
> > 
> > > 
> > >   -Otto
> > > 
> > > Index: stdlib/malloc.3
> > > ===================================================================
> > > RCS file: /cvs/src/lib/libc/stdlib/malloc.3,v
> > > retrieving revision 1.127
> > > diff -u -p -r1.127 malloc.3
> > > --- stdlib/malloc.3   25 Feb 2021 15:20:18 -0000  1.127
> > > +++ stdlib/malloc.3   1 Apr 2021 09:21:59 -0000
> > > @@ -366,7 +366,8 @@ If a program changes behavior if any of 
> > >  are used,
> > >  it is buggy.
> > >  .Pp
> > > -The default number of free pages cached is 64 per malloc pool.
> > > +The default size of the cache is 64 single page allocations.
> > > +It also caches a number of larger regions.
> > >  Multi-threaded programs use multiple pools.
> > >  .Sh RETURN VALUES
> > >  Upon successful completion, the allocation functions
> > > Index: stdlib/malloc.c
> > > ===================================================================
> > > RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
> > > retrieving revision 1.269
> > > diff -u -p -r1.269 malloc.c
> > > --- stdlib/malloc.c   9 Mar 2021 07:39:28 -0000   1.269
> > > +++ stdlib/malloc.c   1 Apr 2021 09:22:00 -0000
> > > @@ -1404,6 +1404,8 @@ ofree(struct dir_info **argpool, void *p
> > >   } else {
> > >   /* Validate and optionally canary check */
> > >   struct chunk_info *info = (struct chunk_info *)r->size;
> > > + if (info->size != sz)
> > > + wrterror(pool, "internal struct corrupt");
> > >   find_chunknum(pool, info, p, mopts.chunk_canaries);
> > >   if (!clear) {
> > >   void *tmp;
> > > @@ -1608,6 +1610,7 @@ orealloc(struct dir_info **argpool, void
> > >   }
> > >   if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
> > >   wrterror(pool, "munmap %p", (char *)r->p + rnewsz);
> > > + STATS_SUB(d->malloc_used, roldsz - rnewsz);
> > >   r->size = gnewsz;
> > >   if (MALLOC_MOVE_COND(gnewsz)) {
> > >   void *pp = MALLOC_MOVE(r->p, gnewsz);
> > > 
> > > 
> > 
> 



Re: small malloc diff

2021-04-08 Thread Theo Buehler
On Fri, Apr 09, 2021 at 07:36:35AM +0200, Otto Moerbeek wrote:
> On Thu, Apr 01, 2021 at 11:23:58AM +0200, Otto Moerbeek wrote:
> 
> > Hi,
> > 
> > here's a small malloc diff. Most important part is an extra internal
> > consistency check. I have been running this for a few weeks already.
> 
> ping?

Been running this since you posted it on several busy boxes.

ok tb

> 
> > 
> > -Otto
> > 
> > Index: stdlib/malloc.3
> > ===================================================================
> > RCS file: /cvs/src/lib/libc/stdlib/malloc.3,v
> > retrieving revision 1.127
> > diff -u -p -r1.127 malloc.3
> > --- stdlib/malloc.3 25 Feb 2021 15:20:18 -0000  1.127
> > +++ stdlib/malloc.3 1 Apr 2021 09:21:59 -0000
> > @@ -366,7 +366,8 @@ If a program changes behavior if any of 
> >  are used,
> >  it is buggy.
> >  .Pp
> > -The default number of free pages cached is 64 per malloc pool.
> > +The default size of the cache is 64 single page allocations.
> > +It also caches a number of larger regions.
> >  Multi-threaded programs use multiple pools.
> >  .Sh RETURN VALUES
> >  Upon successful completion, the allocation functions
> > Index: stdlib/malloc.c
> > ===================================================================
> > RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
> > retrieving revision 1.269
> > diff -u -p -r1.269 malloc.c
> > --- stdlib/malloc.c 9 Mar 2021 07:39:28 -0000   1.269
> > +++ stdlib/malloc.c 1 Apr 2021 09:22:00 -0000
> > @@ -1404,6 +1404,8 @@ ofree(struct dir_info **argpool, void *p
> > } else {
> > /* Validate and optionally canary check */
> > struct chunk_info *info = (struct chunk_info *)r->size;
> > +   if (info->size != sz)
> > +   wrterror(pool, "internal struct corrupt");
> > find_chunknum(pool, info, p, mopts.chunk_canaries);
> > if (!clear) {
> > void *tmp;
> > @@ -1608,6 +1610,7 @@ orealloc(struct dir_info **argpool, void
> > }
> > if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
> > wrterror(pool, "munmap %p", (char *)r->p + rnewsz);
> > +   STATS_SUB(d->malloc_used, roldsz - rnewsz);
> > r->size = gnewsz;
> > if (MALLOC_MOVE_COND(gnewsz)) {
> > void *pp = MALLOC_MOVE(r->p, gnewsz);
> > 
> > 
> 



Re: small malloc diff

2021-04-08 Thread Otto Moerbeek
On Thu, Apr 01, 2021 at 11:23:58AM +0200, Otto Moerbeek wrote:

> Hi,
> 
> here's a small malloc diff. Most important part is an extra internal
> consistency check. I have been running this for a few weeks already.

ping?

> 
>   -Otto
> 
> Index: stdlib/malloc.3
> ===================================================================
> RCS file: /cvs/src/lib/libc/stdlib/malloc.3,v
> retrieving revision 1.127
> diff -u -p -r1.127 malloc.3
> --- stdlib/malloc.3   25 Feb 2021 15:20:18 -0000  1.127
> +++ stdlib/malloc.3   1 Apr 2021 09:21:59 -0000
> @@ -366,7 +366,8 @@ If a program changes behavior if any of 
>  are used,
>  it is buggy.
>  .Pp
> -The default number of free pages cached is 64 per malloc pool.
> +The default size of the cache is 64 single page allocations.
> +It also caches a number of larger regions.
>  Multi-threaded programs use multiple pools.
>  .Sh RETURN VALUES
>  Upon successful completion, the allocation functions
> Index: stdlib/malloc.c
> ===================================================================
> RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
> retrieving revision 1.269
> diff -u -p -r1.269 malloc.c
> --- stdlib/malloc.c   9 Mar 2021 07:39:28 -0000   1.269
> +++ stdlib/malloc.c   1 Apr 2021 09:22:00 -0000
> @@ -1404,6 +1404,8 @@ ofree(struct dir_info **argpool, void *p
>   } else {
>   /* Validate and optionally canary check */
>   struct chunk_info *info = (struct chunk_info *)r->size;
> + if (info->size != sz)
> + wrterror(pool, "internal struct corrupt");
>   find_chunknum(pool, info, p, mopts.chunk_canaries);
>   if (!clear) {
>   void *tmp;
> @@ -1608,6 +1610,7 @@ orealloc(struct dir_info **argpool, void
>   }
>   if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
>   wrterror(pool, "munmap %p", (char *)r->p + rnewsz);
> + STATS_SUB(d->malloc_used, roldsz - rnewsz);
>   r->size = gnewsz;
>   if (MALLOC_MOVE_COND(gnewsz)) {
>   void *pp = MALLOC_MOVE(r->p, gnewsz);
> 
> 



small malloc diff

2021-04-01 Thread Otto Moerbeek
Hi,

here's a small malloc diff. Most important part is an extra internal
consistency check. I have been running this for a few weeks already.

-Otto

Index: stdlib/malloc.3
===================================================================
RCS file: /cvs/src/lib/libc/stdlib/malloc.3,v
retrieving revision 1.127
diff -u -p -r1.127 malloc.3
--- stdlib/malloc.3 25 Feb 2021 15:20:18 -0000  1.127
+++ stdlib/malloc.3 1 Apr 2021 09:21:59 -0000
@@ -366,7 +366,8 @@ If a program changes behavior if any of 
 are used,
 it is buggy.
 .Pp
-The default number of free pages cached is 64 per malloc pool.
+The default size of the cache is 64 single page allocations.
+It also caches a number of larger regions.
 Multi-threaded programs use multiple pools.
 .Sh RETURN VALUES
 Upon successful completion, the allocation functions
Index: stdlib/malloc.c
===================================================================
RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.269
diff -u -p -r1.269 malloc.c
--- stdlib/malloc.c 9 Mar 2021 07:39:28 -0000   1.269
+++ stdlib/malloc.c 1 Apr 2021 09:22:00 -0000
@@ -1404,6 +1404,8 @@ ofree(struct dir_info **argpool, void *p
} else {
/* Validate and optionally canary check */
struct chunk_info *info = (struct chunk_info *)r->size;
+   if (info->size != sz)
+   wrterror(pool, "internal struct corrupt");
find_chunknum(pool, info, p, mopts.chunk_canaries);
if (!clear) {
void *tmp;
@@ -1608,6 +1610,7 @@ orealloc(struct dir_info **argpool, void
}
if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
wrterror(pool, "munmap %p", (char *)r->p + rnewsz);
+   STATS_SUB(d->malloc_used, roldsz - rnewsz);
r->size = gnewsz;
if (MALLOC_MOVE_COND(gnewsz)) {
void *pp = MALLOC_MOVE(r->p, gnewsz);
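
The two functional changes here are small: ofree() now cross-checks the chunk
size recorded in the chunk_info page header against the size it already
derived from the region entry, so overwritten malloc metadata is caught before
the free proceeds, and orealloc() subtracts the unmapped tail from malloc_used
so the statistics stay correct after shrinking a large region in place. An
illustrative sketch of the cross-check idea, with simplified stand-in names
rather than the libc internals:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* trimmed-down stand-in for the real struct chunk_info */
struct chunk_info {
	size_t	size;	/* chunk size recorded when the page was carved up */
	/* ... free-slot bitmap, canary value, ... */
};

/* stand-in for wrterror(): report and abort, as malloc does on corruption */
static void
wrterror_stub(const char *msg)
{
	fprintf(stderr, "malloc: %s\n", msg);
	abort();
}

/*
 * Two independent copies of the chunk size exist: one in the chunk_info
 * header and one derived from the region entry.  If they disagree, the
 * bookkeeping has been overwritten and continuing would be unsafe.
 */
static void
check_chunk_size(const struct chunk_info *info, size_t sz_from_region)
{
	if (info->size != sz_from_region)
		wrterror_stub("internal struct corrupt");
}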