This changes getrnibble() in malloc to getrbyte(), so each call returns a
full byte of potential randomness.

The delayed chunk array can (and probably should) be larger, but spanning
it requires more than a nibble of randomness. Its size is not changed yet,
but with this diff it can grow (or even shrink) in the future.

The bit offset in malloc_bytes() cannot be spanned by a single nibble
either. Instead of adding two nibbles together, just use a whole byte.
Everything is simpler now.
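A rough sketch of the resulting pattern, not the diff itself: a single
random byte picks the starting offset and a power-of-two mask wraps it.
Here "total" stands in for bp->total (or mopts.malloc_cache), and the
random_start() helper is hypothetical.

#include <stddef.h>

static size_t
random_start(unsigned char rbyte, size_t total)
{
	size_t i = rbyte;	/* 0..255, instead of two combined nibbles */

	if (i >= total)
		i &= total - 1;	/* total must be a power of two */
	return i;
}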


Index: stdlib/malloc.c
===================================================================
RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.158
diff -u -p -r1.158 malloc.c
--- stdlib/malloc.c     23 Apr 2014 15:07:27 -0000      1.158
+++ stdlib/malloc.c     29 Apr 2014 19:33:12 -0000
@@ -61,7 +61,7 @@
 
 #define MALLOC_MAXCHUNK                (1 << MALLOC_MAXSHIFT)
 #define MALLOC_MAXCACHE                256
-#define MALLOC_DELAYED_CHUNKS  15      /* max of getrnibble() */
+#define MALLOC_DELAYED_CHUNK_MASK      15
 #define MALLOC_INITIAL_REGIONS 512
 #define MALLOC_DEFAULT_CACHE   64
 
@@ -115,7 +115,7 @@ struct dir_info {
                                        /* free pages cache */
        struct region_info free_regions[MALLOC_MAXCACHE];
                                        /* delayed free chunk slots */
-       void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
+       void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
        u_short chunk_start;
 #ifdef MALLOC_STATS
        size_t inserts;
@@ -191,9 +191,9 @@ static int  malloc_active;          /* status of 
 static size_t  malloc_guarded;         /* bytes used for guards */
 static size_t  malloc_used;            /* bytes allocated */
 
-static size_t rnibblesused;            /* random nibbles used */
+static size_t rbytesused;              /* random bytes used */
 static u_char rbytes[512];             /* random bytes */
-static u_char getrnibble(void);
+static u_char getrbyte(void);
 
 extern char    *__progname;
 
@@ -273,18 +273,18 @@ static void
 rbytes_init(void)
 {
        arc4random_buf(rbytes, sizeof(rbytes));
-       rnibblesused = 0;
+       rbytesused = 0;
 }
 
 static inline u_char
-getrnibble(void)
+getrbyte(void)
 {
        u_char x;
 
-       if (rnibblesused >= 2 * sizeof(rbytes))
+       if (rbytesused >= sizeof(rbytes))
                rbytes_init();
-       x = rbytes[rnibblesused++ / 2];
-       return (rnibblesused & 1 ? x & 0xf : x >> 4);
+       x = rbytes[rbytesused++];
+       return x;
 }
 
 /*
@@ -317,7 +317,7 @@ unmap(struct dir_info *d, void *p, size_
        rsz = mopts.malloc_cache - d->free_regions_size;
        if (psz > rsz)
                tounmap = psz - rsz;
-       offset = getrnibble() + (getrnibble() << 4);
+       offset = getrbyte();
        for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
                r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
                if (r->p != NULL) {
@@ -398,7 +398,7 @@ map(struct dir_info *d, size_t sz, int z
                /* zero fill not needed */
                return p;
        }
-       offset = getrnibble() + (getrnibble() << 4);
+       offset = getrbyte();
        for (i = 0; i < mopts.malloc_cache; i++) {
                r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
                if (r->p != NULL) {
@@ -920,7 +920,7 @@ malloc_bytes(struct dir_info *d, size_t 
 
        i = d->chunk_start;
        if (bp->free > 1)
-               i += getrnibble();
+               i += getrbyte();
        if (i >= bp->total)
                i &= bp->total - 1;
        for (;;) {
@@ -1200,7 +1200,7 @@ ofree(void *p)
                if (mopts.malloc_junk && sz > 0)
                        memset(p, SOME_FREEJUNK, sz);
                if (!mopts.malloc_freenow) {
-                       i = getrnibble();
+                       i = getrbyte() & MALLOC_DELAYED_CHUNK_MASK;
                        tmp = p;
                        p = g_pool->delayed_chunks[i];
                        g_pool->delayed_chunks[i] = tmp;
