Author: jasone
Date: Thu May 10 18:29:40 2012
New Revision: 235238
URL: http://svn.freebsd.org/changeset/base/235238

Log:
  Import jemalloc 37b6f95dcd866f51c91488531a2efc3ed4c2b754 (dev branch,
  prior to the 3.0.0 release).  This version is likely very close to what
  will become 3.0.0.

Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/FREEBSD-Xlist
  head/contrib/jemalloc/FREEBSD-diffs
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/arena.h
  head/contrib/jemalloc/include/jemalloc/internal/atomic.h
  head/contrib/jemalloc/include/jemalloc/internal/ctl.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
  head/contrib/jemalloc/include/jemalloc/internal/mutex.h
  head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
  head/contrib/jemalloc/include/jemalloc/internal/prof.h
  head/contrib/jemalloc/include/jemalloc/internal/tcache.h
  head/contrib/jemalloc/include/jemalloc/internal/tsd.h
  head/contrib/jemalloc/include/jemalloc/internal/util.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/chunk.c
  head/contrib/jemalloc/src/chunk_mmap.c
  head/contrib/jemalloc/src/ctl.c
  head/contrib/jemalloc/src/huge.c
  head/contrib/jemalloc/src/jemalloc.c
  head/contrib/jemalloc/src/mutex.c
  head/contrib/jemalloc/src/prof.c
  head/contrib/jemalloc/src/quarantine.c
  head/contrib/jemalloc/src/stats.c
  head/contrib/jemalloc/src/tcache.c
  head/contrib/jemalloc/src/tsd.c
  head/contrib/jemalloc/src/util.c

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog     Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/ChangeLog     Thu May 10 18:29:40 2012        (r235238)
@@ -19,9 +19,10 @@ found in the git revision history:
 
   New features:
   - Implement Valgrind support, redzones, and quarantine.
-  - Add support for additional operating systems:
+  - Add support for additional platforms:
     + FreeBSD
     + Mac OS X Lion
+    + MinGW
   - Add support for additional architectures:
     + MIPS
     + SH4
@@ -64,18 +65,24 @@ found in the git revision history:
   - Remove the --enable-sysv configure option.
 
   Bug fixes:
-  - Fix fork-related bugs that could cause deadlock in children between fork
-    and exec.
   - Fix a statistics-related bug in the "thread.arena" mallctl that could cause
     invalid statistics and crashes.
-  - Work around TLS dallocation via free() on Linux.  This bug could cause
+  - Work around TLS deallocation via free() on Linux.  This bug could cause
     write-after-free memory corruption.
+  - Fix a potential deadlock that could occur during interval- and
+    growth-triggered heap profile dumps.
   - Fix chunk_alloc_dss() to stop claiming memory is zeroed.  This bug could
     cause memory corruption and crashes with --enable-dss specified.
+  - Fix fork-related bugs that could cause deadlock in children between fork
+    and exec.
   - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
   - Fix realloc(p, 0) to act like free(p).
   - Do not enforce minimum alignment in memalign().
   - Check for NULL pointer in malloc_usable_size().
+  - Fix an off-by-one heap profile statistics bug that could be observed in
+    interval- and growth-triggered heap profiles.
+  - Fix the "epoch" mallctl to update cached stats even if the passed in epoch
+    is 0.
   - Fix bin->runcur management to fix a layout policy bug.  This bug did not
     affect correctness.
   - Fix a bug in choose_arena_hard() that potentially caused more arenas to be

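The "epoch" mallctl fix noted in the ChangeLog above matters to anyone polling
allocator statistics: stats.* values are cached and only refreshed when the
epoch is advanced.  Below is a minimal consumer-side sketch of that interface,
assuming FreeBSD's <malloc_np.h> declaration of mallctl() (other platforms use
the je_-prefixed names from <jemalloc/jemalloc.h>) and an allocator built with
--enable-stats:

#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>	/* assumed: FreeBSD declaration of mallctl() */

int
main(void)
{
	uint64_t epoch = 0;	/* 0 is now accepted; stats still refresh */
	size_t sz = sizeof(epoch);
	size_t allocated;

	/* Advance the epoch so that stats.* reflects the current state. */
	if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0)
		return (1);

	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
		return (1);

	printf("allocated: %zu bytes\n", allocated);
	return (0);
}
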
Modified: head/contrib/jemalloc/FREEBSD-Xlist
==============================================================================
--- head/contrib/jemalloc/FREEBSD-Xlist Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/FREEBSD-Xlist Thu May 10 18:29:40 2012        (r235238)
@@ -18,6 +18,7 @@ include/jemalloc/internal/jemalloc_inter
 include/jemalloc/internal/size_classes.sh
 include/jemalloc/jemalloc.h.in
 include/jemalloc/jemalloc_defs.h.in
+include/msvc_compat/
 install-sh
 src/zone.c
 test/

Modified: head/contrib/jemalloc/FREEBSD-diffs
==============================================================================
--- head/contrib/jemalloc/FREEBSD-diffs Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/FREEBSD-diffs Thu May 10 18:29:40 2012        (r235238)
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index e8a5722..cec85b5 100644
+index 93c16dc..b5c5595 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -51,12 +51,23 @@
@@ -27,7 +27,7 @@ index e8a5722..cec85b5 100644
        <refsect2>
          <title>Standard API</title>
          <funcprototype>
-@@ -2091,4 +2102,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2101,4 +2112,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
     <para>The <function>posix_memalign<parameter/></function> function conforms
      to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
    </refsect1>
@@ -45,7 +45,7 @@ index e8a5722..cec85b5 100644
 +  </refsect1>
  </refentry>
 diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index b61abe8..edbb437 100644
+index 268cd14..cfb1fb9 100644
 --- a/include/jemalloc/internal/jemalloc_internal.h.in
 +++ b/include/jemalloc/internal/jemalloc_internal.h.in
 @@ -1,5 +1,8 @@
@@ -54,12 +54,12 @@ index b61abe8..edbb437 100644
 +#include "libc_private.h"
 +#include "namespace.h"
 +
- #include <sys/mman.h>
- #include <sys/param.h>
- #include <sys/syscall.h>
-@@ -35,6 +38,9 @@
- #include <pthread.h>
  #include <math.h>
+ #ifdef _WIN32
+ #  include <windows.h>
+@@ -54,6 +57,9 @@ typedef intptr_t ssize_t;
+ #endif
+ #include <fcntl.h>
  
 +#include "un-namespace.h"
 +#include "libc_private.h"
@@ -68,10 +68,10 @@ index b61abe8..edbb437 100644
  #include "../jemalloc@install_suffix@.h"
  
 diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
-index 8837ef5..d7133f4 100644
+index de44e14..564d604 100644
 --- a/include/jemalloc/internal/mutex.h
 +++ b/include/jemalloc/internal/mutex.h
-@@ -39,9 +39,6 @@ struct malloc_mutex_s {
+@@ -43,9 +43,6 @@ struct malloc_mutex_s {
  
  #ifdef JEMALLOC_LAZY_LOCK
  extern bool isthreaded;
@@ -82,10 +82,10 @@ index 8837ef5..d7133f4 100644
  
  bool  malloc_mutex_init(malloc_mutex_t *mutex);
 diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
-index bb1b63e..00eb169 100644
+index b816647..b8ce6b1 100644
 --- a/include/jemalloc/internal/private_namespace.h
 +++ b/include/jemalloc/internal/private_namespace.h
-@@ -165,7 +165,6 @@
+@@ -186,7 +186,6 @@
  #define       iqalloc JEMALLOC_N(iqalloc)
  #define       iralloc JEMALLOC_N(iralloc)
  #define       isalloc JEMALLOC_N(isalloc)
@@ -94,7 +94,7 @@ index bb1b63e..00eb169 100644
  #define       jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
  #define       jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
 diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in
-index f0581db..f26d8bc 100644
+index ad06948..505dd38 100644
 --- a/include/jemalloc/jemalloc.h.in
 +++ b/include/jemalloc/jemalloc.h.in
 @@ -15,6 +15,7 @@ extern "C" {
@@ -107,10 +107,10 @@ index f0581db..f26d8bc 100644
  #define       ALLOCM_LG_ALIGN(la)     (la)
 diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
 new file mode 100644
-index 0000000..2c5797f
+index 0000000..9efab93
 --- /dev/null
 +++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,76 @@
+@@ -0,0 +1,80 @@
 +/*
 + * Override settings that were generated in jemalloc_defs.h as necessary.
 + */
@@ -154,8 +154,12 @@ index 0000000..2c5797f
 +#  define LG_SIZEOF_PTR               2
 +#endif
 +#ifdef __mips__
++#ifdef __mips_n64
++#  define LG_SIZEOF_PTR               3
++#else
 +#  define LG_SIZEOF_PTR               2
 +#endif
++#endif
 +#ifdef __powerpc64__
 +#  define LG_SIZEOF_PTR               3
 +#elif defined(__powerpc__)
@@ -188,20 +192,21 @@ index 0000000..2c5797f
 +#define       pthread_mutex_lock      _pthread_mutex_lock
 +#define       pthread_mutex_unlock    _pthread_mutex_unlock
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index f9c8916..8e24a5a 100644
+index d42e91d..cdf6222 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
-@@ -8,6 +8,9 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
+@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
  malloc_tsd_data(, thread_allocated, thread_allocated_t,
      THREAD_ALLOCATED_INITIALIZER)
  
-+const char    *__malloc_options_1_0;
++/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
++const char    *__malloc_options_1_0 = NULL;
 +__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
 +
  /* Runtime configuration options. */
- const char    *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
+ const char    *je_malloc_conf;
  #ifdef JEMALLOC_DEBUG
-@@ -401,7 +404,8 @@ malloc_conf_init(void)
+@@ -429,7 +433,8 @@ malloc_conf_init(void)
  #endif
                            ;
  
@@ -212,10 +217,10 @@ index f9c8916..8e24a5a 100644
                                 * Do nothing; opts is already initialized to
                                 * the value of the MALLOC_CONF environment
 diff --git a/src/mutex.c b/src/mutex.c
-index 4b8ce57..7be5fc9 100644
+index 37a843e..4a90a05 100644
 --- a/src/mutex.c
 +++ b/src/mutex.c
-@@ -63,6 +63,17 @@ pthread_create(pthread_t *__restrict thread,
+@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
  #ifdef JEMALLOC_MUTEX_INIT_CB
  int   _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
      void *(calloc_cb)(size_t, size_t));
@@ -234,14 +239,14 @@ index 4b8ce57..7be5fc9 100644
  
  bool
 diff --git a/src/util.c b/src/util.c
-index 99ae26d..b80676c 100644
+index 9b73c3e..f94799f 100644
 --- a/src/util.c
 +++ b/src/util.c
-@@ -60,6 +60,22 @@ wrtmessage(void *cbopaque, const char *s)
- void  (*je_malloc_message)(void *, const char *s)
-     JEMALLOC_ATTR(visibility("default")) = wrtmessage;
+@@ -58,6 +58,22 @@ wrtmessage(void *cbopaque, const char *s)
+ 
+ JEMALLOC_EXPORT void  (*je_malloc_message)(void *, const char *s);
  
-+JEMALLOC_CATTR(visibility("hidden"), static)
++JEMALLOC_ATTR(visibility("hidden"))
 +void
 +wrtmessage_1_0(const char *s1, const char *s2, const char *s3,
 +    const char *s4)
@@ -258,5 +263,5 @@ index 99ae26d..b80676c 100644
 +__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0);
 +
  /*
-  * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
-  * provide a wrapper.
+  * Wrapper around malloc_message() that avoids the need for
+  * je_malloc_message(...) throughout the code.

Modified: head/contrib/jemalloc/VERSION
==============================================================================
--- head/contrib/jemalloc/VERSION       Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/VERSION       Thu May 10 18:29:40 2012        (r235238)
@@ -1 +1 @@
-1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca
+1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754

Modified: head/contrib/jemalloc/doc/jemalloc.3
==============================================================================
--- head/contrib/jemalloc/doc/jemalloc.3        Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/doc/jemalloc.3        Thu May 10 18:29:40 2012        (r235238)
@@ -2,12 +2,12 @@
 .\"     Title: JEMALLOC
 .\"    Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\"      Date: 04/21/2012
+.\"      Date: 05/09/2012
 .\"    Manual: User Manual
-.\"    Source: jemalloc 1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca
+.\"    Source: jemalloc 1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754
 .\"  Language: English
 .\"
-.TH "JEMALLOC" "3" "04/21/2012" "jemalloc 1.0.0-286-ga8f8d7540d" "User Manual"
+.TH "JEMALLOC" "3" "05/09/2012" "jemalloc 1.0.0-335-g37b6f95dcd" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 1\&.0\&.0\-286\-ga8f8d7540d66ddee7337db80c92890916e1063ca\&. More information can be found at the
+This manual describes jemalloc 1\&.0\&.0\-335\-g37b6f95dcd866f51c91488531a2efc3ed4c2b754\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -567,6 +567,12 @@ was specified during build configuration
 was specified during build configuration\&.
 .RE
 .PP
+"config\&.mremap" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-mremap\fR
+was specified during build configuration\&.
+.RE
+.PP
 "config\&.munmap" (\fBbool\fR) r\-
 .RS 4
 \fB\-\-enable\-munmap\fR
@@ -1462,7 +1468,7 @@ jemalloc website
 .IP " 2." 4
 Valgrind
 .RS 4
-\%http://http://valgrind.org/
+\%http://valgrind.org/
 .RE
 .IP " 3." 4
 gperftools package

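The man page hunk above documents the new read-only "config.mremap" mallctl.
A short sketch of querying it, under the same <malloc_np.h> assumption as the
earlier example:

#include <stdbool.h>
#include <stdio.h>
#include <malloc_np.h>	/* assumed: FreeBSD declaration of mallctl() */

int
main(void)
{
	bool mremap_enabled;
	size_t sz = sizeof(mremap_enabled);

	/* "config.*" nodes report how the allocator was configured at build time. */
	if (mallctl("config.mremap", &mremap_enabled, &sz, NULL, 0) != 0)
		return (1);
	printf("--enable-mremap: %s\n", mremap_enabled ? "yes" : "no");
	return (0);
}
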
Modified: head/contrib/jemalloc/include/jemalloc/internal/arena.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/arena.h     Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/include/jemalloc/internal/arena.h     Thu May 10 18:29:40 2012        (r235238)
@@ -109,7 +109,8 @@ struct arena_chunk_map_s {
         *
         * p : run page offset
         * s : run size
-        * c : (binind+1) for size class (used only if prof_promote is true)
+        * n : binind for size class; large objects set these to BININD_INVALID
+        *     except for promoted allocations (see prof_promote)
         * x : don't care
         * - : 0
         * + : 1
@@ -117,35 +118,38 @@ struct arena_chunk_map_s {
         * [dula] : bit unset
         *
         *   Unallocated (clean):
-        *     ssssssss ssssssss ssss---- ----du-a
-        *     xxxxxxxx xxxxxxxx xxxx---- -----Uxx
-        *     ssssssss ssssssss ssss---- ----dU-a
+        *     ssssssss ssssssss ssss1111 1111du-a
+        *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
+        *     ssssssss ssssssss ssss1111 1111dU-a
         *
         *   Unallocated (dirty):
-        *     ssssssss ssssssss ssss---- ----D--a
-        *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
-        *     ssssssss ssssssss ssss---- ----D--a
+        *     ssssssss ssssssss ssss1111 1111D--a
+        *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+        *     ssssssss ssssssss ssss1111 1111D--a
         *
         *   Small:
-        *     pppppppp pppppppp pppp---- ----d--A
-        *     pppppppp pppppppp pppp---- -------A
-        *     pppppppp pppppppp pppp---- ----d--A
+        *     pppppppp pppppppp ppppnnnn nnnnd--A
+        *     pppppppp pppppppp ppppnnnn nnnn---A
+        *     pppppppp pppppppp ppppnnnn nnnnd--A
         *
         *   Large:
-        *     ssssssss ssssssss ssss---- ----D-LA
-        *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
-        *     -------- -------- -------- ----D-LA
+        *     ssssssss ssssssss ssss1111 1111D-LA
+        *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+        *     -------- -------- ----1111 1111D-LA
         *
         *   Large (sampled, size <= PAGE):
-        *     ssssssss ssssssss sssscccc ccccD-LA
+        *     ssssssss ssssssss ssssnnnn nnnnD-LA
         *
         *   Large (not sampled, size == PAGE):
-        *     ssssssss ssssssss ssss---- ----D-LA
+        *     ssssssss ssssssss ssss1111 1111D-LA
         */
        size_t                          bits;
-#define        CHUNK_MAP_CLASS_SHIFT   4
-#define        CHUNK_MAP_CLASS_MASK    ((size_t)0xff0U)
-#define        CHUNK_MAP_FLAGS_MASK    ((size_t)0xfU)
+#define        CHUNK_MAP_BININD_SHIFT  4
+#define        BININD_INVALID          ((size_t)0xffU)
+/*     CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
+#define        CHUNK_MAP_BININD_MASK   ((size_t)0xff0U)
+#define        CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
+#define        CHUNK_MAP_FLAGS_MASK    ((size_t)0xcU)
 #define        CHUNK_MAP_DIRTY         ((size_t)0x8U)
 #define        CHUNK_MAP_UNZEROED      ((size_t)0x4U)
 #define        CHUNK_MAP_LARGE         ((size_t)0x2U)
@@ -409,8 +413,14 @@ void       *arena_malloc_small(arena_t *arena,
 void   *arena_malloc_large(arena_t *arena, size_t size, bool zero);
 void   *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
 void   arena_prof_promoted(const void *ptr, size_t size);
-void   arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+void   arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm);
+void   arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind, arena_chunk_map_t *mapelm);
+void   arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind);
+void   arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr);
 void   arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 void   arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
@@ -430,6 +440,31 @@ void       arena_postfork_child(arena_t *arena
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
+arena_chunk_map_t      *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
+    size_t pageind);
+size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+void   arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
+    size_t size, size_t flags);
+void   arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
+    size_t size);
+void   arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
+    size_t size, size_t flags);
+void   arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
+    size_t binind);
+void   arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
+    size_t runind, size_t binind, size_t flags);
+void   arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
+    size_t unzeroed);
+size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned       arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
@@ -442,6 +477,227 @@ void      arena_dalloc(arena_t *arena, arena_
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
+#  ifdef JEMALLOC_ARENA_INLINE_A
+JEMALLOC_INLINE arena_chunk_map_t *
+arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
+{
+
+       assert(pageind >= map_bias);
+       assert(pageind < chunk_npages);
+
+       return (&chunk->map[pageind-map_bias]);
+}
+
+JEMALLOC_INLINE size_t *
+arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
+{
+
+       return (&arena_mapp_get(chunk, pageind)->bits);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
+{
+
+       return (*arena_mapbitsp_get(chunk, pageind));
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+       return (mapbits & ~PAGE_MASK);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+           (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
+       return (mapbits & ~PAGE_MASK);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+           CHUNK_MAP_ALLOCATED);
+       return (mapbits >> LG_PAGE);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+       size_t binind;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+       assert(binind < NBINS || binind == BININD_INVALID);
+       return (binind);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       return (mapbits & CHUNK_MAP_DIRTY);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       return (mapbits & CHUNK_MAP_UNZEROED);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       return (mapbits & CHUNK_MAP_LARGE);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
+{
+       size_t mapbits;
+
+       mapbits = arena_mapbits_get(chunk, pageind);
+       return (mapbits & CHUNK_MAP_ALLOCATED);
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
+    size_t flags)
+{
+       size_t *mapbitsp;
+
+       mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       assert((size & PAGE_MASK) == 0);
+       assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+       *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
+    size_t size)
+{
+       size_t *mapbitsp;
+
+       mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       assert((size & PAGE_MASK) == 0);
+       assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+       *mapbitsp = size | (*mapbitsp & PAGE_MASK);
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
+    size_t flags)
+{
+       size_t *mapbitsp;
+
+       mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       assert((size & PAGE_MASK) == 0);
+       assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+       *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
+           CHUNK_MAP_ALLOCATED;
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
+    size_t binind)
+{
+       size_t *mapbitsp;
+
+       assert(binind <= BININD_INVALID);
+       mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
+       *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
+           CHUNK_MAP_BININD_SHIFT);
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
+    size_t binind, size_t flags)
+{
+       size_t *mapbitsp;
+
+       assert(binind < BININD_INVALID);
+       mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       assert(pageind - runind >= map_bias);
+       assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+       *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
+           flags | CHUNK_MAP_ALLOCATED;
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
+    size_t unzeroed)
+{
+       size_t *mapbitsp;
+
+       mapbitsp = arena_mapbitsp_get(chunk, pageind);
+       *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
+}
+
+JEMALLOC_INLINE size_t
+arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
+{
+       size_t binind;
+
+       binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+
+       if (config_debug) {
+               arena_chunk_t *chunk;
+               arena_t *arena;
+               size_t pageind;
+               size_t actual_mapbits;
+               arena_run_t *run;
+               arena_bin_t *bin;
+               size_t actual_binind;
+               arena_bin_info_t *bin_info;
+
+               assert(binind != BININD_INVALID);
+               assert(binind < NBINS);
+               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+               arena = chunk->arena;
+               pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+               actual_mapbits = arena_mapbits_get(chunk, pageind);
+               assert(mapbits == actual_mapbits);
+               assert(arena_mapbits_large_get(chunk, pageind) == 0);
+               assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+               run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+                   (actual_mapbits >> LG_PAGE)) << LG_PAGE));
+               bin = run->bin;
+               actual_binind = bin - arena->bins;
+               assert(binind == actual_binind);
+               bin_info = &arena_bin_info[actual_binind];
+               assert(((uintptr_t)ptr - ((uintptr_t)run +
+                   (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+                   == 0);
+       }
+
+       return (binind);
+}
+#  endif /* JEMALLOC_ARENA_INLINE_A */
+
+#  ifdef JEMALLOC_ARENA_INLINE_B
 JEMALLOC_INLINE size_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
@@ -535,7 +791,7 @@ arena_prof_ctx_get(const void *ptr)
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       mapbits = chunk->map[pageind-map_bias].bits;
+       mapbits = arena_mapbits_get(chunk, pageind);
        assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
        if ((mapbits & CHUNK_MAP_LARGE) == 0) {
                if (prof_promote)
@@ -544,7 +800,8 @@ arena_prof_ctx_get(const void *ptr)
                        arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
                            (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
                            LG_PAGE));
-                       size_t binind = arena_bin_index(chunk->arena, run->bin);
+                       size_t binind = arena_ptr_small_binind_get(ptr,
+                           mapbits);
                        arena_bin_info_t *bin_info = &arena_bin_info[binind];
                        unsigned regind;
 
@@ -554,7 +811,7 @@ arena_prof_ctx_get(const void *ptr)
                            sizeof(prof_ctx_t *)));
                }
        } else
-               ret = chunk->map[pageind-map_bias].prof_ctx;
+               ret = arena_mapp_get(chunk, pageind)->prof_ctx;
 
        return (ret);
 }
@@ -571,19 +828,18 @@ arena_prof_ctx_set(const void *ptr, prof
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       mapbits = chunk->map[pageind-map_bias].bits;
+       mapbits = arena_mapbits_get(chunk, pageind);
        assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
        if ((mapbits & CHUNK_MAP_LARGE) == 0) {
                if (prof_promote == false) {
                        arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
                            (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
                            LG_PAGE));
-                       arena_bin_t *bin = run->bin;
                        size_t binind;
                        arena_bin_info_t *bin_info;
                        unsigned regind;
 
-                       binind = arena_bin_index(chunk->arena, bin);
+                       binind = arena_ptr_small_binind_get(ptr, mapbits);
                        bin_info = &arena_bin_info[binind];
                        regind = arena_run_regind(run, bin_info, ptr);
 
@@ -592,7 +848,7 @@ arena_prof_ctx_set(const void *ptr, prof
                } else
                        assert((uintptr_t)ctx == (uintptr_t)1U);
        } else
-               chunk->map[pageind-map_bias].prof_ctx = ctx;
+               arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
 }
 
 JEMALLOC_INLINE void *
@@ -631,35 +887,42 @@ arena_salloc(const void *ptr, bool demot
 {
        size_t ret;
        arena_chunk_t *chunk;
-       size_t pageind, mapbits;
+       size_t pageind, binind;
 
        assert(ptr != NULL);
        assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       mapbits = chunk->map[pageind-map_bias].bits;
-       assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
-       if ((mapbits & CHUNK_MAP_LARGE) == 0) {
-               arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-                   (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
-               size_t binind = arena_bin_index(chunk->arena, run->bin);
-               arena_bin_info_t *bin_info = &arena_bin_info[binind];
-               assert(((uintptr_t)ptr - ((uintptr_t)run +
-                   (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
-                   == 0);
-               ret = bin_info->reg_size;
-       } else {
+       assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+       binind = arena_mapbits_binind_get(chunk, pageind);
+       if (binind == BININD_INVALID || (config_prof && demote == false &&
+           prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
+               /*
+                * Large allocation.  In the common case (demote == true), and
+                * as this is an inline function, most callers will only end up
+                * looking at binind to determine that ptr is a small
+                * allocation.
+                */
                assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-               ret = mapbits & ~PAGE_MASK;
-               if (config_prof && demote && prof_promote && ret == PAGE &&
-                   (mapbits & CHUNK_MAP_CLASS_MASK) != 0) {
-                       size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
-                           CHUNK_MAP_CLASS_SHIFT) - 1;
-                       assert(binind < NBINS);
-                       ret = arena_bin_info[binind].reg_size;
-               }
+               ret = arena_mapbits_large_size_get(chunk, pageind);
                assert(ret != 0);
+               assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
+               assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
+                   pageind+(ret>>LG_PAGE)-1) == 0);
+               assert(binind == arena_mapbits_binind_get(chunk,
+                   pageind+(ret>>LG_PAGE)-1));
+               assert(arena_mapbits_dirty_get(chunk, pageind) ==
+                   arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
+       } else {
+               /*
+                * Small allocation (possibly promoted to a large object due to
+                * prof_promote).
+                */
+               assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+                   arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
+                   pageind)) == binind);
+               ret = arena_bin_info[binind].reg_size;
        }
 
        return (ret);
@@ -668,8 +931,7 @@ arena_salloc(const void *ptr, bool demot
 JEMALLOC_INLINE void
 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
 {
-       size_t pageind;
-       arena_chunk_map_t *mapelm;
+       size_t pageind, mapbits;
        tcache_t *tcache;
 
        assert(arena != NULL);
@@ -678,47 +940,30 @@ arena_dalloc(arena_t *arena, arena_chunk
        assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-       mapelm = &chunk->map[pageind-map_bias];
-       assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
-       if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
+       mapbits = arena_mapbits_get(chunk, pageind);
+       assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+       if ((mapbits & CHUNK_MAP_LARGE) == 0) {
                /* Small allocation. */
-               if (try_tcache && (tcache = tcache_get(false)) != NULL)
-                       tcache_dalloc_small(tcache, ptr);
-               else {
-                       arena_run_t *run;
-                       arena_bin_t *bin;
+               if (try_tcache && (tcache = tcache_get(false)) != NULL) {
+                       size_t binind;
 
-                       run = (arena_run_t *)((uintptr_t)chunk +
-                           (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
-                           LG_PAGE));
-                       bin = run->bin;
-                       if (config_debug) {
-                               size_t binind = arena_bin_index(arena, bin);
-                               UNUSED arena_bin_info_t *bin_info =
-                                   &arena_bin_info[binind];
-                               assert(((uintptr_t)ptr - ((uintptr_t)run +
-                                   (uintptr_t)bin_info->reg0_offset)) %
-                                   bin_info->reg_interval == 0);
-                       }
-                       malloc_mutex_lock(&bin->lock);
-                       arena_dalloc_bin(arena, chunk, ptr, mapelm);
-                       malloc_mutex_unlock(&bin->lock);
-               }
+                       binind = arena_ptr_small_binind_get(ptr, mapbits);
+                       tcache_dalloc_small(tcache, ptr, binind);
+               } else
+                       arena_dalloc_small(arena, chunk, ptr, pageind);
        } else {
-               size_t size = mapelm->bits & ~PAGE_MASK;
+               size_t size = arena_mapbits_large_size_get(chunk, pageind);
 
                assert(((uintptr_t)ptr & PAGE_MASK) == 0);
 
                if (try_tcache && size <= tcache_maxclass && (tcache =
                    tcache_get(false)) != NULL) {
                        tcache_dalloc_large(tcache, ptr, size);
-               } else {
-                       malloc_mutex_lock(&arena->lock);
+               } else
                        arena_dalloc_large(arena, chunk, ptr);
-                       malloc_mutex_unlock(&arena->lock);
-               }
        }
 }
+#  endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
 
 #endif /* JEMALLOC_H_INLINES */

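The revised chunk-map layout above packs the size class index (the "n"/binind
bits) into bits 4..11 of each map word, with BININD_INVALID (0xff) marking
large runs.  The following standalone sketch mirrors the decode performed by
the arena_mapbits_binind_get() accessor from the hunk above; the constants are
copied from the diff, while LG_PAGE is assumed to be 12 for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_PAGE			12	/* assumed; matches 4 KiB pages */
#define CHUNK_MAP_BININD_SHIFT	4
#define BININD_INVALID		((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK	((size_t)0xff0U)
#define CHUNK_MAP_LARGE		((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED	((size_t)0x1U)

/* Same extraction as arena_mapbits_binind_get(). */
static size_t
mapbits_binind(size_t mapbits)
{
	return ((mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT);
}

int
main(void)
{
	/* Hypothetical small-run word: run page offset 3, binind 5, allocated. */
	size_t small = ((size_t)3 << LG_PAGE) |
	    ((size_t)5 << CHUNK_MAP_BININD_SHIFT) | CHUNK_MAP_ALLOCATED;
	/* Hypothetical large word: the binind bits hold BININD_INVALID. */
	size_t large = (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) |
	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

	assert(mapbits_binind(small) == 5);
	assert(mapbits_binind(large) == BININD_INVALID);
	printf("small binind=%zu, large binind=%zu\n",
	    mapbits_binind(small), mapbits_binind(large));
	return (0);
}
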
Modified: head/contrib/jemalloc/include/jemalloc/internal/atomic.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/atomic.h    Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/include/jemalloc/internal/atomic.h    Thu May 10 18:29:40 2012        (r235238)
@@ -47,6 +47,20 @@ atomic_sub_uint64(uint64_t *p, uint64_t 
 
        return (__sync_sub_and_fetch(p, x));
 }
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+       return (InterlockedExchangeAdd64(p, x));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+       return (InterlockedExchangeAdd64(p, -((int64_t)x)));
+}
 #elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
@@ -145,6 +159,20 @@ atomic_sub_uint32(uint32_t *p, uint32_t 
 
        return (__sync_sub_and_fetch(p, x));
 }
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+       return (InterlockedExchangeAdd(p, x));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+       return (InterlockedExchangeAdd(p, -((int32_t)x)));
+}
 #elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)

Modified: head/contrib/jemalloc/include/jemalloc/internal/ctl.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/ctl.h       Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/include/jemalloc/internal/ctl.h       Thu May 10 18:29:40 2012        (r235238)
@@ -2,6 +2,8 @@
 #ifdef JEMALLOC_H_TYPES
 
 typedef struct ctl_node_s ctl_node_t;
+typedef struct ctl_named_node_s ctl_named_node_t;
+typedef struct ctl_indexed_node_s ctl_indexed_node_t;
 typedef struct ctl_arena_stats_s ctl_arena_stats_t;
 typedef struct ctl_stats_s ctl_stats_t;
 
@@ -11,20 +13,21 @@ typedef struct ctl_stats_s ctl_stats_t;
 
 struct ctl_node_s {
        bool                    named;
-       union {
-               struct {
-                       const char      *name;
-                       /* If (nchildren == 0), this is a terminal node. */
-                       unsigned        nchildren;
-                       const   ctl_node_t *children;
-               } named;
-               struct {
-                       const ctl_node_t *(*index)(const size_t *, size_t,
-                           size_t);
-               } indexed;
-       } u;
-       int     (*ctl)(const size_t *, size_t, void *, size_t *, void *,
-           size_t);
+};
+
+struct ctl_named_node_s {
+       struct ctl_node_s       node;
+       const char              *name;
+       /* If (nchildren == 0), this is a terminal node. */
+       unsigned                nchildren;
+       const                   ctl_node_t *children;
+       int                     (*ctl)(const size_t *, size_t, void *, size_t *,
+           void *, size_t);
+};
+
+struct ctl_indexed_node_s {
+       struct ctl_node_s       node;
+       const ctl_named_node_t  *(*index)(const size_t *, size_t, size_t);
 };
 
 struct ctl_arena_stats_s {

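The ctl.h change above replaces the old union-based node with a common
ctl_node_s header embedded as the first member of ctl_named_node_s and
ctl_indexed_node_s.  A self-contained sketch of that embedding pattern, under
hypothetical names (the first-member guarantee is what makes the per-kind
downcast legal in C):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node_s node_t;
typedef struct named_node_s named_node_t;

struct node_s {
	bool named;
};

struct named_node_s {
	struct node_s node;	/* must be first: allows the downcast below */
	const char *name;
};

/* Hypothetical helper: check the tag, then cast to the specific node type. */
static const named_node_t *
node_as_named(const node_t *n)
{
	return (n->named ? (const named_node_t *)n : NULL);
}

int
main(void)
{
	named_node_t root = { { true }, "root" };
	const node_t *generic = &root.node;

	printf("%s\n", node_as_named(generic)->name);
	return (0);
}
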
Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Thu May 10 18:25:59 2012        (r235237)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Thu May 10 18:29:40 2012        (r235238)
@@ -3,23 +3,34 @@
 #include "libc_private.h"
 #include "namespace.h"
 
-#include <sys/mman.h>
-#include <sys/param.h>
-#include <sys/syscall.h>
-#if !defined(SYS_write) && defined(__NR_write)
-#define        SYS_write __NR_write
+#include <math.h>
+#ifdef _WIN32
+#  include <windows.h>
+#  define ENOENT ERROR_PATH_NOT_FOUND
+#  define EINVAL ERROR_BAD_ARGUMENTS
+#  define EAGAIN ERROR_OUTOFMEMORY
+#  define EPERM  ERROR_WRITE_FAULT
+#  define EFAULT ERROR_INVALID_ADDRESS
+#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
+#  undef ERANGE
+#  define ERANGE ERROR_INVALID_DATA
+#else
+#  include <sys/param.h>
+#  include <sys/mman.h>
+#  include <sys/syscall.h>
+#  if !defined(SYS_write) && defined(__NR_write)
+#    define SYS_write __NR_write
+#  endif
+#  include <sys/uio.h>
+#  include <pthread.h>
+#  include <errno.h>
 #endif
-#include <sys/time.h>
 #include <sys/types.h>
-#include <sys/uio.h>
 
-#include <errno.h>
 #include <limits.h>
 #ifndef SIZE_T_MAX
 #  define SIZE_T_MAX   SIZE_MAX
 #endif
-#include <pthread.h>
-#include <sched.h>
 #include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
@@ -33,10 +44,18 @@
 #include <string.h>
 #include <strings.h>
 #include <ctype.h>
-#include <unistd.h>
+#ifdef _MSC_VER
+#  include <io.h>
+typedef intptr_t ssize_t;
+#  define PATH_MAX 1024
+#  define STDERR_FILENO 2
+#  define __func__ __FUNCTION__
+/* Disable warnings about deprecated system functions */
+#  pragma warning(disable: 4996)
+#else
+#  include <unistd.h>
+#endif
 #include <fcntl.h>
-#include <pthread.h>
-#include <math.h>
 
 #include "un-namespace.h"
 #include "libc_private.h"
@@ -110,6 +129,13 @@ static const bool config_prof_libunwind 
     false
 #endif
     ;
+static const bool config_mremap =
+#ifdef JEMALLOC_MREMAP
+    true
+#else
+    false
+#endif
+    ;
 static const bool config_munmap =
 #ifdef JEMALLOC_MUNMAP
     true
@@ -218,6 +244,9 @@ static const bool config_ivsalloc =
 #else
 #  define JEMALLOC_ENABLE_INLINE
 #  define JEMALLOC_INLINE static inline
+#  ifdef _MSC_VER
+#    define inline _inline
+#  endif
 #endif
 

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***