Author: jasone
Date: Sat Nov 10 01:46:13 2012
New Revision: 242844
URL: http://svnweb.freebsd.org/changeset/base/242844

Log:
  Import jemalloc 3.2.0.

Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/FREEBSD-diffs
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/arena.h
  head/contrib/jemalloc/include/jemalloc/internal/chunk.h
  head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
  head/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
  head/contrib/jemalloc/include/jemalloc/internal/ctl.h
  head/contrib/jemalloc/include/jemalloc/internal/extent.h
  head/contrib/jemalloc/include/jemalloc/internal/huge.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
  head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
  head/contrib/jemalloc/include/jemalloc/internal/prof.h
  head/contrib/jemalloc/include/jemalloc/internal/rtree.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/base.c
  head/contrib/jemalloc/src/chunk.c
  head/contrib/jemalloc/src/chunk_dss.c
  head/contrib/jemalloc/src/chunk_mmap.c
  head/contrib/jemalloc/src/ctl.c
  head/contrib/jemalloc/src/huge.c
  head/contrib/jemalloc/src/jemalloc.c
  head/contrib/jemalloc/src/mutex.c
  head/contrib/jemalloc/src/prof.c
  head/contrib/jemalloc/src/rtree.c
  head/contrib/jemalloc/src/stats.c
  head/contrib/jemalloc/src/tcache.c
  head/contrib/jemalloc/src/util.c

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog     Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/ChangeLog     Sat Nov 10 01:46:13 2012        (r242844)
@@ -6,6 +6,47 @@ found in the git revision history:
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git
 
+* 3.2.0 (November 9, 2012)
+
+  In addition to a couple of bug fixes, this version modifies page run
+  allocation and dirty page purging algorithms in order to better control
+  page-level virtual memory fragmentation.
+
+  Incompatible changes:
+  - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
+
+  Bug fixes:
+  - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
+    after primary dss allocation fails.
+  - Fix deadlock in the "arenas.purge" mallctl.  This regression was introduced
+    in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
+
+* 3.1.0 (October 16, 2012)
+
+  New features:
+  - Auto-detect whether running inside Valgrind, thus removing the need to
+    manually specify MALLOC_CONF=valgrind:true.
+  - Add the "arenas.extend" mallctl, which allows applications to create
+    manually managed arenas.
+  - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
+  - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
+    which provide control over dss/mmap precedence.
+  - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
+  - Define LG_QUANTUM for hppa.
+
+  Incompatible changes:
+  - Disable tcache by default if running inside Valgrind, in order to avoid
+    making unallocated objects appear reachable to Valgrind.
+  - Drop const from malloc_usable_size() argument on Linux.
+
+  Bug fixes:
+  - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
+  - Remove const from __*_hook variable declarations, so that glibc can modify
+    them during process forking.
+  - Fix mlockall(2)/madvise(2) interaction.
+  - Fix fork(2)-related deadlocks.
+  - Fix error return value for "thread.tcache.enabled" mallctl.
+
 * 3.0.0 (May 11, 2012)
 
   Although this version adds some major new features, the primary focus is on

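A minimal usage sketch of the "arenas.extend" mallctl and ALLOCM_ARENA() flag
noted above (not part of the import; the function name is illustrative, and
FreeBSD's <malloc_np.h> is assumed to declare mallctl() and the experimental
allocm() interface):

#include <stdlib.h>
#include <malloc_np.h>

/* Create a manually managed arena, then allocate from it explicitly. */
void *
alloc_from_new_arena(size_t size)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;

	/* "arenas.extend" appends a new arena and returns its index. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (NULL);
	/* Allocate at least size bytes from that specific arena. */
	if (allocm(&p, NULL, size, ALLOCM_ARENA(arena_ind)) != ALLOCM_SUCCESS)
		return (NULL);
	return (p);
}
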
Modified: head/contrib/jemalloc/FREEBSD-diffs
==============================================================================
--- head/contrib/jemalloc/FREEBSD-diffs Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/FREEBSD-diffs Sat Nov 10 01:46:13 2012        (r242844)
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 877c500..7d659a7 100644
+index 54b8747..91c4a4e 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -51,12 +51,23 @@
@@ -27,7 +27,7 @@ index 877c500..7d659a7 100644
        <refsect2>
          <title>Standard API</title>
          <funcprototype>
-@@ -2101,4 +2112,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2170,4 +2181,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
     <para>The <function>posix_memalign<parameter/></function> function conforms
      to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
    </refsect1>
@@ -45,7 +45,7 @@ index 877c500..7d659a7 100644
 +  </refsect1>
  </refentry>
 diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 268cd14..2acd2eb 100644
+index 475821a..73306ac 100644
 --- a/include/jemalloc/internal/jemalloc_internal.h.in
 +++ b/include/jemalloc/internal/jemalloc_internal.h.in
 @@ -1,5 +1,8 @@
@@ -97,19 +97,19 @@ index de44e14..564d604 100644
  
  bool  malloc_mutex_init(malloc_mutex_t *mutex);
 diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
-index b816647..b8ce6b1 100644
+index 06241cd..7b19906 100644
 --- a/include/jemalloc/internal/private_namespace.h
 +++ b/include/jemalloc/internal/private_namespace.h
-@@ -186,7 +186,6 @@
- #define       iqalloc JEMALLOC_N(iqalloc)
+@@ -204,7 +204,6 @@
  #define       iralloc JEMALLOC_N(iralloc)
+ #define       irallocx JEMALLOC_N(irallocx)
  #define       isalloc JEMALLOC_N(isalloc)
 -#define       isthreaded JEMALLOC_N(isthreaded)
  #define       ivsalloc JEMALLOC_N(ivsalloc)
  #define       jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
  #define       jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
 diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in
-index ad06948..505dd38 100644
+index 31b1304..c3ef2f5 100644
 --- a/include/jemalloc/jemalloc.h.in
 +++ b/include/jemalloc/jemalloc.h.in
 @@ -15,6 +15,7 @@ extern "C" {
@@ -122,7 +122,7 @@ index ad06948..505dd38 100644
  #define       ALLOCM_LG_ALIGN(la)     (la)
 diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
 new file mode 100644
-index 0000000..9efab93
+index 0000000..9c97a13
 --- /dev/null
 +++ b/include/jemalloc/jemalloc_FreeBSD.h
 @@ -0,0 +1,76 @@
@@ -203,7 +203,7 @@ index 0000000..9efab93
 +#define       pthread_mutex_lock      _pthread_mutex_lock
 +#define       pthread_mutex_unlock    _pthread_mutex_unlock
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index bc54cd7..fa9fcf0 100644
+index 8a667b6..aaf5012 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -217,7 +217,7 @@ index bc54cd7..fa9fcf0 100644
  /* Runtime configuration options. */
  const char    *je_malloc_conf;
  #ifdef JEMALLOC_DEBUG
-@@ -429,7 +433,8 @@ malloc_conf_init(void)
+@@ -448,7 +452,8 @@ malloc_conf_init(void)
  #endif
                            ;
  
@@ -228,12 +228,12 @@ index bc54cd7..fa9fcf0 100644
                                 * Do nothing; opts is already initialized to
                                 * the value of the MALLOC_CONF environment
 diff --git a/src/mutex.c b/src/mutex.c
-index 37a843e..4a90a05 100644
+index 55e18c2..6b6f438 100644
 --- a/src/mutex.c
 +++ b/src/mutex.c
 @@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
  #ifdef JEMALLOC_MUTEX_INIT_CB
- int   _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ JEMALLOC_EXPORT int   _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
      void *(calloc_cb)(size_t, size_t));
 +
 +__weak_reference(_pthread_mutex_init_calloc_cb_stub,
@@ -250,7 +250,7 @@ index 37a843e..4a90a05 100644
  
  bool
 diff --git a/src/util.c b/src/util.c
-index 9b73c3e..f94799f 100644
+index b3a0114..df1c5d5 100644
 --- a/src/util.c
 +++ b/src/util.c
 @@ -58,6 +58,22 @@ wrtmessage(void *cbopaque, const char *s)

Modified: head/contrib/jemalloc/VERSION
==============================================================================
--- head/contrib/jemalloc/VERSION       Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/VERSION       Sat Nov 10 01:46:13 2012        (r242844)
@@ -1 +1 @@
-3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
+3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9

Modified: head/contrib/jemalloc/doc/jemalloc.3
==============================================================================
--- head/contrib/jemalloc/doc/jemalloc.3        Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/doc/jemalloc.3        Sat Nov 10 01:46:13 2012        (r242844)
@@ -2,12 +2,12 @@
 .\"     Title: JEMALLOC
 .\"    Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\"      Date: 05/12/2012
+.\"      Date: 11/09/2012
 .\"    Manual: User Manual
-.\"    Source: jemalloc 3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
+.\"    Source: jemalloc 3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9
 .\"  Language: English
 .\"
-.TH "JEMALLOC" "3" "05/12/2012" "jemalloc 3.0.0-0-gfc9b1dbf69f5" "User Manual"
+.TH "JEMALLOC" "3" "11/09/2012" "jemalloc 3.2.0-0-g87499f6748eb" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 3\&.0\&.0\-0\-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046\&. More information can be found at the
+This manual describes jemalloc 3\&.2\&.0\-0\-g87499f6748ebe4817571e817e9f680ccb5bf54a9\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -310,6 +310,14 @@ Initialize newly allocated memory to con
 For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&.
 .RE
 .PP
+\fBALLOCM_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
+.RS 4
+Use the arena specified by the index
+\fIa\fR\&. This macro does not validate that
+\fIa\fR
+specifies an arena in the valid range\&.
+.RE
+.PP
 The
 \fBallocm\fR\fB\fR
 function allocates at least
@@ -647,16 +655,23 @@ is specified during configuration, in wh
 Virtual memory chunk size (log base 2)\&. The default chunk size is 4 MiB (2^22)\&.
 .RE
 .PP
+"opt\&.dss" (\fBconst char *\fR) r\-
+.RS 4
+dss (\fBsbrk\fR(2)) allocation precedence as related to
+\fBmmap\fR(2)
+allocation\&. The following settings are supported: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq (default)\&.
+.RE
+.PP
 "opt\&.narenas" (\fBsize_t\fR) r\-
 .RS 4
-Maximum number of arenas to use\&. The default maximum number of arenas is four times the number of CPUs, or one if there is a single CPU\&.
+Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&.
 .RE
 .PP
 "opt\&.lg_dirty_mult" (\fBssize_t\fR) r\-
 .RS 4
 Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
 \fBmadvise\fR(2)
-or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 32:1 (2^5:1); an option value of \-1 will disable dirty page purging\&.
+or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&.
 .RE
 .PP
 "opt\&.stats_print" (\fBbool\fR) r\-
@@ -676,7 +691,8 @@ Junk filling enabled/disabled\&. If enab
 0xa5\&. All deallocated memory will be initialized to
 0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless
 \fB\-\-enable\-debug\fR
-is specified during configuration, in which case it is enabled by default\&.
+is specified during configuration, in which case it is enabled by default unless running inside
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
 .RE
 .PP
 "opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -684,7 +700,7 @@ is specified during configuration, in wh
 Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the
 "opt\&.junk"
 option is enabled\&. This feature is of particular use in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&.
 .RE
 .PP
 "opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -692,7 +708,7 @@ option is enabled\&. This feature is of 
 Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the
 "opt\&.junk"
 option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&.
 .RE
 .PP
 "opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -714,15 +730,7 @@ enabled/disabled\&. This option is disab
 "opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR]
 .RS 4
 \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
-support enabled/disabled\&. If enabled, several other options are automatically modified during options processing to work well with Valgrind:
-"opt\&.junk"
-and
-"opt\&.zero"
-are set to false,
-"opt\&.quarantine"
-is set to 16 MiB, and
-"opt\&.redzone"
-is set to true\&. This option is disabled by default\&.
+support enabled/disabled\&. This option is vestigal because jemalloc auto\-detects whether it is running inside Valgrind\&. This option is disabled by default, unless running inside Valgrind\&.
 .RE
 .PP
 "opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
@@ -749,7 +757,8 @@ This option is disabled by default\&.
 .RS 4
 Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
 "opt\&.lg_tcache_max"
-option for related tuning information\&. This option is enabled by default\&.
+option for related tuning information\&. This option is enabled by default unless running inside
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
 .RE
 .PP
 "opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
@@ -845,9 +854,7 @@ option for information on analyzing heap
 .PP
 "thread\&.arena" (\fBunsigned\fR) rw
 .RS 4
-Get or set the arena associated with the calling thread\&. The arena index must be less than the maximum number of arenas (see the
-"arenas\&.narenas"
-mallctl)\&. If the specified arena was not initialized beforehand (see the
+Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the
 "arenas\&.initialized"
 mallctl), it will be automatically initialized as a side effect of calling this interface\&.
 .RE
@@ -891,9 +898,23 @@ Enable/disable calling thread\*(Aqs tcac
 Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
 .RE
 .PP
+"arena\&.<i>\&.purge" (\fBunsigned\fR) \-\-
+.RS 4
+Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
+"arenas\&.narenas"\&.
+.RE
+.PP
+"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
+.RS 4
+Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
+"arenas\&.narenas"\&. See
+"opt\&.dss"
+for supported settings\&.
+.RE
+.PP
 "arenas\&.narenas" (\fBunsigned\fR) r\-
 .RS 4
-Maximum number of arenas\&.
+Current limit on number of arenas\&.
 .RE
 .PP
 "arenas\&.initialized" (\fBbool *\fR) r\-
@@ -958,6 +979,11 @@ Maximum size supported by this large siz
 Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&.
 .RE
 .PP
+"arenas\&.extend" (\fBunsigned\fR) r\-
+.RS 4
+Extend the array of arenas by appending a new arena, and returning the new arena index\&.
+.RE
+.PP
 "prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
 .RS 4
 Control whether sampling is currently active\&. See the
@@ -997,7 +1023,9 @@ Total number of bytes allocated by the a
 "stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
 .RS 4
 Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
-"stats\&.allocated"\&.
+"stats\&.allocated"\&. This does not include
+"stats\&.arenas\&.<i>\&.pdirty"
+and pages entirely devoted to allocator metadata\&.
 .RE
 .PP
 "stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1036,6 +1064,15 @@ Cumulative number of huge allocation req
 Cumulative number of huge deallocation requests\&.
 .RE
 .PP
+"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
+.RS 4
+dss (\fBsbrk\fR(2)) allocation precedence as related to
+\fBmmap\fR(2)
+allocation\&. See
+"opt\&.dss"
+for details\&.
+.RE
+.PP
 "stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\-
 .RS 4
 Number of threads currently assigned to arena\&.
@@ -1197,9 +1234,7 @@ This implementation does not provide muc
 \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
 tool if the
 \fB\-\-enable\-valgrind\fR
-configuration option is enabled and the
-"opt\&.valgrind"
-option is enabled\&.
+configuration option is enabled\&.
 .SH "DIAGNOSTIC MESSAGES"
 .PP
 If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor

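A minimal sketch of the new per-arena mallctls documented above (not part of
the import; the arena index and the "primary" setting are illustrative):

#include <malloc_np.h>

/* Switch arena 0 to primary dss allocation, then purge its dirty pages. */
void
tune_arena0(void)
{
	const char *dss = "primary";

	/* "arena.<i>.dss" is rw; newp points at the string pointer. */
	(void)mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(dss));
	/* "arena.<i>.purge" carries no value; a bare call triggers it. */
	(void)mallctl("arena.0.purge", NULL, NULL, NULL, 0);
}
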
Modified: head/contrib/jemalloc/include/jemalloc/internal/arena.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/arena.h     Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/arena.h     Sat Nov 10 01:46:13 2012        (r242844)
@@ -38,10 +38,10 @@
  *
  *   (nactive >> opt_lg_dirty_mult) >= ndirty
  *
- * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
- * times as many active pages as dirty pages.
+ * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
+ * as many active pages as dirty pages.
  */
-#define        LG_DIRTY_MULT_DEFAULT   5
+#define        LG_DIRTY_MULT_DEFAULT   3
 
 typedef struct arena_chunk_map_s arena_chunk_map_t;
 typedef struct arena_chunk_s arena_chunk_t;
@@ -69,7 +69,7 @@ struct arena_chunk_map_s {
                /*
                 * Linkage for run trees.  There are two disjoint uses:
                 *
-                * 1) arena_t's runs_avail_{clean,dirty} trees.
+                * 1) arena_t's runs_avail tree.
                 * 2) arena_run_t conceptually uses this linkage for in-use
                 *    non-full runs, rather than directly embedding linkage.
                 */
@@ -162,20 +162,24 @@ typedef rb_tree(arena_chunk_map_t) arena
 /* Arena chunk header. */
 struct arena_chunk_s {
        /* Arena that owns the chunk. */
-       arena_t         *arena;
+       arena_t                 *arena;
 
-       /* Linkage for the arena's chunks_dirty list. */
-       ql_elm(arena_chunk_t) link_dirty;
-
-       /*
-        * True if the chunk is currently in the chunks_dirty list, due to
-        * having at some point contained one or more dirty pages.  Removal
-        * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
-        */
-       bool            dirtied;
+       /* Linkage for tree of arena chunks that contain dirty runs. */
+       rb_node(arena_chunk_t)  dirty_link;
 
        /* Number of dirty pages. */
-       size_t          ndirty;
+       size_t                  ndirty;
+
+       /* Number of available runs. */
+       size_t                  nruns_avail;
+
+       /*
+        * Number of available run adjacencies.  Clean and dirty available runs
+        * are not coalesced, which causes virtual memory fragmentation.  The
+        * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
+        * this fragmentation.
+        * */
+       size_t                  nruns_adjac;
 
        /*
         * Map of pages within chunk that keeps track of free/large/small.  The
@@ -183,7 +187,7 @@ struct arena_chunk_s {
         * need to be tracked in the map.  This omission saves a header page
         * for common chunk sizes (e.g. 4 MiB).
         */
-       arena_chunk_map_t map[1]; /* Dynamically sized. */
+       arena_chunk_map_t       map[1]; /* Dynamically sized. */
 };
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
@@ -331,8 +335,10 @@ struct arena_s {
 
        uint64_t                prof_accumbytes;
 
-       /* List of dirty-page-containing chunks this arena manages. */
-       ql_head(arena_chunk_t)  chunks_dirty;
+       dss_prec_t              dss_prec;
+
+       /* Tree of dirty-page-containing chunks this arena manages. */
+       arena_chunk_tree_t      chunks_dirty;
 
        /*
         * In order to avoid rapid chunk allocation/deallocation when an arena
@@ -367,18 +373,9 @@ struct arena_s {
 
        /*
         * Size/address-ordered trees of this arena's available runs.  The trees
-        * are used for first-best-fit run allocation.  The dirty tree contains
-        * runs with dirty pages (i.e. very likely to have been touched and
-        * therefore have associated physical pages), whereas the clean tree
-        * contains runs with pages that either have no associated physical
-        * pages, or have pages that the kernel may recycle at any time due to
-        * previous madvise(2) calls.  The dirty tree is used in preference to
-        * the clean tree for allocations, because using dirty pages reduces
-        * the amount of dirty purging necessary to keep the active:dirty page
-        * ratio below the purge threshold.
+        * are used for first-best-fit run allocation.
         */
-       arena_avail_tree_t      runs_avail_clean;
-       arena_avail_tree_t      runs_avail_dirty;
+       arena_avail_tree_t      runs_avail;
 
        /* bins is used to store trees of free regions. */
        arena_bin_t             bins[NBINS];
@@ -422,13 +419,16 @@ void      arena_dalloc_small(arena_t *arena, 
 void   arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 void   arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void   arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
-    arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats);
 void   *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
-void   *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache);
+void   *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+    bool try_tcache_dalloc);
+dss_prec_t     arena_dss_prec_get(arena_t *arena);
+void   arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+void   arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+    malloc_large_stats_t *lstats);
 bool   arena_new(arena_t *arena, unsigned ind);
 void   arena_boot(void);
 void   arena_prefork(arena_t *arena);

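To make the ratio change concrete: with the new LG_DIRTY_MULT_DEFAULT of 3 and
nactive = 8192 pages, up to 8192 >> 3 = 1024 dirty pages may accumulate (8:1),
where the old default of 5 allowed only 8192 >> 5 = 256 (32:1).  A minimal
sketch of the purge predicate implied by the invariant in the comment above
(not the allocator's actual code):

#include <stdbool.h>
#include <sys/types.h>

static bool
should_purge(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{
	if (lg_dirty_mult < 0)	/* An option value of -1 disables purging. */
		return (false);
	/* Purge once (nactive >> lg_dirty_mult) >= ndirty no longer holds. */
	return ((nactive >> lg_dirty_mult) < ndirty);
}
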
Modified: head/contrib/jemalloc/include/jemalloc/internal/chunk.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/chunk.h     Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/chunk.h     Sat Nov 10 01:46:13 2012        (r242844)
@@ -28,6 +28,7 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 extern size_t          opt_lg_chunk;
+extern const char      *opt_dss;
 
 /* Protects stats_chunks; currently not used for any other purpose. */
 extern malloc_mutex_t  chunks_mtx;
@@ -42,9 +43,14 @@ extern size_t                chunk_npages;
 extern size_t          map_bias; /* Number of arena chunk header pages. */
 extern size_t          arena_maxclass; /* Max size class for arenas. */
 
-void   *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
+void   *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+    dss_prec_t dss_prec);
+void   chunk_unmap(void *chunk, size_t size);
 void   chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool   chunk_boot(void);
+void   chunk_prefork(void);
+void   chunk_postfork_parent(void);
+void   chunk_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/

Modified: head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h Sat Nov 10 01:46:13 2012        (r242844)
@@ -1,14 +1,28 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
+typedef enum {
+       dss_prec_disabled  = 0,
+       dss_prec_primary   = 1,
+       dss_prec_secondary = 2,
+
+       dss_prec_limit     = 3
+} dss_prec_t ;
+#define        DSS_PREC_DEFAULT        dss_prec_secondary
+#define        DSS_DEFAULT             "secondary"
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+extern const char *dss_prec_names[];
+
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
+dss_prec_t     chunk_dss_prec_get(void);
+bool   chunk_dss_prec_set(dss_prec_t dss_prec);
 void   *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
 bool   chunk_in_dss(void *chunk);
 bool   chunk_dss_boot(void);

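The dss precedence introduced here can also be selected at startup.  A minimal
sketch, assuming the malloc_conf option string documented in jemalloc(3)
(FreeBSD's libc is believed to spell the global _malloc_conf); the setting
names match dss_prec_names[]:

/* Prefer sbrk(2) over mmap(2) for chunk allocation, process-wide. */
const char *_malloc_conf = "dss:primary";
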
Modified: head/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h        Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h        Sat Nov 10 01:46:13 2012        (r242844)
@@ -9,7 +9,7 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void   pages_purge(void *addr, size_t length);
+bool   pages_purge(void *addr, size_t length);
 
 void   *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool   chunk_dealloc_mmap(void *chunk, size_t size);

Modified: head/contrib/jemalloc/include/jemalloc/internal/ctl.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/ctl.h       Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/ctl.h       Sat Nov 10 01:46:13 2012        (r242844)
@@ -33,6 +33,7 @@ struct ctl_indexed_node_s {
 struct ctl_arena_stats_s {
        bool                    initialized;
        unsigned                nthreads;
+       const char              *dss;
        size_t                  pactive;
        size_t                  pdirty;
        arena_stats_t           astats;
@@ -61,6 +62,7 @@ struct ctl_stats_s {
                uint64_t        nmalloc;        /* huge_nmalloc */
                uint64_t        ndalloc;        /* huge_ndalloc */
        } huge;
+       unsigned                narenas;
        ctl_arena_stats_t       *arenas;        /* (narenas + 1) elements. */
 };
 
@@ -75,6 +77,9 @@ int   ctl_nametomib(const char *name, size
 int    ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
 bool   ctl_boot(void);
+void   ctl_prefork(void);
+void   ctl_postfork_parent(void);
+void   ctl_postfork_child(void);
 
 #define        xmallctl(name, oldp, oldlenp, newp, newlen) do {                \
        if (je_mallctl(name, oldp, oldlenp, newp, newlen)               \

Modified: head/contrib/jemalloc/include/jemalloc/internal/extent.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/extent.h    Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/extent.h    Sat Nov 10 01:46:13 2012        (r242844)
@@ -23,6 +23,9 @@ struct extent_node_s {
 
        /* Total region size. */
        size_t                  size;
+
+       /* True if zero-filled; used by chunk recycling code. */
+       bool                    zeroed;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;
 

Modified: head/contrib/jemalloc/include/jemalloc/internal/huge.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/huge.h      Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/huge.h      Sat Nov 10 01:46:13 2012        (r242844)
@@ -22,7 +22,7 @@ void  *huge_palloc(size_t size, size_t al
 void   *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra);
 void   *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero);
+    size_t alignment, bool zero, bool try_tcache_dalloc);
 void   huge_dalloc(void *ptr, bool unmap);
 size_t huge_salloc(const void *ptr);
 prof_ctx_t     *huge_prof_ctx_get(const void *ptr);

Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Sat Nov 10 01:46:13 2012        (r242844)
@@ -270,6 +270,9 @@ static const bool config_ivsalloc =
 #  ifdef __arm__
 #    define LG_QUANTUM         3
 #  endif
+#  ifdef __hppa__
+#    define LG_QUANTUM         4
+#  endif
 #  ifdef __mips__
 #    define LG_QUANTUM         3
 #  endif
@@ -424,6 +427,7 @@ static const bool config_ivsalloc =
                VALGRIND_FREELIKE_BLOCK(ptr, rzsize);                   \
 } while (0)
 #else
+#define        RUNNING_ON_VALGRIND     ((unsigned)0)
 #define        VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
 #define        VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
 #define        VALGRIND_FREELIKE_BLOCK(addr, rzB)
@@ -510,13 +514,19 @@ extern size_t     opt_narenas;
 /* Number of CPUs. */
 extern unsigned                ncpus;
 
-extern malloc_mutex_t  arenas_lock; /* Protects arenas initialization. */
+/* Protects arenas initialization (arenas, arenas_total). */
+extern malloc_mutex_t  arenas_lock;
 /*
  * Arenas that are used to service external requests.  Not all elements of the
  * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
  */
 extern arena_t         **arenas;
-extern unsigned                narenas;
+extern unsigned                narenas_total;
+extern unsigned                narenas_auto; /* Read-only after initialization. */
 
 arena_t        *arenas_extend(unsigned ind);
 void   arenas_cleanup(void *arg);
@@ -571,6 +581,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused),
 
 size_t s2u(size_t size);
 size_t sa2u(size_t size, size_t alignment);
+unsigned       narenas_total_get(void);
 arena_t        *choose_arena(arena_t *arena);
 #endif
 
@@ -675,6 +686,18 @@ sa2u(size_t size, size_t alignment)
        }
 }
 
+JEMALLOC_INLINE unsigned
+narenas_total_get(void)
+{
+       unsigned narenas;
+
+       malloc_mutex_lock(&arenas_lock);
+       narenas = narenas_total;
+       malloc_mutex_unlock(&arenas_lock);
+
+       return (narenas);
+}
+
 /* Choose an arena based on a per-thread value. */
 JEMALLOC_INLINE arena_t *
 choose_arena(arena_t *arena)
@@ -710,15 +733,24 @@ choose_arena(arena_t *arena)
 #include "jemalloc/internal/quarantine.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
+void   *imallocx(size_t size, bool try_tcache, arena_t *arena);
 void   *imalloc(size_t size);
+void   *icallocx(size_t size, bool try_tcache, arena_t *arena);
 void   *icalloc(size_t size);
+void   *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+    arena_t *arena);
 void   *ipalloc(size_t usize, size_t alignment, bool zero);
 size_t isalloc(const void *ptr, bool demote);
 size_t ivsalloc(const void *ptr, bool demote);
 size_t u2rz(size_t usize);
 size_t p2rz(const void *ptr);
+void   idallocx(void *ptr, bool try_tcache);
 void   idalloc(void *ptr);
+void   iqallocx(void *ptr, bool try_tcache);
 void   iqalloc(void *ptr);
+void   *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
+    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+    arena_t *arena);
 void   *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
     bool zero, bool no_move);
 malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
@@ -726,29 +758,44 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused),
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 JEMALLOC_INLINE void *
-imalloc(size_t size)
+imallocx(size_t size, bool try_tcache, arena_t *arena)
 {
 
        assert(size != 0);
 
        if (size <= arena_maxclass)
-               return (arena_malloc(NULL, size, false, true));
+               return (arena_malloc(arena, size, false, try_tcache));
        else
                return (huge_malloc(size, false));
 }
 
 JEMALLOC_INLINE void *
-icalloc(size_t size)
+imalloc(size_t size)
+{
+
+       return (imallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+icallocx(size_t size, bool try_tcache, arena_t *arena)
 {
 
        if (size <= arena_maxclass)
-               return (arena_malloc(NULL, size, true, true));
+               return (arena_malloc(arena, size, true, try_tcache));
        else
                return (huge_malloc(size, true));
 }
 
 JEMALLOC_INLINE void *
-ipalloc(size_t usize, size_t alignment, bool zero)
+icalloc(size_t size)
+{
+
+       return (icallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+    arena_t *arena)
 {
        void *ret;
 
@@ -756,11 +803,11 @@ ipalloc(size_t usize, size_t alignment, 
        assert(usize == sa2u(usize, alignment));
 
        if (usize <= arena_maxclass && alignment <= PAGE)
-               ret = arena_malloc(NULL, usize, zero, true);
+               ret = arena_malloc(arena, usize, zero, try_tcache);
        else {
                if (usize <= arena_maxclass) {
-                       ret = arena_palloc(choose_arena(NULL), usize, alignment,
-                           zero);
+                       ret = arena_palloc(choose_arena(arena), usize,
+                           alignment, zero);
                } else if (alignment <= chunksize)
                        ret = huge_malloc(usize, zero);
                else
@@ -771,6 +818,13 @@ ipalloc(size_t usize, size_t alignment, 
        return (ret);
 }
 
+JEMALLOC_INLINE void *
+ipalloc(size_t usize, size_t alignment, bool zero)
+{
+
+       return (ipallocx(usize, alignment, zero, true, NULL));
+}
+
 /*
  * Typical usage:
  *   void *ptr = [...]
@@ -829,7 +883,7 @@ p2rz(const void *ptr)
 }
 
 JEMALLOC_INLINE void
-idalloc(void *ptr)
+idallocx(void *ptr, bool try_tcache)
 {
        arena_chunk_t *chunk;
 
@@ -837,24 +891,38 @@ idalloc(void *ptr)
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        if (chunk != ptr)
-               arena_dalloc(chunk->arena, chunk, ptr, true);
+               arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
        else
                huge_dalloc(ptr, true);
 }
 
 JEMALLOC_INLINE void
-iqalloc(void *ptr)
+idalloc(void *ptr)
+{
+
+       idallocx(ptr, true);
+}
+
+JEMALLOC_INLINE void
+iqallocx(void *ptr, bool try_tcache)
 {
 
        if (config_fill && opt_quarantine)
                quarantine(ptr);
        else
-               idalloc(ptr);
+               idallocx(ptr, try_tcache);
+}
+
+JEMALLOC_INLINE void
+iqalloc(void *ptr)
+{
+
+       iqallocx(ptr, true);
 }
 
 JEMALLOC_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
-    bool no_move)
+irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
 {
        void *ret;
        size_t oldsize;
@@ -877,7 +945,7 @@ iralloc(void *ptr, size_t size, size_t e
                usize = sa2u(size + extra, alignment);
                if (usize == 0)
                        return (NULL);
-               ret = ipalloc(usize, alignment, zero);
+               ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
                if (ret == NULL) {
                        if (extra == 0)
                                return (NULL);
@@ -885,7 +953,8 @@ iralloc(void *ptr, size_t size, size_t e
                        usize = sa2u(size, alignment);
                        if (usize == 0)
                                return (NULL);
-                       ret = ipalloc(usize, alignment, zero);
+                       ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+                           arena);
                        if (ret == NULL)
                                return (NULL);
                }
@@ -896,7 +965,7 @@ iralloc(void *ptr, size_t size, size_t e
                 */
                copysize = (size < oldsize) ? size : oldsize;
                memcpy(ret, ptr, copysize);
-               iqalloc(ptr);
+               iqallocx(ptr, try_tcache_dalloc);
                return (ret);
        }
 
@@ -910,15 +979,25 @@ iralloc(void *ptr, size_t size, size_t e
                }
        } else {
                if (size + extra <= arena_maxclass) {
-                       return (arena_ralloc(ptr, oldsize, size, extra,
-                           alignment, zero, true));
+                       return (arena_ralloc(arena, ptr, oldsize, size, extra,
+                           alignment, zero, try_tcache_alloc,
+                           try_tcache_dalloc));
                } else {
                        return (huge_ralloc(ptr, oldsize, size, extra,
-                           alignment, zero));
+                           alignment, zero, try_tcache_dalloc));
                }
        }
 }
 
+JEMALLOC_INLINE void *
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+    bool no_move)
+{
+
+       return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
+           NULL));
+}
+
 malloc_tsd_externs(thread_allocated, thread_allocated_t)
 malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
     THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)

Modified: head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Sat Nov 10 00:36:53 2012        (r242843)
+++ head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Sat Nov 10 01:46:13 2012        (r242844)
@@ -12,6 +12,8 @@
 #define        arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
 #define        arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
 #define        arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
+#define        arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
+#define        arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
 #define        arena_malloc JEMALLOC_N(arena_malloc)
 #define        arena_malloc_large JEMALLOC_N(arena_malloc_large)
 #define        arena_malloc_small JEMALLOC_N(arena_malloc_small)
@@ -51,14 +53,13 @@
 #define        arena_stats_merge JEMALLOC_N(arena_stats_merge)
 #define        arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
 #define        arenas JEMALLOC_N(arenas)
-#define        arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
 #define        arenas_booted JEMALLOC_N(arenas_booted)
 #define        arenas_cleanup JEMALLOC_N(arenas_cleanup)
 #define        arenas_extend JEMALLOC_N(arenas_extend)
 #define        arenas_initialized JEMALLOC_N(arenas_initialized)
 #define        arenas_lock JEMALLOC_N(arenas_lock)
-#define        arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
 #define        arenas_tls JEMALLOC_N(arenas_tls)
+#define        arenas_tsd JEMALLOC_N(arenas_tsd)
 #define        arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
 #define        arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
 #define        arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
@@ -101,9 +102,15 @@
 #define        chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
 #define        chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
 #define        chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
+#define        chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
+#define        chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
 #define        chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
 #define        chunk_in_dss JEMALLOC_N(chunk_in_dss)
 #define        chunk_npages JEMALLOC_N(chunk_npages)
+#define        chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
+#define        chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
+#define        chunk_prefork JEMALLOC_N(chunk_prefork)
+#define        chunk_unmap JEMALLOC_N(chunk_unmap)
 #define        chunks_mtx JEMALLOC_N(chunks_mtx)
 #define        chunks_rtree JEMALLOC_N(chunks_rtree)
 #define        chunksize JEMALLOC_N(chunksize)
@@ -129,6 +136,10 @@
 #define        ctl_bymib JEMALLOC_N(ctl_bymib)
 #define        ctl_byname JEMALLOC_N(ctl_byname)
 #define        ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define        ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
+#define        ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
+#define        ctl_prefork JEMALLOC_N(ctl_prefork)
+#define        dss_prec_names JEMALLOC_N(dss_prec_names)
 #define        extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
 #define        extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
 #define        extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@@ -161,6 +172,7 @@
 #define        extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
 #define        extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
 #define        extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define        get_errno JEMALLOC_N(get_errno)
 #define        hash JEMALLOC_N(hash)
 #define        huge_allocated JEMALLOC_N(huge_allocated)
 #define        huge_boot JEMALLOC_N(huge_boot)
@@ -180,11 +192,17 @@
 #define        huge_salloc JEMALLOC_N(huge_salloc)
 #define        iallocm JEMALLOC_N(iallocm)
 #define        icalloc JEMALLOC_N(icalloc)
+#define        icallocx JEMALLOC_N(icallocx)
 #define        idalloc JEMALLOC_N(idalloc)
+#define        idallocx JEMALLOC_N(idallocx)
 #define        imalloc JEMALLOC_N(imalloc)
+#define        imallocx JEMALLOC_N(imallocx)
 #define        ipalloc JEMALLOC_N(ipalloc)
+#define        ipallocx JEMALLOC_N(ipallocx)
 #define        iqalloc JEMALLOC_N(iqalloc)
+#define        iqallocx JEMALLOC_N(iqallocx)
 #define        iralloc JEMALLOC_N(iralloc)
+#define        irallocx JEMALLOC_N(irallocx)
 #define        isalloc JEMALLOC_N(isalloc)
 #define        ivsalloc JEMALLOC_N(ivsalloc)
 #define        jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
@@ -211,7 +229,9 @@
 #define        map_bias JEMALLOC_N(map_bias)
 #define        mb_write JEMALLOC_N(mb_write)
 #define        mutex_boot JEMALLOC_N(mutex_boot)
-#define        narenas JEMALLOC_N(narenas)
+#define        narenas_auto JEMALLOC_N(narenas_auto)
+#define        narenas_total JEMALLOC_N(narenas_total)
+#define        narenas_total_get JEMALLOC_N(narenas_total_get)
 #define        ncpus JEMALLOC_N(ncpus)
 #define        nhbins JEMALLOC_N(nhbins)
 #define        opt_abort JEMALLOC_N(opt_abort)
@@ -253,6 +273,9 @@
 #define        prof_lookup JEMALLOC_N(prof_lookup)
 #define        prof_malloc JEMALLOC_N(prof_malloc)
 #define        prof_mdump JEMALLOC_N(prof_mdump)
+#define        prof_postfork_child JEMALLOC_N(prof_postfork_child)
+#define        prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
+#define        prof_prefork JEMALLOC_N(prof_prefork)
 #define        prof_promote JEMALLOC_N(prof_promote)
 #define        prof_realloc JEMALLOC_N(prof_realloc)
 #define        prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@@ -263,6 +286,7 @@
 #define        prof_tdata_init JEMALLOC_N(prof_tdata_init)
 #define        prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
 #define        prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
+#define        prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***