Author: jasone
Date: Mon Jun  3 14:36:28 2013
New Revision: 251300
URL: http://svnweb.freebsd.org/changeset/base/251300

Log:
  Update jemalloc to version 3.4.0.

Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/FREEBSD-diffs
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/arena.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
  head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
  head/contrib/jemalloc/include/jemalloc/internal/prof.h
  head/contrib/jemalloc/include/jemalloc/internal/quarantine.h
  head/contrib/jemalloc/include/jemalloc/internal/tcache.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/base.c
  head/contrib/jemalloc/src/chunk.c
  head/contrib/jemalloc/src/chunk_dss.c
  head/contrib/jemalloc/src/jemalloc.c
  head/contrib/jemalloc/src/prof.c
  head/contrib/jemalloc/src/quarantine.c
  head/contrib/jemalloc/src/tcache.c

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog     Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/ChangeLog     Mon Jun  3 14:36:28 2013        (r251300)
@@ -6,6 +6,47 @@ found in the git revision history:
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git
 
+* 3.4.0 (June 2, 2013)
+
+  This version is essentially a small bugfix release, but the addition of
+  aarch64 support requires that the minor version be incremented.
+
+  Bug fixes:
+  - Fix race-triggered deadlocks in chunk_record().  These deadlocks were
+    typically triggered by multiple threads concurrently deallocating huge
+    objects.
+
+  New features:
+  - Add support for the aarch64 architecture.
+
+* 3.3.1 (March 6, 2013)
+
+  This version fixes bugs that are typically encountered only when utilizing
+  custom run-time options.
+
+  Bug fixes:
+  - Fix a locking order bug that could cause deadlock during fork if heap
+    profiling were enabled.
+  - Fix a chunk recycling bug that could cause the allocator to lose track of
+    whether a chunk was zeroed.  On FreeBSD, NetBSD, and OS X, it could cause
+    corruption if allocating via sbrk(2) (unlikely unless running with the
+    "dss:primary" option specified).  This was completely harmless on Linux
+    unless using mlockall(2) (and unlikely even then, unless the
+    --disable-munmap configure option or the "dss:primary" option was
+    specified).  This regression was introduced in 3.1.0 by the
+    mlockall(2)/madvise(2) interaction fix.
+  - Fix TLS-related memory corruption that could occur during thread exit if the
+    thread never allocated memory.  Only the quarantine and prof facilities were
+    susceptible.
+  - Fix two quarantine bugs:
+    + Internal reallocation of the quarantined object array leaked the old
+      array.
+    + Reallocation failure for internal reallocation of the quarantined object
+      array (very unlikely) resulted in memory corruption.
+  - Fix Valgrind integration to annotate all internally allocated memory in a
+    way that keeps Valgrind happy about internal data structure access.
+  - Fix building for s390 systems.
+
 * 3.3.0 (January 23, 2013)
 
   This version includes a few minor performance improvements in addition to the

Modified: head/contrib/jemalloc/FREEBSD-diffs
==============================================================================
--- head/contrib/jemalloc/FREEBSD-diffs Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/FREEBSD-diffs Mon Jun  3 14:36:28 2013        (r251300)
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 0930580..d45fa3d 100644
+index abd5e6f..1d7491a 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -51,12 +51,23 @@
@@ -27,7 +27,7 @@ index 0930580..d45fa3d 100644
        <refsect2>
          <title>Standard API</title>
          <funcprototype>
-@@ -2173,4 +2184,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2180,4 +2191,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
     <para>The <function>posix_memalign<parameter/></function> function conforms
      to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
    </refsect1>
@@ -45,7 +45,7 @@ index 0930580..d45fa3d 100644
 +  </refsect1>
  </refentry>
 diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index c606c12..0d46d9d 100644
+index e46ac54..527449d 100644
 --- a/include/jemalloc/internal/jemalloc_internal.h.in
 +++ b/include/jemalloc/internal/jemalloc_internal.h.in
 @@ -1,5 +1,8 @@
@@ -97,7 +97,7 @@ index de44e14..564d604 100644
  
  bool  malloc_mutex_init(malloc_mutex_t *mutex);
 diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
-index 903fb4d..d6638df 100644
+index 65de316..366676b 100644
 --- a/include/jemalloc/internal/private_namespace.h
 +++ b/include/jemalloc/internal/private_namespace.h
 @@ -216,7 +216,6 @@
@@ -122,10 +122,10 @@ index 31b1304..c3ef2f5 100644
  #define       ALLOCM_LG_ALIGN(la)     (la)
 diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
 new file mode 100644
-index 0000000..9c97a13
+index 0000000..e6c8407
 --- /dev/null
 +++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,76 @@
+@@ -0,0 +1,117 @@
 +/*
 + * Override settings that were generated in jemalloc_defs.h as necessary.
 + */
@@ -196,14 +196,55 @@ index 0000000..9c97a13
 +#define       isthreaded              ((bool)__isthreaded)
 +
 +/* Mangle. */
++#undef je_malloc
++#undef je_calloc
++#undef je_realloc
++#undef je_free
++#undef je_posix_memalign
++#undef je_malloc_usable_size
++#undef je_allocm
++#undef je_rallocm
++#undef je_sallocm
++#undef je_dallocm
++#undef je_nallocm
++#define       je_malloc               __malloc
++#define       je_calloc               __calloc
++#define       je_realloc              __realloc
++#define       je_free                 __free
++#define       je_posix_memalign       __posix_memalign
++#define       je_malloc_usable_size   __malloc_usable_size
++#define       je_allocm               __allocm
++#define       je_rallocm              __rallocm
++#define       je_sallocm              __sallocm
++#define       je_dallocm              __dallocm
++#define       je_nallocm              __nallocm
 +#define       open                    _open
 +#define       read                    _read
 +#define       write                   _write
 +#define       close                   _close
 +#define       pthread_mutex_lock      _pthread_mutex_lock
 +#define       pthread_mutex_unlock    _pthread_mutex_unlock
++
++#ifdef JEMALLOC_C_
++/*
++ * Define 'weak' symbols so that an application can have its own versions
++ * of malloc, calloc, realloc, free, et al.
++ */
++__weak_reference(__malloc, malloc);
++__weak_reference(__calloc, calloc);
++__weak_reference(__realloc, realloc);
++__weak_reference(__free, free);
++__weak_reference(__posix_memalign, posix_memalign);
++__weak_reference(__malloc_usable_size, malloc_usable_size);
++__weak_reference(__allocm, allocm);
++__weak_reference(__rallocm, rallocm);
++__weak_reference(__sallocm, sallocm);
++__weak_reference(__dallocm, dallocm);
++__weak_reference(__nallocm, nallocm);
++#endif
++
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index c117685..665d98f 100644
+index bc350ed..352c98e 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -217,7 +258,7 @@ index c117685..665d98f 100644
  /* Runtime configuration options. */
  const char    *je_malloc_conf;
  bool  opt_abort =
-@@ -453,7 +457,8 @@ malloc_conf_init(void)
+@@ -471,7 +475,8 @@ malloc_conf_init(void)
  #endif
                            ;
  

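A note on the __weak_reference() additions in the jemalloc_FreeBSD.h hunk above: this is the standard FreeBSD libc interposition idiom. The strong definitions keep the double-underscore names, and the public names become weak aliases, so an application that defines its own malloc(), free(), et al. overrides libc's versions at link time, exactly as the header's own comment says. A stand-alone sketch of the idiom, with hypothetical names rather than jemalloc's:

#include <sys/cdefs.h>
#include <stddef.h>

/* Strong symbol: the real implementation lives under this name. */
void *
__my_alloc(size_t size)
{

	(void)size;
	return (NULL);		/* stub body, for illustration only */
}
/* Weak alias: my_alloc resolves here unless something else defines it. */
__weak_reference(__my_alloc, my_alloc);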
Modified: head/contrib/jemalloc/VERSION
==============================================================================
--- head/contrib/jemalloc/VERSION       Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/VERSION       Mon Jun  3 14:36:28 2013        (r251300)
@@ -1 +1 @@
-3.3.0-0-g83789f45307379e096c4e8be81d9e9a51e3f5a4a
+3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775

Modified: head/contrib/jemalloc/doc/jemalloc.3
==============================================================================
--- head/contrib/jemalloc/doc/jemalloc.3        Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/doc/jemalloc.3        Mon Jun  3 14:36:28 2013        (r251300)
@@ -2,12 +2,12 @@
 .\"     Title: JEMALLOC
 .\"    Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\"      Date: 01/23/2013
+.\"      Date: 06/02/2013
 .\"    Manual: User Manual
-.\"    Source: jemalloc 3.3.0-0-g83789f45307379e096c4e8be81d9e9a51e3f5a4a
+.\"    Source: jemalloc 3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775
 .\"  Language: English
 .\"
-.TH "JEMALLOC" "3" "01/23/2013" "jemalloc 3.3.0-0-g83789f453073" "User Manual"
+.TH "JEMALLOC" "3" "06/02/2013" "jemalloc 3.4.0-0-g0ed518e5dab7" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 3\&.3\&.0\-0\-g83789f45307379e096c4e8be81d9e9a51e3f5a4a\&. More information can be found at the
+This manual describes jemalloc 3\&.4\&.0\-0\-g0ed518e5dab789ad2171bb38977a8927e2a26775\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -392,7 +392,19 @@ Once, when the first call is made to one
 The string pointed to by the global variable
 \fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
 /etc/malloc\&.conf, and the value of the environment variable
-\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&.
+\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that
+\fImalloc_conf\fR
+may be read before
+\fBmain\fR\fB\fR
+is entered, so the declaration of
+\fImalloc_conf\fR
+should specify an initializer that contains the final value to be read by jemalloc\&.
+\fImalloc_conf\fR
+is a compile\-time setting, whereas
+/etc/malloc\&.conf
+and
+\fBMALLOC_CONF\fR
+can be safely set any time prior to program invocation\&.
 .PP
 An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
 "opt\&.*"

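The new paragraph in the man page above amounts to a usage rule: malloc_conf may be read before main() is entered, so it must be defined with a compile-time initializer rather than assigned at runtime. A minimal example, reusing the lg_chunk:24 value from the manual's own programlisting:

#include <stdlib.h>

/* Read by jemalloc during early initialization, possibly before main(). */
const char *malloc_conf = "lg_chunk:24";

int
main(void)
{
	void *p = malloc(1);

	free(p);
	return (0);
}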
Modified: head/contrib/jemalloc/include/jemalloc/internal/arena.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/arena.h     Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/include/jemalloc/internal/arena.h     Mon Jun  3 14:36:28 2013        (r251300)
@@ -463,9 +463,9 @@ void        arena_mapbits_small_set(arena_chunk
     size_t runind, size_t binind, size_t flags);
 void   arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
     size_t unzeroed);
-void   arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
-void   arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-void   arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool   arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
+bool   arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
+bool   arena_prof_accum(arena_t *arena, uint64_t accumbytes);
 size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned       arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
@@ -663,7 +663,7 @@ arena_mapbits_unzeroed_set(arena_chunk_t
        *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
 }
 
-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
 {
 
@@ -672,33 +672,40 @@ arena_prof_accum_impl(arena_t *arena, ui
 
        arena->prof_accumbytes += accumbytes;
        if (arena->prof_accumbytes >= prof_interval) {
-               prof_idump();
                arena->prof_accumbytes -= prof_interval;
+               return (true);
        }
+       return (false);
 }
 
-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
 arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
 {
 
        cassert(config_prof);
 
        if (prof_interval == 0)
-               return;
-       arena_prof_accum_impl(arena, accumbytes);
+               return (false);
+       return (arena_prof_accum_impl(arena, accumbytes));
 }
 
-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
 arena_prof_accum(arena_t *arena, uint64_t accumbytes)
 {
 
        cassert(config_prof);
 
        if (prof_interval == 0)
-               return;
-       malloc_mutex_lock(&arena->lock);
-       arena_prof_accum_impl(arena, accumbytes);
-       malloc_mutex_unlock(&arena->lock);
+               return (false);
+
+       {
+               bool ret;
+
+               malloc_mutex_lock(&arena->lock);
+               ret = arena_prof_accum_impl(arena, accumbytes);
+               malloc_mutex_unlock(&arena->lock);
+               return (ret);
+       }
 }
 
 JEMALLOC_ALWAYS_INLINE size_t

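The arena_prof_accum*() signature change above turns "dump now" into "report that a dump is due": prof_idump() may itself take locks and allocate, so invoking it while arena->lock is held is risky, and the new bool return lets the caller trigger the dump after unlocking (see the prof_idump() call sites added in src/arena.c below). A minimal sketch of this deferred-action pattern, with a hypothetical interval constant, not jemalloc's actual code:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	pthread_mutex_t	lock;
	uint64_t	accumbytes;
} arena_sketch_t;

/* Hypothetical dump interval; jemalloc derives its own from run-time options. */
static const uint64_t interval = (uint64_t)1 << 20;

static bool
accum_locked(arena_sketch_t *arena, uint64_t bytes)
{

	/* Bookkeeping only; the caller holds the lock. */
	arena->accumbytes += bytes;
	if (arena->accumbytes >= interval) {
		arena->accumbytes -= interval;
		return (true);	/* A dump is due; act after unlocking. */
	}
	return (false);
}

static void
accum(arena_sketch_t *arena, uint64_t bytes, void (*idump)(void))
{
	bool dump;

	pthread_mutex_lock(&arena->lock);
	dump = accum_locked(arena, bytes);
	pthread_mutex_unlock(&arena->lock);
	if (dump)
		idump();	/* Safe: no arena lock held here. */
}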
Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Mon Jun  3 14:36:28 2013        (r251300)
@@ -278,6 +278,9 @@ static const bool config_ivsalloc =
 #  ifdef __arm__
 #    define LG_QUANTUM         3
 #  endif
+#  ifdef __aarch64__
+#    define LG_QUANTUM         4
+#  endif
 #  ifdef __hppa__
 #    define LG_QUANTUM         4
 #  endif
@@ -287,7 +290,7 @@ static const bool config_ivsalloc =
 #  ifdef __powerpc__
 #    define LG_QUANTUM         4
 #  endif
-#  ifdef __s390x__
+#  ifdef __s390__
 #    define LG_QUANTUM         4
 #  endif
 #  ifdef __SH4__
@@ -440,15 +443,18 @@ static const bool config_ivsalloc =
 } while (0)
 #else
 #define        RUNNING_ON_VALGRIND     ((unsigned)0)
-#define        VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
-#define        VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
-#define        VALGRIND_FREELIKE_BLOCK(addr, rzB)
-#define        VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
-#define        VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
-#define        JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
+#define        VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+    do {} while (0)
+#define        VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+    do {} while (0)
+#define        VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
+#define        VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
+#define        VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define        VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define        JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
 #define        JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,       \
-    old_rzsize, zero)
-#define        JEMALLOC_VALGRIND_FREE(ptr, rzsize)
+    old_rzsize, zero) do {} while (0)
+#define        JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
 #endif
 
 #include "jemalloc/internal/util.h"

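The Valgrind stub rewrite above replaces empty macro expansions with do {} while (0) bodies, so each no-op stub behaves as exactly one complete statement and consumes its trailing semicolon the way a real call would. An illustration (not jemalloc code) of the difference when a stub forms the body of an if:

#define STUB_EMPTY(ptr, len)			/* expands to nothing */
#define STUB_STMT(ptr, len)	do {} while (0)	/* one complete statement */

void
annotate(void *ptr, unsigned len, int enabled)
{

	(void)ptr;
	(void)len;
	if (enabled)
		STUB_EMPTY(ptr, len);	/* leaves a bare ";" as the if body */
	if (enabled)
		STUB_STMT(ptr, len);	/* a proper single-statement body */
}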
Modified: head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Mon Jun  3 14:36:28 2013        (r251300)
@@ -305,7 +305,13 @@
 #define        prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
 #define        prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
 #define        quarantine JEMALLOC_N(quarantine)
+#define        quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
 #define        quarantine_boot JEMALLOC_N(quarantine_boot)
+#define        quarantine_booted JEMALLOC_N(quarantine_booted)
+#define        quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
+#define        quarantine_init JEMALLOC_N(quarantine_init)
+#define        quarantine_tls JEMALLOC_N(quarantine_tls)
+#define        quarantine_tsd JEMALLOC_N(quarantine_tsd)
 #define        quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
 #define        quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
 #define        quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)

Modified: head/contrib/jemalloc/include/jemalloc/internal/prof.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/prof.h      Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/include/jemalloc/internal/prof.h      Mon Jun  3 14:36:28 2013        (r251300)
@@ -237,7 +237,7 @@ void        prof_postfork_child(void);
                                                                        \
        assert(size == s2u(size));                                      \
                                                                        \
-       prof_tdata = prof_tdata_get();                                  \
+       prof_tdata = prof_tdata_get(true);                              \
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
                if (prof_tdata != NULL)                                 \
                        ret = (prof_thr_cnt_t *)(uintptr_t)1U;          \
@@ -286,7 +286,7 @@ void        prof_postfork_child(void);
 #ifndef JEMALLOC_ENABLE_INLINE
 malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
 
-prof_tdata_t   *prof_tdata_get(void);
+prof_tdata_t   *prof_tdata_get(bool create);
 void   prof_sample_threshold_update(prof_tdata_t *prof_tdata);
 prof_ctx_t     *prof_ctx_get(const void *ptr);
 void   prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
@@ -304,17 +304,15 @@ malloc_tsd_funcs(JEMALLOC_INLINE, prof_t
     prof_tdata_cleanup)
 
 JEMALLOC_INLINE prof_tdata_t *
-prof_tdata_get(void)
+prof_tdata_get(bool create)
 {
        prof_tdata_t *prof_tdata;
 
        cassert(config_prof);
 
        prof_tdata = *prof_tdata_tsd_get();
-       if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {
-               if (prof_tdata == NULL)
-                       prof_tdata = prof_tdata_init();
-       }
+       if (create && prof_tdata == NULL)
+               prof_tdata = prof_tdata_init();
 
        return (prof_tdata);
 }
@@ -397,7 +395,7 @@ prof_sample_accum_update(size_t size)
        /* Sampling logic is unnecessary if the interval is 1. */
        assert(opt_lg_prof_sample != 0);
 
-       prof_tdata = *prof_tdata_tsd_get();
+       prof_tdata = prof_tdata_get(false);
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
                return (true);
 

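The new create parameter above lets each call site choose between lazily creating the per-thread profiling data and merely observing it: the allocation-time macro PROF_ALLOC_PREP passes true, while the dump and sampling paths (see src/prof.c below) pass false, since allocating there could recurse into the allocator or touch TLS that is being torn down. A simplified sketch, with hypothetical helper names standing in for jemalloc's TSD machinery:

#include <stdbool.h>
#include <stddef.h>

typedef struct tdata_s tdata_t;

/* Assumed helpers; jemalloc's real TSD accessors differ. */
extern tdata_t	*tdata_tls_read(void);
extern tdata_t	*tdata_create(void);

static tdata_t *
tdata_get(bool create)
{
	tdata_t *tdata = tdata_tls_read();

	if (create && tdata == NULL)
		tdata = tdata_create();	/* Lazy init on allocation paths only. */
	return (tdata);
}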
Modified: head/contrib/jemalloc/include/jemalloc/internal/quarantine.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/quarantine.h        Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/include/jemalloc/internal/quarantine.h        Mon Jun  3 14:36:28 2013        (r251300)
@@ -1,6 +1,9 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
+typedef struct quarantine_obj_s quarantine_obj_t;
+typedef struct quarantine_s quarantine_t;
+
 /* Default per thread quarantine size if valgrind is enabled. */
 #define        JEMALLOC_VALGRIND_QUARANTINE_DEFAULT    (ZU(1) << 24)
 
@@ -8,17 +11,57 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+struct quarantine_obj_s {
+       void    *ptr;
+       size_t  usize;
+};
+
+struct quarantine_s {
+       size_t                  curbytes;
+       size_t                  curobjs;
+       size_t                  first;
+#define        LG_MAXOBJS_INIT 10
+       size_t                  lg_maxobjs;
+       quarantine_obj_t        objs[1]; /* Dynamically sized ring buffer. */
+};
+
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
+quarantine_t   *quarantine_init(size_t lg_maxobjs);
 void   quarantine(void *ptr);
+void   quarantine_cleanup(void *arg);
 bool   quarantine_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)
+
+void   quarantine_alloc_hook(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
+malloc_tsd_externs(quarantine, quarantine_t *)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
+    quarantine_cleanup)
+
+JEMALLOC_ALWAYS_INLINE void
+quarantine_alloc_hook(void)
+{
+       quarantine_t *quarantine;
+
+       assert(config_fill && opt_quarantine);
+
+       quarantine = *quarantine_tsd_get();
+       if (quarantine == NULL)
+               quarantine_init(LG_MAXOBJS_INIT);
+}
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
 

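The quarantine_s structure moved into this header ends with objs[1], the pre-C99 idiom for a dynamically sized trailing array backing a ring buffer whose capacity is 1 << lg_maxobjs. A sketch of how such a buffer is typically sized and indexed (illustrative only; jemalloc's eviction and byte accounting are omitted):

#include <stddef.h>
#include <stdlib.h>

typedef struct { void *ptr; size_t usize; } qobj_t;

typedef struct {
	size_t	curobjs;
	size_t	first;		/* index of the oldest element */
	size_t	lg_maxobjs;	/* capacity is 1 << lg_maxobjs */
	qobj_t	objs[1];	/* storage continues past the struct */
} q_t;

static q_t *
q_new(size_t lg_maxobjs)
{
	size_t nobjs = (size_t)1 << lg_maxobjs;
	q_t *q = calloc(1, offsetof(q_t, objs) + nobjs * sizeof(qobj_t));

	if (q != NULL)
		q->lg_maxobjs = lg_maxobjs;
	return (q);
}

static void
q_push(q_t *q, void *ptr, size_t usize)
{
	/* Mask instead of %: the capacity is a power of two. */
	size_t i = (q->first + q->curobjs) & (((size_t)1 << q->lg_maxobjs) - 1);

	q->objs[i].ptr = ptr;
	q->objs[i].usize = usize;
	q->curobjs++;		/* Eviction when full is elided here. */
}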
Modified: head/contrib/jemalloc/include/jemalloc/internal/tcache.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/tcache.h    Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/include/jemalloc/internal/tcache.h    Mon Jun  3 14:36:28 2013        (r251300)
@@ -320,8 +320,8 @@ tcache_alloc_small(tcache_t *tcache, siz
                }
                VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                memset(ret, 0, size);
-               VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        }
+       VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
        if (config_stats)
                tbin->tstats.nrequests++;
@@ -371,8 +371,8 @@ tcache_alloc_large(tcache_t *tcache, siz
                } else {
                        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                        memset(ret, 0, size);
-                       VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                }
+               VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
                if (config_stats)
                        tbin->tstats.nrequests++;

Modified: head/contrib/jemalloc/include/jemalloc/jemalloc.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/jemalloc.h   Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/include/jemalloc/jemalloc.h   Mon Jun  3 14:36:28 2013        (r251300)
@@ -7,12 +7,12 @@ extern "C" {
 #include <limits.h>
 #include <strings.h>
 
-#define        JEMALLOC_VERSION "3.3.0-0-g83789f45307379e096c4e8be81d9e9a51e3f5a4a"
+#define        JEMALLOC_VERSION "3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775"
 #define        JEMALLOC_VERSION_MAJOR 3
-#define        JEMALLOC_VERSION_MINOR 3
+#define        JEMALLOC_VERSION_MINOR 4
 #define        JEMALLOC_VERSION_BUGFIX 0
 #define        JEMALLOC_VERSION_NREV 0
-#define        JEMALLOC_VERSION_GID "83789f45307379e096c4e8be81d9e9a51e3f5a4a"
+#define        JEMALLOC_VERSION_GID "0ed518e5dab789ad2171bb38977a8927e2a26775"
 
 #include "jemalloc_defs.h"
 #include "jemalloc_FreeBSD.h"

Modified: head/contrib/jemalloc/src/arena.c
==============================================================================
--- head/contrib/jemalloc/src/arena.c   Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/src/arena.c   Mon Jun  3 14:36:28 2013        (r251300)
@@ -366,8 +366,6 @@ arena_run_zero(arena_chunk_t *chunk, siz
            LG_PAGE)), (npages << LG_PAGE));
        memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
            (npages << LG_PAGE));
-       VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
-           LG_PAGE)), (npages << LG_PAGE));
 }
 
 static inline void
@@ -380,8 +378,6 @@ arena_run_page_validate_zeroed(arena_chu
            LG_PAGE)), PAGE);
        for (i = 0; i < PAGE / sizeof(size_t); i++)
                assert(p[i] == 0);
-       VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
-           LG_PAGE)), PAGE);
 }
 
 static void
@@ -513,6 +509,8 @@ arena_run_split(arena_t *arena, arena_ru
                            run_ind+need_pages-1);
                }
        }
+       VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+           LG_PAGE)), (need_pages << LG_PAGE));
 }
 
 static arena_chunk_t *
@@ -574,6 +572,11 @@ arena_chunk_alloc(arena_t *arena)
                        for (i = map_bias+1; i < chunk_npages-1; i++)
                                arena_mapbits_unzeroed_set(chunk, i, unzeroed);
                } else if (config_debug) {
+                       VALGRIND_MAKE_MEM_DEFINED(
+                           (void *)arena_mapp_get(chunk, map_bias+1),
+                           (void *)((uintptr_t)
+                           arena_mapp_get(chunk, chunk_npages-1)
+                           - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
                        for (i = map_bias+1; i < chunk_npages-1; i++) {
                                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                                    unzeroed);
@@ -1246,8 +1249,6 @@ arena_bin_nonfull_run_get(arena_t *arena
                    (uintptr_t)bin_info->bitmap_offset);
 
                /* Initialize run internals. */
-               VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
-                   bin_info->redzone_size);
                run->bin = bin;
                run->nextind = 0;
                run->nfree = bin_info->nregs;
@@ -1337,8 +1338,8 @@ arena_tcache_fill_small(arena_t *arena, 
 
        assert(tbin->ncached == 0);
 
-       if (config_prof)
-               arena_prof_accum(arena, prof_accumbytes);
+       if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+               prof_idump();
        bin = &arena->bins[binind];
        malloc_mutex_lock(&bin->lock);
        for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1446,8 +1447,8 @@ arena_malloc_small(arena_t *arena, size_
                bin->stats.nrequests++;
        }
        malloc_mutex_unlock(&bin->lock);
-       if (config_prof && isthreaded == false)
-               arena_prof_accum(arena, size);
+       if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+               prof_idump();
 
        if (zero == false) {
                if (config_fill) {
@@ -1464,8 +1465,8 @@ arena_malloc_small(arena_t *arena, size_
                }
                VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                memset(ret, 0, size);
-               VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        }
+       VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
        return (ret);
 }
@@ -1474,6 +1475,7 @@ void *
 arena_malloc_large(arena_t *arena, size_t size, bool zero)
 {
        void *ret;
+       UNUSED bool idump;
 
        /* Large allocation. */
        size = PAGE_CEILING(size);
@@ -1492,8 +1494,10 @@ arena_malloc_large(arena_t *arena, size_
                arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
        }
        if (config_prof)
-               arena_prof_accum_locked(arena, size);
+               idump = arena_prof_accum_locked(arena, size);
        malloc_mutex_unlock(&arena->lock);
+       if (config_prof && idump)
+               prof_idump();
 
        if (zero == false) {
                if (config_fill) {

Modified: head/contrib/jemalloc/src/base.c
==============================================================================
--- head/contrib/jemalloc/src/base.c    Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/src/base.c    Mon Jun  3 14:36:28 2013        (r251300)
@@ -63,6 +63,7 @@ base_alloc(size_t size)
        ret = base_next_addr;
        base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
        malloc_mutex_unlock(&base_mtx);
+       VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
 
        return (ret);
 }
@@ -88,6 +89,7 @@ base_node_alloc(void)
                ret = base_nodes;
                base_nodes = *(extent_node_t **)ret;
                malloc_mutex_unlock(&base_mtx);
+               VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
        } else {
                malloc_mutex_unlock(&base_mtx);
                ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
 base_node_dealloc(extent_node_t *node)
 {
 
+       VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
        malloc_mutex_lock(&base_mtx);
        *(extent_node_t **)node = base_nodes;
        base_nodes = node;

Modified: head/contrib/jemalloc/src/chunk.c
==============================================================================
--- head/contrib/jemalloc/src/chunk.c   Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/src/chunk.c   Mon Jun  3 14:36:28 2013        (r251300)
@@ -111,6 +111,7 @@ chunk_recycle(extent_tree_t *chunks_szad
                }
                node->addr = (void *)((uintptr_t)(ret) + size);
                node->size = trailsize;
+               node->zeroed = zeroed;
                extent_tree_szad_insert(chunks_szad, node);
                extent_tree_ad_insert(chunks_ad, node);
                node = NULL;
@@ -119,7 +120,6 @@ chunk_recycle(extent_tree_t *chunks_szad
 
        if (node != NULL)
                base_node_dealloc(node);
-       VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        if (*zero) {
                if (zeroed == false)
                        memset(ret, 0, size);
@@ -130,7 +130,6 @@ chunk_recycle(extent_tree_t *chunks_szad
                        VALGRIND_MAKE_MEM_DEFINED(ret, size);
                        for (i = 0; i < size / sizeof(size_t); i++)
                                assert(p[i] == 0);
-                       VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                }
        }
        return (ret);
@@ -179,27 +178,32 @@ chunk_alloc(size_t size, size_t alignmen
        /* All strategies for allocation failed. */
        ret = NULL;
 label_return:
-       if (config_ivsalloc && base == false && ret != NULL) {
-               if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
-                       chunk_dealloc(ret, size, true);
-                       return (NULL);
+       if (ret != NULL) {
+               if (config_ivsalloc && base == false) {
+                       if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+                               chunk_dealloc(ret, size, true);
+                               return (NULL);
+                       }
                }
-       }
-       if ((config_stats || config_prof) && ret != NULL) {
-               bool gdump;
-               malloc_mutex_lock(&chunks_mtx);
-               if (config_stats)
-                       stats_chunks.nchunks += (size / chunksize);
-               stats_chunks.curchunks += (size / chunksize);
-               if (stats_chunks.curchunks > stats_chunks.highchunks) {
-                       stats_chunks.highchunks = stats_chunks.curchunks;
-                       if (config_prof)
-                               gdump = true;
-               } else if (config_prof)
-                       gdump = false;
-               malloc_mutex_unlock(&chunks_mtx);
-               if (config_prof && opt_prof && opt_prof_gdump && gdump)
-                       prof_gdump();
+               if (config_stats || config_prof) {
+                       bool gdump;
+                       malloc_mutex_lock(&chunks_mtx);
+                       if (config_stats)
+                               stats_chunks.nchunks += (size / chunksize);
+                       stats_chunks.curchunks += (size / chunksize);
+                       if (stats_chunks.curchunks > stats_chunks.highchunks) {
+                               stats_chunks.highchunks =
+                                   stats_chunks.curchunks;
+                               if (config_prof)
+                                       gdump = true;
+                       } else if (config_prof)
+                               gdump = false;
+                       malloc_mutex_unlock(&chunks_mtx);
+                       if (config_prof && opt_prof && opt_prof_gdump && gdump)
+                               prof_gdump();
+               }
+               if (config_valgrind)
+                       VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        }
        assert(CHUNK_ADDR2BASE(ret) == ret);
        return (ret);
@@ -210,9 +214,10 @@ chunk_record(extent_tree_t *chunks_szad,
     size_t size)
 {
        bool unzeroed;
-       extent_node_t *xnode, *node, *prev, key;
+       extent_node_t *xnode, *node, *prev, *xprev, key;
 
        unzeroed = pages_purge(chunk, size);
+       VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
        /*
         * Allocate a node before acquiring chunks_mtx even though it might not
@@ -221,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad,
         * held.
         */
        xnode = base_node_alloc();
+       /* Use xprev to implement conditional deferred deallocation of prev. */
+       xprev = NULL;
 
        malloc_mutex_lock(&chunks_mtx);
        key.addr = (void *)((uintptr_t)chunk + size);
@@ -237,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad,
                node->size += size;
                node->zeroed = (node->zeroed && (unzeroed == false));
                extent_tree_szad_insert(chunks_szad, node);
-               if (xnode != NULL)
-                       base_node_dealloc(xnode);
        } else {
                /* Coalescing forward failed, so insert a new node. */
                if (xnode == NULL) {
@@ -248,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad,
                         * already been purged, so this is only a virtual
                         * memory leak.
                         */
-                       malloc_mutex_unlock(&chunks_mtx);
-                       return;
+                       goto label_return;
                }
                node = xnode;
+               xnode = NULL; /* Prevent deallocation below. */
                node->addr = chunk;
                node->size = size;
                node->zeroed = (unzeroed == false);
@@ -277,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad,
                node->zeroed = (node->zeroed && prev->zeroed);
                extent_tree_szad_insert(chunks_szad, node);
 
-               base_node_dealloc(prev);
+               xprev = prev;
        }
+
+label_return:
        malloc_mutex_unlock(&chunks_mtx);
+       /*
+        * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+        * avoid potential deadlock.
+        */
+       if (xnode != NULL)
+               base_node_dealloc(xnode);
+       if (xprev != NULL)
+               base_node_dealloc(prev);
 }
 
 void

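The chunk_record() rework above is the race-triggered deadlock fix named in the ChangeLog: base_node_dealloc() used to be called with chunks_mtx still held, and, as the new comment in the hunk states, deferring the deallocation until after the unlock avoids the potential deadlock (presumably a lock-order inversion against a thread that takes the base allocator's lock first). A reduced sketch of the remember-then-free pattern, not jemalloc's actual code:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_mutex_t tree_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Assumed helper: internally takes a second mutex (like base_mtx). */
extern void node_dealloc(struct node *node);

static void
record(struct node *orphan)
{
	struct node *defer = NULL;

	pthread_mutex_lock(&tree_mtx);
	/* ... coalescing that may leave a node unused ... */
	defer = orphan;			/* Remember it; do not free yet. */
	pthread_mutex_unlock(&tree_mtx);

	/* Safe: tree_mtx is not held when the second lock is taken. */
	if (defer != NULL)
		node_dealloc(defer);
}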
Modified: head/contrib/jemalloc/src/chunk_dss.c
==============================================================================
--- head/contrib/jemalloc/src/chunk_dss.c       Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/src/chunk_dss.c       Mon Jun  3 14:36:28 2013        (r251300)
@@ -127,7 +127,6 @@ chunk_alloc_dss(size_t size, size_t alig
                                if (*zero) {
                                        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                                        memset(ret, 0, size);
-                                       VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                                }
                                return (ret);
                        }

Modified: head/contrib/jemalloc/src/jemalloc.c
==============================================================================
--- head/contrib/jemalloc/src/jemalloc.c        Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/src/jemalloc.c        Mon Jun  3 14:36:28 2013        (r251300)
@@ -286,12 +286,30 @@ arenas_cleanup(void *arg)
        malloc_mutex_unlock(&arenas_lock);
 }
 
+static JEMALLOC_ATTR(always_inline) void
+malloc_thread_init(void)
+{
+
+       /*
+        * TSD initialization can't be safely done as a side effect of
+        * deallocation, because it is possible for a thread to do nothing but
+        * deallocate its TLS data via free(), in which case writing to TLS
+        * would cause write-after-free memory corruption.  The quarantine
+        * facility *only* gets used as a side effect of deallocation, so make
+        * a best effort attempt at initializing its TSD by hooking all
+        * allocation events.
+        */
+       if (config_fill && opt_quarantine)
+               quarantine_alloc_hook();
+}
+
 static JEMALLOC_ATTR(always_inline) bool
 malloc_init(void)
 {
 
-       if (malloc_initialized == false)
-               return (malloc_init_hard());
+       if (malloc_initialized == false && malloc_init_hard())
+               return (true);
+       malloc_thread_init();
 
        return (false);
 }
@@ -1100,6 +1118,7 @@ je_realloc(void *ptr, size_t size)
        if (size == 0) {
                if (ptr != NULL) {
                        /* realloc(ptr, 0) is equivalent to free(p). */
+                       assert(malloc_initialized || IS_INITIALIZER);
                        if (config_prof) {
                                old_size = isalloc(ptr, true);
                                if (config_valgrind && opt_valgrind)
@@ -1125,6 +1144,7 @@ je_realloc(void *ptr, size_t size)
 
        if (ptr != NULL) {
                assert(malloc_initialized || IS_INITIALIZER);
+               malloc_thread_init();
 
                if (config_prof) {
                        old_size = isalloc(ptr, true);
@@ -1328,6 +1348,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SI
        size_t ret;
 
        assert(malloc_initialized || IS_INITIALIZER);
+       malloc_thread_init();
 
        if (config_ivsalloc)
                ret = ivsalloc(ptr, config_prof);
@@ -1502,6 +1523,7 @@ je_rallocm(void **ptr, size_t *rsize, si
        assert(size != 0);
        assert(SIZE_T_MAX - size >= extra);
        assert(malloc_initialized || IS_INITIALIZER);
+       malloc_thread_init();
 
        if (arena_ind != UINT_MAX) {
                arena_chunk_t *chunk;
@@ -1616,6 +1638,7 @@ je_sallocm(const void *ptr, size_t *rsiz
        size_t sz;
 
        assert(malloc_initialized || IS_INITIALIZER);
+       malloc_thread_init();
 
        if (config_ivsalloc)
                sz = ivsalloc(ptr, config_prof);
@@ -1735,12 +1758,12 @@ _malloc_prefork(void)
 
        /* Acquire all mutexes in a safe order. */
        ctl_prefork();
+       prof_prefork();
        malloc_mutex_prefork(&arenas_lock);
        for (i = 0; i < narenas_total; i++) {
                if (arenas[i] != NULL)
                        arena_prefork(arenas[i]);
        }
-       prof_prefork();
        chunk_prefork();
        base_prefork();
        huge_prefork();
@@ -1766,12 +1789,12 @@ _malloc_postfork(void)
        huge_postfork_parent();
        base_postfork_parent();
        chunk_postfork_parent();
-       prof_postfork_parent();
        for (i = 0; i < narenas_total; i++) {
                if (arenas[i] != NULL)
                        arena_postfork_parent(arenas[i]);
        }
        malloc_mutex_postfork_parent(&arenas_lock);
+       prof_postfork_parent();
        ctl_postfork_parent();
 }
 
@@ -1786,12 +1809,12 @@ jemalloc_postfork_child(void)
        huge_postfork_child();
        base_postfork_child();
        chunk_postfork_child();
-       prof_postfork_child();
        for (i = 0; i < narenas_total; i++) {
                if (arenas[i] != NULL)
                        arena_postfork_child(arenas[i]);
        }
        malloc_mutex_postfork_child(&arenas_lock);
+       prof_postfork_child();
        ctl_postfork_child();
 }
 

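Two changes above are worth flagging. First, malloc_thread_init() hooks allocation events so that quarantine TSD is only ever created on allocation, never as a side effect of free(); the comment in the hunk explains the write-after-free hazard. Second, prof_prefork() and its postfork counterparts moved so that the fork-time lock acquisition order is the exact reverse of the release order. A reduced sketch of that atfork discipline, with hypothetical locks:

#include <pthread.h>

static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)				/* registered with pthread_atfork() */
{

	pthread_mutex_lock(&m1);	/* always acquire in the same order... */
	pthread_mutex_lock(&m2);
}

static void
postfork(void)				/* runs in both parent and child */
{

	pthread_mutex_unlock(&m2);	/* ...and release strictly in reverse */
	pthread_mutex_unlock(&m1);
}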
Modified: head/contrib/jemalloc/src/prof.c
==============================================================================
--- head/contrib/jemalloc/src/prof.c    Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/src/prof.c    Mon Jun  3 14:36:28 2013        (r251300)
@@ -438,7 +438,7 @@ prof_lookup(prof_bt_t *bt)
 
        cassert(config_prof);
 
-       prof_tdata = prof_tdata_get();
+       prof_tdata = prof_tdata_get(false);
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
                return (NULL);
 
@@ -684,7 +684,7 @@ prof_ctx_destroy(prof_ctx_t *ctx)
         * avoid a race between the main body of prof_ctx_merge() and entry
         * into this function.
         */
-       prof_tdata = *prof_tdata_tsd_get();
+       prof_tdata = prof_tdata_get(false);
        assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
        prof_enter(prof_tdata);
        malloc_mutex_lock(ctx->lock);
@@ -844,7 +844,7 @@ prof_dump(bool propagate_err, const char
 
        cassert(config_prof);
 
-       prof_tdata = prof_tdata_get();
+       prof_tdata = prof_tdata_get(false);
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
                return (true);
        prof_enter(prof_tdata);
@@ -966,11 +966,7 @@ prof_idump(void)
 
        if (prof_booted == false)
                return;
-       /*
-        * Don't call prof_tdata_get() here, because it could cause recursive
-        * allocation.
-        */
-       prof_tdata = *prof_tdata_tsd_get();
+       prof_tdata = prof_tdata_get(false);
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
                return;
        if (prof_tdata->enq) {
@@ -1020,11 +1016,7 @@ prof_gdump(void)
 
        if (prof_booted == false)
                return;
-       /*
-        * Don't call prof_tdata_get() here, because it could cause recursive
-        * allocation.
-        */
-       prof_tdata = *prof_tdata_tsd_get();
+       prof_tdata = prof_tdata_get(false);
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
                return;
        if (prof_tdata->enq) {

Modified: head/contrib/jemalloc/src/quarantine.c
==============================================================================
--- head/contrib/jemalloc/src/quarantine.c      Mon Jun  3 14:33:10 2013        (r251299)
+++ head/contrib/jemalloc/src/quarantine.c      Mon Jun  3 14:36:28 2013        (r251300)
@@ -1,3 +1,4 @@
+#define        JEMALLOC_QUARANTINE_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
 /*
@@ -11,39 +12,18 @@
 /******************************************************************************/
 /* Data. */
 
-typedef struct quarantine_obj_s quarantine_obj_t;
-typedef struct quarantine_s quarantine_t;
-
-struct quarantine_obj_s {
-       void    *ptr;
-       size_t  usize;
-};
-
-struct quarantine_s {
-       size_t                  curbytes;
-       size_t                  curobjs;
-       size_t                  first;
-#define        LG_MAXOBJS_INIT 10
-       size_t                  lg_maxobjs;
-       quarantine_obj_t        objs[1]; /* Dynamically sized ring buffer. */
-};
-
-static void    quarantine_cleanup(void *arg);
-
-malloc_tsd_data(static, quarantine, quarantine_t *, NULL)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***