Date: Wednesday, June 21, 2017 @ 07:08:48
  Author: eworm
Revision: 299098

upgpkg: jemalloc 1:5.0.0-3

Back to 5.0.0, including some extra patches (FS#54483: [jemalloc] Crashes QEMU)

https://bugs.archlinux.org/task/54483
https://github.com/jemalloc/jemalloc/issues/915

Added:
  jemalloc/trunk/0002-clear-tcache_ql-after-fork-in-child.patch
  jemalloc/trunk/0003-Add-minimal-initialized-tsd.patch
Modified:
  jemalloc/trunk/PKGBUILD

------------------------------------------------+
 0002-clear-tcache_ql-after-fork-in-child.patch |  194 +++++++++++++++++++++++
 0003-Add-minimal-initialized-tsd.patch         |   37 ++++
 PKGBUILD                                       |   21 ++
 3 files changed, 248 insertions(+), 4 deletions(-)

Added: 0002-clear-tcache_ql-after-fork-in-child.patch
===================================================================
--- 0002-clear-tcache_ql-after-fork-in-child.patch                             (rev 0)
+++ 0002-clear-tcache_ql-after-fork-in-child.patch      2017-06-21 07:08:48 UTC (rev 299098)
@@ -0,0 +1,194 @@
+From 9b1befabbb7a7105501d27843873d14e1c2de54b Mon Sep 17 00:00:00 2001
+From: Qi Wang <[email protected]>
+Date: Thu, 15 Jun 2017 16:53:22 -0700
+Subject: [PATCH] Add minimal initialized TSD.
+
+We use the minimal_initialized tsd (which requires no cleanup) for free()
+specifically, if tsd hasn't been initialized yet.
+
+Any other activity will transit the state from minimal to normal.  This is to
+workaround the case where a thread has no malloc calls in its lifetime until
+during thread termination, free() happens after tls destructors.
+---
+ include/jemalloc/internal/tsd.h | 30 ++++++++++++++++++++--------
+ src/jemalloc.c                  | 10 +++++++++-
+ src/tsd.c                       | 44 +++++++++++++++++++++++++++--------------
+ 3 files changed, 60 insertions(+), 24 deletions(-)
+
+diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
+index 631fbf1f..155a2ec6 100644
+--- a/include/jemalloc/internal/tsd.h
++++ b/include/jemalloc/internal/tsd.h
+@@ -99,9 +99,10 @@ enum {
+       tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
+       /* the above 2 nominal states should be lower values. */
+       tsd_state_nominal_max = 1, /* used for comparison only. */
+-      tsd_state_purgatory = 2,
+-      tsd_state_reincarnated = 3,
+-      tsd_state_uninitialized = 4
++      tsd_state_minimal_initialized = 2,
++      tsd_state_purgatory = 3,
++      tsd_state_reincarnated = 4,
++      tsd_state_uninitialized = 5
+ };
+ 
+ /* Manually limit tsd_state_t to a single byte. */
+@@ -190,7 +191,8 @@ JEMALLOC_ALWAYS_INLINE t *                                \
+ tsd_##n##p_get(tsd_t *tsd) {                                          \
+       assert(tsd->state == tsd_state_nominal ||                       \
+           tsd->state == tsd_state_nominal_slow ||                     \
+-          tsd->state == tsd_state_reincarnated);                      \
++          tsd->state == tsd_state_reincarnated ||                     \
++          tsd->state == tsd_state_minimal_initialized);               \
+       return tsd_##n##p_get_unsafe(tsd);                              \
+ }
+ MALLOC_TSD
+@@ -225,7 +227,8 @@ MALLOC_TSD
+ #define O(n, t, nt)                                                   \
+ JEMALLOC_ALWAYS_INLINE void                                           \
+ tsd_##n##_set(tsd_t *tsd, t val) {                                    \
+-      assert(tsd->state != tsd_state_reincarnated);                   \
++      assert(tsd->state != tsd_state_reincarnated &&                  \
++          tsd->state != tsd_state_minimal_initialized);               \
+       *tsd_##n##p_get(tsd) = val;                                     \
+ }
+ MALLOC_TSD
+@@ -248,7 +251,7 @@ tsd_fast(tsd_t *tsd) {
+ }
+ 
+ JEMALLOC_ALWAYS_INLINE tsd_t *
+-tsd_fetch_impl(bool init, bool internal) {
++tsd_fetch_impl(bool init, bool minimal) {
+       tsd_t *tsd = tsd_get(init);
+ 
+       if (!init && tsd_get_allocates() && tsd == NULL) {
+@@ -257,7 +260,7 @@ tsd_fetch_impl(bool init, bool internal) {
+       assert(tsd != NULL);
+ 
+       if (unlikely(tsd->state != tsd_state_nominal)) {
+-              return tsd_fetch_slow(tsd, internal);
++              return tsd_fetch_slow(tsd, minimal);
+       }
+       assert(tsd_fast(tsd));
+       tsd_assert_fast(tsd);
+@@ -265,11 +268,22 @@ tsd_fetch_impl(bool init, bool internal) {
+       return tsd;
+ }
+ 
++/* Get a minimal TSD that requires no cleanup.  See comments in free(). */
+ JEMALLOC_ALWAYS_INLINE tsd_t *
+-tsd_internal_fetch(void) {
++tsd_fetch_min(void) {
+       return tsd_fetch_impl(true, true);
+ }
+ 
++/* For internal background threads use only. */
++JEMALLOC_ALWAYS_INLINE tsd_t *
++tsd_internal_fetch(void) {
++      tsd_t *tsd = tsd_fetch_min();
++      /* Use reincarnated state to prevent full initialization. */
++      tsd->state = tsd_state_reincarnated;
++
++      return tsd;
++}
++
+ JEMALLOC_ALWAYS_INLINE tsd_t *
+ tsd_fetch(void) {
+       return tsd_fetch_impl(true, false);
+diff --git a/src/jemalloc.c b/src/jemalloc.c
+index 52c86aa6..c773cc44 100644
+--- a/src/jemalloc.c
++++ b/src/jemalloc.c
+@@ -2264,7 +2264,15 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+ je_free(void *ptr) {
+       UTRACE(ptr, 0, 0);
+       if (likely(ptr != NULL)) {
+-              tsd_t *tsd = tsd_fetch();
++              /*
++               * We avoid setting up tsd fully (e.g. tcache, arena binding)
++               * based on only free() calls -- other activities trigger the
++               * minimal to full transition.  This is because free() may
++               * happen during thread shutdown after tls deallocation: if a
++               * thread never had any malloc activities until then, a
++               * fully-setup tsd won't be destructed properly.
++               */
++              tsd_t *tsd = tsd_fetch_min();
+               check_entry_exit_locking(tsd_tsdn(tsd));
+ 
+               tcache_t *tcache;
+diff --git a/src/tsd.c b/src/tsd.c
+index 97330332..f968992f 100644
+--- a/src/tsd.c
++++ b/src/tsd.c
+@@ -87,7 +87,8 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) {
+ 
+ static bool
+ tsd_data_init_nocleanup(tsd_t *tsd) {
+-      assert(tsd->state == tsd_state_reincarnated);
++      assert(tsd->state == tsd_state_reincarnated ||
++          tsd->state == tsd_state_minimal_initialized);
+       /*
+        * During reincarnation, there is no guarantee that the cleanup function
+        * will be called (deallocation may happen after all tsd destructors).
+@@ -103,15 +104,8 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
+ }
+ 
+ tsd_t *
+-tsd_fetch_slow(tsd_t *tsd, bool internal) {
+-      if (internal) {
+-              /* For internal background threads use only. */
+-              assert(tsd->state == tsd_state_uninitialized);
+-              tsd->state = tsd_state_reincarnated;
+-              tsd_set(tsd);
+-              tsd_data_init_nocleanup(tsd);
+-              return tsd;
+-      }
++tsd_fetch_slow(tsd_t *tsd, bool minimal) {
++      assert(!tsd_fast(tsd));
+ 
+       if (tsd->state == tsd_state_nominal_slow) {
+               /* On slow path but no work needed. */
+@@ -119,11 +113,28 @@ tsd_fetch_slow(tsd_t *tsd, bool internal) {
+                   tsd_reentrancy_level_get(tsd) > 0 ||
+                   *tsd_arenas_tdata_bypassp_get(tsd));
+       } else if (tsd->state == tsd_state_uninitialized) {
+-              tsd->state = tsd_state_nominal;
+-              tsd_slow_update(tsd);
+-              /* Trigger cleanup handler registration. */
+-              tsd_set(tsd);
+-              tsd_data_init(tsd);
++              if (!minimal) {
++                      tsd->state = tsd_state_nominal;
++                      tsd_slow_update(tsd);
++                      /* Trigger cleanup handler registration. */
++                      tsd_set(tsd);
++                      tsd_data_init(tsd);
++              } else {
++                      tsd->state = tsd_state_minimal_initialized;
++                      tsd_set(tsd);
++                      tsd_data_init_nocleanup(tsd);
++              }
++      } else if (tsd->state == tsd_state_minimal_initialized) {
++              if (!minimal) {
++                      /* Switch to fully initialized. */
++                      tsd->state = tsd_state_nominal;
++                      assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
++                      (*tsd_reentrancy_levelp_get(tsd))--;
++                      tsd_slow_update(tsd);
++                      tsd_data_init(tsd);
++              } else {
++                      assert_tsd_data_cleanup_done(tsd);
++              }
+       } else if (tsd->state == tsd_state_purgatory) {
+               tsd->state = tsd_state_reincarnated;
+               tsd_set(tsd);
+@@ -197,6 +208,9 @@ tsd_cleanup(void *arg) {
+       case tsd_state_uninitialized:
+               /* Do nothing. */
+               break;
++      case tsd_state_minimal_initialized:
++              /* This implies the thread only did free() in its life time. */
++              /* Fall through. */
+       case tsd_state_reincarnated:
+               /*
+                * Reincarnated means another destructor deallocated memory

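For context, the scenario this first patch describes (a thread whose only allocator activity is a free() that runs from a TLS destructor, possibly after jemalloc's own TSD cleanup has already executed) can be sketched roughly as below. This is a hypothetical illustration of the pattern, with made-up names like free_on_exit and worker, not code taken from the bug report:

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t key;

/*
 * TLS destructor: the worker's only allocator activity is this free(),
 * which, depending on destructor ordering, can run after jemalloc's own
 * TSD cleanup has already happened.
 */
static void free_on_exit(void *ptr) {
    free(ptr);
}

static void *worker(void *arg) {
    /* No malloc() in this thread; just park the pointer for cleanup. */
    pthread_setspecific(key, arg);
    return NULL;
}

int main(void) {
    pthread_t t;
    void *p = malloc(64);   /* allocated by the main thread */

    pthread_key_create(&key, free_on_exit);
    pthread_create(&t, NULL, worker, p);
    pthread_join(t, NULL);
    pthread_key_delete(key);
    return 0;
}

Without the patch, such a late free() fully initializes a TSD whose cleanup handler can no longer run; with it, free() only ever creates the minimal, cleanup-free TSD, and any other allocator activity upgrades the state from minimal to normal.
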
Added: 0003-Add-minimal-initialized-tsd.patch
===================================================================
--- 0003-Add-minimal-initialized-tsd.patch                              (rev 0)
+++ 0003-Add-minimal-initialized-tsd.patch      2017-06-21 07:08:48 UTC (rev 299098)
@@ -0,0 +1,37 @@
+From d35c037e03e1450794dcf595e49a1e1f97f87ac4 Mon Sep 17 00:00:00 2001
+From: Qi Wang <[email protected]>
+Date: Mon, 19 Jun 2017 21:19:15 -0700
+Subject: [PATCH] Clear tcache_ql after fork in child.
+
+---
+ src/arena.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/src/arena.c b/src/arena.c
+index 019dd877..d401808b 100644
+--- a/src/arena.c
++++ b/src/arena.c
+@@ -2133,6 +2133,23 @@ void
+ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
+       unsigned i;
+ 
++      atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
++      atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
++      if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
++              arena_nthreads_inc(arena, false);
++      }
++      if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
++              arena_nthreads_inc(arena, true);
++      }
++      if (config_stats) {
++              ql_new(&arena->tcache_ql);
++              tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
++              if (tcache != NULL && tcache->arena == arena) {
++                      ql_elm_new(tcache, link);
++                      ql_tail_insert(&arena->tcache_ql, tcache, link);
++              }
++      }
++
+       for (i = 0; i < NBINS; i++) {
+               malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+       }

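The second patch resets per-arena bookkeeping after fork(). A rough sketch of the pattern it guards against (a multi-threaded process that forks while its workers are using the allocator, then keeps allocating in the child) might look like the following. This is a hypothetical reproduction, not taken from the QEMU report; actually hitting the original crash also depends on jemalloc being built with stats support and on timing, and the churn() helper is made up for illustration:

#include <pthread.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile int keep_running = 1;

/* Each worker creates and exercises its own tcache, bound to some arena. */
static void *churn(void *arg) {
    (void)arg;
    while (keep_running) {
        free(malloc(128));
    }
    return NULL;
}

int main(void) {
    pthread_t workers[4];
    for (int i = 0; i < 4; i++) {
        pthread_create(&workers[i], NULL, churn, NULL);
    }
    usleep(10000);  /* give the workers time to bind tcaches to arenas */

    pid_t pid = fork();
    if (pid == 0) {
        /*
         * Child: only the forking thread survives, yet the arenas inherited
         * from the parent still carry per-thread bookkeeping for threads
         * that no longer exist.
         */
        for (int i = 0; i < 100000; i++) {
            free(malloc(256));
        }
        _exit(0);
    }
    waitpid(pid, NULL, 0);

    keep_running = 0;
    for (int i = 0; i < 4; i++) {
        pthread_join(workers[i], NULL);
    }
    return 0;
}

Before this patch the child's arenas kept the parent's nthreads counts and a stats tcache list (tcache_ql) full of stale pointers; arena_postfork_child() now zeroes the counters, re-registers the surviving thread, and rebuilds tcache_ql with at most that thread's tcache.
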
Modified: PKGBUILD
===================================================================
--- PKGBUILD    2017-06-21 06:36:57 UTC (rev 299097)
+++ PKGBUILD    2017-06-21 07:08:48 UTC (rev 299098)
@@ -5,8 +5,8 @@
 
 pkgname=jemalloc
 epoch=1
-pkgver=4.5.0
-pkgrel=1
+pkgver=5.0.0
+pkgrel=3
 pkgdesc='General-purpose scalable concurrent malloc implementation'
 arch=('i686' 'x86_64')
 license=('BSD')
@@ -14,9 +14,22 @@
 depends=('glibc')
 provides=('libjemalloc.so')
 optdepends=('perl: for jeprof')
-source=("https://github.com/jemalloc/jemalloc/releases/download/${pkgver}/${pkgname}-${pkgver}.tar.bz2")
-sha256sums=('9409d85664b4f135b77518b0b118c549009dc10f6cba14557d170476611f6780')
+source=("https://github.com/jemalloc/jemalloc/releases/download/${pkgver}/${pkgname}-${pkgver}.tar.bz2"
+        '0001-only-abort-on-dlsym-when-necessary.patch'
+        '0002-clear-tcache_ql-after-fork-in-child.patch'
+        '0003-Add-minimal-initialized-tsd.patch')
+sha256sums=('9e4a9efba7dc4a7696f247c90c3fe89696de5f910f7deacf7e22ec521b1fa810'
+            'ef8b3afd9f7e8ee871bf6b228b0f9288881f6cc0243478bab727ba02eb2776e0'
+            'd627e0cf2b540bdeea3a4cd15c28b949faa30064eb7f53da11aa9a428732bb63'
+            'bb36a2802e5f78fc0a1f3c0ddf334d01bb04116ba712f1fd05eb47231af094c6')
 
+prepare() {
+  cd $pkgname-$pkgver
+  patch -Np1 < "$srcdir"/0001-only-abort-on-dlsym-when-necessary.patch
+  patch -Np1 < "$srcdir"/0002-clear-tcache_ql-after-fork-in-child.patch
+  patch -Np1 < "$srcdir"/0003-Add-minimal-initialized-tsd.patch
+}
+
 build() {
   cd $pkgname-$pkgver
   ./configure --prefix=/usr
