Date: Monday, January 23, 2023 @ 10:23:40
  Author: freswa
Revision: 467230

archrelease: copy trunk to testing-x86_64

Added:
  db5.3/repos/testing-x86_64/
  db5.3/repos/testing-x86_64/PKGBUILD
    (from rev 467229, db5.3/trunk/PKGBUILD)
  db5.3/repos/testing-x86_64/db-5.3.21-memp_stat-upstream-fix.patch
    (from rev 467229, db5.3/trunk/db-5.3.21-memp_stat-upstream-fix.patch)
  db5.3/repos/testing-x86_64/db-5.3.21-mutex_leak.patch
    (from rev 467229, db5.3/trunk/db-5.3.21-mutex_leak.patch)
  db5.3/repos/testing-x86_64/db-5.3.28-atomic_compare_exchange.patch
    (from rev 467229, db5.3/trunk/db-5.3.28-atomic_compare_exchange.patch)
  db5.3/repos/testing-x86_64/db-5.3.28-lemon_hash.patch
    (from rev 467229, db5.3/trunk/db-5.3.28-lemon_hash.patch)
  db5.3/repos/testing-x86_64/db-5.3.28-mmap-high-cpu-usage.patch
    (from rev 467229, db5.3/trunk/db-5.3.28-mmap-high-cpu-usage.patch)
  db5.3/repos/testing-x86_64/db-5.3.28_cve-2019-2708.patch
    (from rev 467229, db5.3/trunk/db-5.3.28_cve-2019-2708.patch)

-----------------------------------------+
 PKGBUILD                                |   72 +++
 db-5.3.21-memp_stat-upstream-fix.patch  |  183 +++++++
 db-5.3.21-mutex_leak.patch              |  718 ++++++++++++++++++++++++++++++
 db-5.3.28-atomic_compare_exchange.patch |   20 
 db-5.3.28-lemon_hash.patch              |   20 
 db-5.3.28-mmap-high-cpu-usage.patch     |   19 
 db-5.3.28_cve-2019-2708.patch           |  694 ++++++++++++++++++++++++++++
 7 files changed, 1726 insertions(+)

Copied: db5.3/repos/testing-x86_64/PKGBUILD (from rev 467229, db5.3/trunk/PKGBUILD)
===================================================================
--- testing-x86_64/PKGBUILD                             (rev 0)
+++ testing-x86_64/PKGBUILD     2023-01-23 10:23:40 UTC (rev 467230)
@@ -0,0 +1,72 @@
+# Maintainer: Frederik Schwan <freswa at archlinux dot org>
+# Contributor: Stéphane Gaudreault <[email protected]>
+# Contributor: Allan McRae <[email protected]>
+# Contributor: Andreas Radke <[email protected]>
+
+pkgname=db5.3
+pkgver=5.3.28
+pkgrel=3
+pkgdesc="The Berkeley DB embedded database system v5.3"
+arch=(x86_64)
+url='https://www.oracle.com/technology/software/products/berkeley-db/index.html'
+license=(custom:sleepycat)
+depends=(gcc-libs sh)
+provides=(libdb-5.3.so)
+source=(
+  https://download.oracle.com/berkeley-db/db-${pkgver}.tar.gz
+  db-5.3.21-memp_stat-upstream-fix.patch
+  db-5.3.21-mutex_leak.patch
+  db-5.3.28-lemon_hash.patch
+  db-5.3.28_cve-2019-2708.patch
+  db-5.3.28-mmap-high-cpu-usage.patch
+  db-5.3.28-atomic_compare_exchange.patch
+)
+b2sums=('5b77a4ec0945fa96ce6c2b452fb4b029295e037fa64d5ff8dcb3b5e01ee3501fd75d9a73b8f5fa54d8907717d93546fbb5348ee70119e877fa2f17d301ecf50d'
+        'cfbb212c60c0e75c0d8c1cdbee9bcf0be9aec718ab462f779d5b880a4f976de1423279adde20159eef067be95d0d725eaa0cb950d0a8aaae720de78b94299e72'
+        'ba917cf9b69de39337b7141bf145153ae89849921c79b4d2f40734feefe2c0007abcc8c85ed776227875c5102354d8f61069fcee12c7db3b45c4ccabc6993760'
+        '682539a382f4423ad4404ad2aa446356d97485a7883a26d31cc2061611b138f20e39d941c575b8997538c4dd609e3e0343505759bba3b89c669ad1830cac1fbf'
+        'bc72ba9703cdbabbece3480fd36c22fca26f270cc4b8c66f8f19799b5120010dfdf53772a94374e45d38b25bb8a271d29ea84318aa8bc847a88940d52ee22a8f'
+        'bc50b556653a4e33073ed4cbaf716035f0840af2659bb4d6ef900621f92515d9490a698cec70546bd3aededa02b264fcd6e6cba3c4e6412125d2d587094f872d'
+        'c32c5236d5f1c9c112e5c6d7aee8d1d67e4f80c3826020ba71d864649bd12f2402a6d68334cfbf1bc17c920878bf64296d884436608a5b3fb9a27ba1fd4d9c75')
+
+# All Patches retrieved from Fedora
+prepare() {
+  cd db-${pkgver}
+  # memp_stat fix provided by upstream
+  patch -Np1 -i ../db-5.3.21-memp_stat-upstream-fix.patch
+  # fix for mutexes not being released provided by upstream
+  patch -Np1 -i ../db-5.3.21-mutex_leak.patch
+  # fix for overflowing hash variable inside bundled lemon
+  patch -Np1 -i ../db-5.3.28-lemon_hash.patch
+  # cve-2019-2708 fixed by mmuzila
+  patch -Np1 -i ../db-5.3.28_cve-2019-2708.patch
+  # Prevents high CPU usage
+  patch -Np1 -i ../db-5.3.28-mmap-high-cpu-usage.patch
+  # gcc fix
+  patch -Np1 -i ../db-5.3.28-atomic_compare_exchange.patch
+}
+
+
+build() {
+  cd db-${pkgver}/build_unix
+  ../dist/configure \
+    --prefix=/usr \
+    --bindir=/usr/bin/db5.3 \
+    --includedir=/usr/include/db5.3 \
+    --enable-compat185 \
+    --enable-shared \
+    --enable-static \
+    --enable-cxx \
+    --enable-dbm \
+    --enable-stl
+  make LIBSO_LIBS=-lpthread
+}
+
+package() {
+  make -C db-${pkgver}/build_unix DESTDIR="${pkgdir}" install
+  install -Dm644 db-${pkgver}/LICENSE "${pkgdir}"/usr/share/licenses/${pkgname}/LICENSE
+  rm "${pkgdir}"/usr/lib/libdb.so
+  rm "${pkgdir}"/usr/lib/libdb_cxx.so
+  rm "${pkgdir}"/usr/lib/libdb_stl.so
+  rm -r "${pkgdir}"/usr/docs
+}
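
Note: because the PKGBUILD above installs the headers into /usr/include/db5.3 and the package provides libdb-5.3.so, a consumer points the compiler at that include directory and links the versioned library. A minimal, hypothetical usage sketch; the compile flags (-I/usr/include/db5.3, -ldb-5.3) are assumptions based on the configure options above, not something this commit documents:

/* Hypothetical build: cc -I/usr/include/db5.3 example.c -ldb-5.3 (assumed flags). */
#include <db.h>
#include <string.h>

int main(void)
{
	DB *dbp;
	DBT key, data;
	int ret;

	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (1);
	if ((ret = dbp->open(dbp, NULL, "example.db", NULL,
	    DB_BTREE, DB_CREATE, 0664)) != 0) {
		(void)dbp->close(dbp, 0);
		return (1);
	}
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "hello";
	key.size = 5;
	data.data = "world";
	data.size = 5;
	ret = dbp->put(dbp, NULL, &key, &data, 0);
	return (dbp->close(dbp, 0) != 0 || ret != 0);
}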

Copied: db5.3/repos/testing-x86_64/db-5.3.21-memp_stat-upstream-fix.patch (from rev 467229, db5.3/trunk/db-5.3.21-memp_stat-upstream-fix.patch)
===================================================================
--- testing-x86_64/db-5.3.21-memp_stat-upstream-fix.patch                              (rev 0)
+++ testing-x86_64/db-5.3.21-memp_stat-upstream-fix.patch       2023-01-23 10:23:40 UTC (rev 467230)
@@ -0,0 +1,183 @@
+diff -r -u db-5.3.21_orig/src/mp/mp_stat.c db-5.3.21/src/mp/mp_stat.c
+--- db-5.3.21_orig/src/mp/mp_stat.c    2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_stat.c 2015-05-19 15:07:09.000000000 +0800
+@@ -87,6 +87,13 @@
+       u_int32_t i;
+       uintmax_t tmp_wait, tmp_nowait;
+ 
++      /*
++       * The array holding the lengths related to the buffer allocated for *fspp.
++       * The first element of the array holds the number of entries allocated.
++       * The second element of the array holds the total number of bytes allocated.
++       */
++      u_int32_t fsp_len[2];
++
+       dbmp = env->mp_handle;
+       mp = dbmp->reginfo[0].primary;
+ 
+@@ -193,32 +200,53 @@
+       if (fspp != NULL) {
+               *fspp = NULL;
+ 
+-              /* Count the MPOOLFILE structures. */
+-              i = 0;
+-              len = 0;
+-              if ((ret = __memp_walk_files(env,
+-                   mp, __memp_count_files, &len, &i, flags)) != 0)
+-                      return (ret);
++              while (*fspp == NULL) {
++                      /* Count the MPOOLFILE structures. */
++                      i = 0;
++                      /*
++                       * Allow space for the first __memp_get_files() to align the
++                       * structure array to uintmax_t, DB_MPOOL_STAT's most
++                       * restrictive field.  [#23150]
++                       */
++                      len = sizeof(uintmax_t);
++                      if ((ret = __memp_walk_files(env,
++                           mp, __memp_count_files, &len, &i, flags)) != 0)
++                              return (ret);
++
++                      if (i == 0)
++                              return (0);
++
++                      /* 
++                       * Copy the number of DB_MPOOL_FSTAT entries and the number of
++                       * bytes allocated for them into fsp_len. Do not count the space
++                       * reserved for allignment.
++                       */
++                      fsp_len[0] = i;
++                      fsp_len[1] = len - sizeof(uintmax_t);
+ 
+-              if (i == 0)
+-                      return (0);
+-              len += sizeof(DB_MPOOL_FSTAT *);        /* Trailing NULL */
++                      len += sizeof(DB_MPOOL_FSTAT *);        /* Trailing NULL */
+ 
+-              /* Allocate space */
+-              if ((ret = __os_umalloc(env, len, fspp)) != 0)
+-                      return (ret);
++                      /* Allocate space */
++                      if ((ret = __os_umalloc(env, len, fspp)) != 0)
++                              return (ret);
+ 
+-              tfsp = *fspp;
+-              *tfsp = NULL;
+-
+-              /*
+-               * Files may have been opened since we counted, don't walk
+-               * off the end of the allocated space.
+-               */
+-              if ((ret = __memp_walk_files(env,
+-                  mp, __memp_get_files, &tfsp, &i, flags)) != 0)
+-                      return (ret);
++                      tfsp = *fspp;
++                      *tfsp = NULL;
+ 
++                      /*
++                       * Files may have been opened since we counted, if we walk off
++                       * the end of the allocated space specified in fsp_len, retry.
++                       */
++                      if ((ret = __memp_walk_files(env,
++                          mp, __memp_get_files, &tfsp, fsp_len, flags)) != 0) {
++                              if (ret == DB_BUFFER_SMALL) {
++                                      __os_ufree(env, *fspp);
++                                      *fspp = NULL;
++                                      tfsp = NULL;
++                              } else
++                                      return (ret);
++                      }
++              }
+               *++tfsp = NULL;
+       }
+ 
+@@ -286,28 +314,35 @@
+  * for the text file names.
+  */
+ static int
+-__memp_get_files(env, mfp, argp, countp, flags)
++__memp_get_files(env, mfp, argp, fsp_len, flags)
+       ENV *env;
+       MPOOLFILE *mfp;
+       void *argp;
+-      u_int32_t *countp;
++      u_int32_t fsp_len[];
+       u_int32_t flags;
+ {
+       DB_MPOOL *dbmp;
+       DB_MPOOL_FSTAT **tfsp, *tstruct;
+       char *name, *tname;
+-      size_t nlen;
++      size_t nlen, tlen;
+ 
+-      if (*countp == 0)
+-              return (0);
++      /* We walked through more files than argp was allocated for. */
++      if (fsp_len[0] == 0)
++              return DB_BUFFER_SMALL;
+ 
+       dbmp = env->mp_handle;
+       tfsp = *(DB_MPOOL_FSTAT ***)argp;
+ 
+       if (*tfsp == NULL) {
+-              /* Add 1 to count because we need to skip over the NULL. */
+-              tstruct = (DB_MPOOL_FSTAT *)(tfsp + *countp + 1);
+-              tname = (char *)(tstruct + *countp);
++              /*
++               * Add 1 to count because to skip over the NULL end marker.
++               * Align it further for DB_MPOOL_STAT's most restrictive field
++               * because uintmax_t might require stricter alignment than
++               * pointers; e.g., IP32 LL64 SPARC. [#23150]
++               */
++              tstruct = (DB_MPOOL_FSTAT *)&tfsp[fsp_len[0] + 1];
++              tstruct = ALIGNP_INC(tstruct, sizeof(uintmax_t));
++              tname = (char *)&tstruct[fsp_len[0]];
+               *tfsp = tstruct;
+       } else {
+               tstruct = *tfsp + 1;
+@@ -317,6 +352,15 @@
+ 
+       name = __memp_fns(dbmp, mfp);
+       nlen = strlen(name) + 1;
++
++      /* The space required for file names is larger than argp was allocated for. */
++      tlen = sizeof(DB_MPOOL_FSTAT *) + sizeof(DB_MPOOL_FSTAT) + nlen;
++      if (fsp_len[1] < tlen)
++              return DB_BUFFER_SMALL;
++      else
++              /* Count down the number of bytes left in argp. */
++              fsp_len[1] -= tlen;
++
+       memcpy(tname, name, nlen);
+       memcpy(tstruct, &mfp->stat, sizeof(mfp->stat));
+       tstruct->file_name = tname;
+@@ -325,7 +369,9 @@
+       tstruct->st_pagesize = mfp->pagesize;
+ 
+       *(DB_MPOOL_FSTAT ***)argp = tfsp;
+-      (*countp)--;
++
++      /* Count down the number of entries left in argp. */
++      fsp_len[0]--;
+ 
+       if (LF_ISSET(DB_STAT_CLEAR))
+               memset(&mfp->stat, 0, sizeof(mfp->stat));
+diff -r -u db-5.3.21_orig/src/mp/mp_sync.c db-5.3.21/src/mp/mp_sync.c
+--- db-5.3.21_orig/src/mp/mp_sync.c    2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_sync.c 2015-05-19 15:08:05.000000000 +0800
+@@ -57,11 +57,13 @@
+                       if ((t_ret = func(env,
+                           mfp, arg, countp, flags)) != 0 && ret == 0)
+                               ret = t_ret;
+-                      if (ret != 0 && !LF_ISSET(DB_STAT_MEMP_NOERROR))
++                      if (ret != 0 &&
++                          (!LF_ISSET(DB_STAT_MEMP_NOERROR) || ret == DB_BUFFER_SMALL))
+                               break;
+               }
+               MUTEX_UNLOCK(env, hp->mtx_hash);
+-              if (ret != 0 && !LF_ISSET(DB_STAT_MEMP_NOERROR))
++              if (ret != 0 &&
++                  (!LF_ISSET(DB_STAT_MEMP_NOERROR) || ret == DB_BUFFER_SMALL))
+                       break;
+       }
+       return (ret);
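
The hunks above turn memp_stat's one-shot "count, allocate, fill" sequence into a retry loop: the entry count and byte total go into fsp_len, and if more files were opened before __memp_get_files finishes filling the buffer it reports DB_BUFFER_SMALL, the buffer is freed, and the whole pass starts over. A rough, self-contained sketch of that pattern; the snapshot/count/fill names are hypothetical and not Berkeley DB API:

#include <stdlib.h>

/* Hypothetical callbacks: count() reports how many items exist right now;
 * fill() copies at most cap items and returns nonzero (think DB_BUFFER_SMALL)
 * when more items appeared after the count was taken. */
typedef size_t (*count_fn)(void);
typedef int (*fill_fn)(void *buf, size_t cap);

static void *snapshot(count_fn count, fill_fn fill, size_t item_size, size_t *n_out)
{
	void *buf = NULL;
	size_t n = 0;

	/* Count, allocate, fill; if the set grew in between, discard and retry. */
	while (buf == NULL) {
		if ((n = count()) == 0)
			return (NULL);
		if ((buf = malloc(n * item_size)) == NULL)
			return (NULL);
		if (fill(buf, n) != 0) {
			free(buf);
			buf = NULL;
		}
	}
	*n_out = n;
	return (buf);
}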

Copied: db5.3/repos/testing-x86_64/db-5.3.21-mutex_leak.patch (from rev 467229, db5.3/trunk/db-5.3.21-mutex_leak.patch)
===================================================================
--- testing-x86_64/db-5.3.21-mutex_leak.patch                           (rev 0)
+++ testing-x86_64/db-5.3.21-mutex_leak.patch   2023-01-23 10:23:40 UTC (rev 467230)
@@ -0,0 +1,718 @@
+diff -U 5 -r db-5.3.21.old/src/dbinc_auto/int_def.in db-5.3.21/src/dbinc_auto/int_def.in
+--- db-5.3.21.old/src/dbinc_auto/int_def.in    2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/dbinc_auto/int_def.in        2016-10-25 22:40:58.000000000 +0800
+@@ -1371,10 +1371,11 @@
+ #define       __memp_failchk __memp_failchk@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_bhwrite __memp_bhwrite@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_pgread __memp_pgread@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_pg __memp_pg@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_bhfree __memp_bhfree@DB_VERSION_UNIQUE_NAME@
++#define       __memp_bh_clear_dirty __memp_bh_clear_dirty@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fget_pp __memp_fget_pp@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fget __memp_fget@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fcreate_pp __memp_fcreate_pp@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fcreate __memp_fcreate@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_set_clear_len __memp_set_clear_len@DB_VERSION_UNIQUE_NAME@
+@@ -1395,10 +1396,11 @@
+ #define       __memp_fopen __memp_fopen@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fclose_pp __memp_fclose_pp@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fclose __memp_fclose@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_inmemlist __memp_inmemlist@DB_VERSION_UNIQUE_NAME@
++#define       __memp_mf_mark_dead __memp_mf_mark_dead@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fput_pp __memp_fput_pp@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fput __memp_fput@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_unpin_buffers __memp_unpin_buffers@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_dirty __memp_dirty@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_shared __memp_shared@DB_VERSION_UNIQUE_NAME@
+@@ -1453,10 +1455,11 @@
+ #define       __memp_fsync_pp __memp_fsync_pp@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_fsync __memp_fsync@DB_VERSION_UNIQUE_NAME@
+ #define       __mp_xxx_fh __mp_xxx_fh@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_mf_sync __memp_mf_sync@DB_VERSION_UNIQUE_NAME@
++#define       __memp_purge_dead_files __memp_purge_dead_files@DB_VERSION_UNIQUE_NAME@
+ #define       __memp_trickle_pp __memp_trickle_pp@DB_VERSION_UNIQUE_NAME@
+ #define       __mutex_alloc __mutex_alloc@DB_VERSION_UNIQUE_NAME@
+ #define       __mutex_alloc_int __mutex_alloc_int@DB_VERSION_UNIQUE_NAME@
+ #define       __mutex_free __mutex_free@DB_VERSION_UNIQUE_NAME@
+ #define       __mutex_free_int __mutex_free_int@DB_VERSION_UNIQUE_NAME@
+diff -U 5 -r db-5.3.21.old/src/dbinc_auto/mp_ext.h db-5.3.21/src/dbinc_auto/mp_ext.h
+--- db-5.3.21.old/src/dbinc_auto/mp_ext.h      2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/dbinc_auto/mp_ext.h  2016-10-25 22:40:58.000000000 +0800
+@@ -14,10 +14,11 @@
+ int __memp_failchk __P((ENV *));
+ int __memp_bhwrite __P((DB_MPOOL *, DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
+ int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
+ int __memp_pg __P((DB_MPOOLFILE *, db_pgno_t, void *, int));
+ int __memp_bhfree __P((DB_MPOOL *, REGINFO *, MPOOLFILE *, DB_MPOOL_HASH *, BH *, u_int32_t));
++void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
+ int __memp_fget_pp __P((DB_MPOOLFILE *, db_pgno_t *, DB_TXN *, u_int32_t, void *));
+ int __memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, DB_THREAD_INFO *, DB_TXN *, u_int32_t, void *));
+ int __memp_fcreate_pp __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ int __memp_fcreate __P((ENV *, DB_MPOOLFILE **));
+ int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t));
+@@ -38,10 +39,11 @@
+ int __memp_fopen __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, const char **, u_int32_t, int, size_t));
+ int __memp_fclose_pp __P((DB_MPOOLFILE *, u_int32_t));
+ int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t));
+ int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *, int));
+ int __memp_inmemlist __P((ENV *, char ***, int *));
++void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
+ int __memp_fput_pp __P((DB_MPOOLFILE *, void *, DB_CACHE_PRIORITY, u_int32_t));
+ int __memp_fput __P((DB_MPOOLFILE *, DB_THREAD_INFO *, void *, DB_CACHE_PRIORITY));
+ int __memp_unpin_buffers __P((ENV *, DB_THREAD_INFO *));
+ int __memp_dirty __P((DB_MPOOLFILE *, void *, DB_THREAD_INFO *, DB_TXN *, DB_CACHE_PRIORITY, u_int32_t));
+ int __memp_shared __P((DB_MPOOLFILE *, void *));
+@@ -96,10 +98,11 @@
+ int __memp_fsync_pp __P((DB_MPOOLFILE *));
+ int __memp_fsync __P((DB_MPOOLFILE *));
+ int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+ int __memp_sync_int __P((ENV *, DB_MPOOLFILE *, u_int32_t, u_int32_t, u_int32_t *, int *));
+ int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *, int));
++int __memp_purge_dead_files __P((ENV *));
+ int __memp_trickle_pp __P((DB_ENV *, int, int *));
+ 
+ #if defined(__cplusplus)
+ }
+ #endif
+diff -U 5 -r db-5.3.21.old/src/mp/mp_bh.c db-5.3.21/src/mp/mp_bh.c
+--- db-5.3.21.old/src/mp/mp_bh.c       2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_bh.c   2016-10-25 17:09:35.000000000 +0800
+@@ -472,15 +472,12 @@
+        * a shared latch.
+        */
+       if (F_ISSET(bhp, BH_DIRTY | BH_TRASH)) {
+               MUTEX_LOCK(env, hp->mtx_hash);
+               DB_ASSERT(env, !SH_CHAIN_HASNEXT(bhp, vc));
+-              if (ret == 0 && F_ISSET(bhp, BH_DIRTY)) {
+-                      F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+-                      DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
+-                      atomic_dec(env, &hp->hash_page_dirty);
+-              }
++              if (ret == 0)
++                      __memp_bh_clear_dirty(env, hp, bhp);
+ 
+               /* put the page back if necessary. */
+               if ((ret != 0 || BH_REFCOUNT(bhp) > 1) &&
+                   F_ISSET(bhp, BH_TRASH)) {
+                       ret = __memp_pg(dbmfp, bhp->pgno, bhp->buf, 1);
+@@ -686,5 +683,31 @@
+       } else
+               MUTEX_UNLOCK(env, mfp->mutex);
+ 
+       return (ret);
+ }
++
++/*
++ * __memp_bh_clear_dirty --
++ *    Clear the dirty flag of of a buffer. Calls on the same buffer must be
++ *    serialized to get the accounting correct. This can be achieved by
++ *    acquiring an exclusive lock on the buffer, a shared lock on the
++ *    buffer plus an exclusive lock on the hash bucket, or some other
++ *    mechanism that guarantees single-thread access to the entire region
++ *    (e.g. during __memp_region_bhfree()).
++ *
++ * PUBLIC: void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
++ */
++void
++__memp_bh_clear_dirty(env, hp, bhp)
++      ENV *env;
++      DB_MPOOL_HASH *hp;
++      BH *bhp;
++{
++      COMPQUIET(env, env);
++      if (F_ISSET(bhp, BH_DIRTY)) {
++              F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
++              DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
++              (void)atomic_dec(env, &hp->hash_page_dirty);
++      }
++}
++
+diff -U 5 -r db-5.3.21.old/src/mp/mp_fget.c db-5.3.21/src/mp/mp_fget.c
+--- db-5.3.21.old/src/mp/mp_fget.c     2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_fget.c 2016-10-25 17:11:08.000000000 +0800
+@@ -437,16 +437,11 @@
+                * complain and get out.
+                */
+               if (flags == DB_MPOOL_FREE) {
+ freebuf:              MUTEX_LOCK(env, hp->mtx_hash);
+                       h_locked = 1;
+-                      if (F_ISSET(bhp, BH_DIRTY)) {
+-                              F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+-                              DB_ASSERT(env,
+-                                 atomic_read(&hp->hash_page_dirty) > 0);
+-                              atomic_dec(env, &hp->hash_page_dirty);
+-                      }
++                      __memp_bh_clear_dirty(env, hp, bhp);
+ 
+                       /*
+                        * If the buffer we found is already freed, we're done.
+                        * If the ref count is not 1 then someone may be
+                        * peeking at the buffer.  We cannot free it until they
+diff -U 5 -r db-5.3.21.old/src/mp/mp_fopen.c db-5.3.21/src/mp/mp_fopen.c
+--- db-5.3.21.old/src/mp/mp_fopen.c    2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_fopen.c        2016-10-25 22:31:05.000000000 +0800
+@@ -12,10 +12,11 @@
+ #include "dbinc/log.h"
+ #include "dbinc/mp.h"
+ #include "dbinc/db_page.h"
+ #include "dbinc/hash.h"
+ 
++static int __memp_count_dead_mutex __P((DB_MPOOL *, u_int32_t *));
+ static int __memp_mpf_alloc __P((DB_MPOOL *,
+     DB_MPOOLFILE *, const char *, u_int32_t, u_int32_t, MPOOLFILE **));
+ static int __memp_mpf_find __P((ENV *,
+     DB_MPOOLFILE *, DB_MPOOL_HASH *, const char *, u_int32_t, MPOOLFILE **));
+ 
+@@ -709,11 +710,15 @@
+                * We should be able to set mfp to NULL and break out of the
+                * loop, but I like the idea of checking all the entries.
+                */
+               if (LF_ISSET(DB_TRUNCATE)) {
+                       MUTEX_LOCK(env, mfp->mutex);
+-                      mfp->deadfile = 1;
++                      /*
++                       * We cannot purge dead files here, because the caller
++                       * is holding the mutex of the hash bucket of mfp.
++                       */
++                      __memp_mf_mark_dead(dbmp, mfp, NULL);
+                       MUTEX_UNLOCK(env, mfp->mutex);
+                       continue;
+               }
+ 
+               /*
+@@ -907,14 +912,15 @@
+       DB_MPOOL *dbmp;
+       ENV *env;
+       MPOOLFILE *mfp;
+       char *rpath;
+       u_int32_t ref;
+-      int deleted, ret, t_ret;
++      int deleted, purge_dead, ret, t_ret;
+ 
+       env = dbmfp->env;
+       dbmp = env->mp_handle;
++      purge_dead = 0;
+       ret = 0;
+ 
+       /*
+        * Remove the DB_MPOOLFILE from the process' list.
+        *
+@@ -1004,11 +1010,11 @@
+       }
+       DB_ASSERT(env, mfp->neutral_cnt < mfp->mpf_cnt);
+       if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
+               if (LF_ISSET(DB_MPOOL_DISCARD) ||
+                   F_ISSET(mfp, MP_TEMP) || mfp->unlink_on_close) {
+-                      mfp->deadfile = 1;
++                      __memp_mf_mark_dead(dbmp, mfp, &purge_dead);
+               }
+               if (mfp->unlink_on_close) {
+                       if ((t_ret = __db_appname(dbmp->env, DB_APP_DATA,
+                           R_ADDR(dbmp->reginfo, mfp->path_off), NULL,
+                           &rpath)) != 0 && ret == 0)
+@@ -1037,10 +1043,12 @@
+                       deleted = 1;
+               }
+       }
+       if (!deleted && !LF_ISSET(DB_MPOOL_NOLOCK))
+               MUTEX_UNLOCK(env, mfp->mutex);
++      if (purge_dead)
++              (void)__memp_purge_dead_files(env);
+ 
+ done: /* Discard the DB_MPOOLFILE structure. */
+       if (dbmfp->pgcookie != NULL) {
+               __os_free(env, dbmfp->pgcookie->data);
+               __os_free(env, dbmfp->pgcookie);
+@@ -1091,11 +1099,11 @@
+       /*
+        * We have to release the MPOOLFILE mutex before acquiring the region
+        * mutex so we don't deadlock.  Make sure nobody ever looks at this
+        * structure again.
+        */
+-      mfp->deadfile = 1;
++      __memp_mf_mark_dead(dbmp, mfp, NULL);
+ 
+       /* Discard the mutex we're holding and return it too the pool. */
+       MUTEX_UNLOCK(env, mfp->mutex);
+       if ((t_ret = __mutex_free(env, &mfp->mutex)) != 0 && ret == 0)
+               ret = t_ret;
+@@ -1216,5 +1224,106 @@
+       /* Make sure we don't return any garbage. */
+       *cntp = 0;
+       *namesp = NULL;
+       return (ret);
+ }
++
++/*
++ * __memp_mf_mark_dead --
++ *    Mark an MPOOLFILE as dead because its contents are no longer necessary.
++ *    This happens when removing, truncation, or closing an unnamed in-memory
++ *    database. Return, in the purgep parameter, whether the caller should
++ *    call __memp_purge_dead_files() after the lock on mfp is released. The
++ *    caller must hold an exclusive lock on the mfp handle.
++ *
++ * PUBLIC: void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
++ */
++void
++__memp_mf_mark_dead(dbmp, mfp, purgep)
++      DB_MPOOL *dbmp; 
++      MPOOLFILE *mfp;
++      int *purgep;
++{
++      ENV *env;
++#ifdef HAVE_MUTEX_SUPPORT
++      REGINFO *infop;
++      DB_MUTEXREGION *mtxregion;
++      u_int32_t mutex_max, mutex_inuse, dead_mutex;
++#endif
++
++      if (purgep != NULL)
++              *purgep = 0;
++
++      env = dbmp->env;
++
++#ifdef HAVE_MUTEX_SUPPORT
++      MUTEX_REQUIRED(env, mfp->mutex);
++
++      if (MUTEX_ON(env) && mfp->deadfile == 0) {
++              infop = &env->mutex_handle->reginfo;
++              mtxregion = infop->primary;
++
++              mutex_inuse = mtxregion->stat.st_mutex_inuse;
++              if ((mutex_max = env->dbenv->mutex_max) == 0)
++                      mutex_max = infop->rp->max / mtxregion->mutex_size;
++
++              /*
++               * Purging dead pages requires a full scan of the entire cache
++               * buffer, so it is a slow operation. We only want to do it
++               * when it is necessary and provides enough benefits. Below is
++               * a simple heuristic that determines when to purge all dead
++               * pages.
++               */
++              if (purgep != NULL && mutex_inuse > mutex_max - 200) {
++                      /*
++                       * If the mutex region is almost full and there are
++                       * many mutexes held by dead files, purge dead files.
++                       */
++                      (void)__memp_count_dead_mutex(dbmp, &dead_mutex);
++                      dead_mutex += mfp->block_cnt + 1;
++
++                      if (dead_mutex > mutex_inuse / 20)
++                              *purgep = 1;
++              }
++      }
++#endif
++
++      mfp->deadfile = 1;
++}
++
++/*
++ * __memp_count_dead_mutex --
++ *    Estimate the number of mutexes held by dead files.
++ */
++static int
++__memp_count_dead_mutex(dbmp, dead_mutex)
++      DB_MPOOL *dbmp;
++      u_int32_t *dead_mutex;
++{
++      ENV *env;
++      DB_MPOOL_HASH *hp;
++      MPOOL *mp;
++      MPOOLFILE *mfp;
++      u_int32_t mutex_per_file;
++      int busy, i;
++
++      env = dbmp->env;
++      *dead_mutex = 0;
++      mutex_per_file = 1;
++#ifndef HAVE_ATOMICFILEREAD
++      mutex_per_file = 2;
++#endif
++      mp = dbmp->reginfo[0].primary;
++      hp = R_ADDR(dbmp->reginfo, mp->ftab);
++      for (i = 0; i < MPOOL_FILE_BUCKETS; i++, hp++) {
++              busy = MUTEX_TRYLOCK(env, hp->mtx_hash);
++              if (busy)
++                      continue;
++              SH_TAILQ_FOREACH(mfp, &hp->hash_bucket, q, __mpoolfile) {
++                      if (mfp->deadfile)
++                              *dead_mutex += mfp->block_cnt + mutex_per_file;
++              }
++              MUTEX_UNLOCK(env, hp->mtx_hash);
++      }
++
++      return (0);
++}
+diff -U 5 -r db-5.3.21.old/src/mp/mp_method.c db-5.3.21/src/mp/mp_method.c
+--- db-5.3.21.old/src/mp/mp_method.c   2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_method.c       2016-10-25 17:22:23.000000000 +0800
+@@ -638,11 +638,11 @@
+       DB_MPOOL_HASH *hp, *nhp;
+       MPOOL *mp;
+       MPOOLFILE *mfp;
+       roff_t newname_off;
+       u_int32_t bucket;
+-      int locked, ret;
++      int locked, purge_dead, ret;
+       size_t nlen;
+       void *p;
+ 
+ #undef        op_is_remove
+ #define       op_is_remove    (newname == NULL)
+@@ -655,10 +655,11 @@
+       dbmp = NULL;
+       mfp = NULL;
+       nhp = NULL;
+       p = NULL;
+       locked = ret = 0;
++      purge_dead = 0;
+ 
+       if (!MPOOL_ON(env))
+               goto fsop;
+ 
+       dbmp = env->mp_handle;
+@@ -747,11 +748,11 @@
+                * they do not get reclaimed as long as they exist.  Since we
+                * are now deleting the database, we need to dec that count.
+                */
+               if (mfp->no_backing_file)
+                       mfp->mpf_cnt--;
+-              mfp->deadfile = 1;
++              __memp_mf_mark_dead(dbmp, mfp, &purge_dead);
+               MUTEX_UNLOCK(env, mfp->mutex);
+       } else {
+               /*
+                * Else, it's a rename.  We've allocated memory for the new
+                * name.  Swap it with the old one.  If it's in memory we
+@@ -806,10 +807,16 @@
+       if (locked == 1) {
+               MUTEX_UNLOCK(env, hp->mtx_hash);
+               if (nhp != NULL && nhp != hp)
+                       MUTEX_UNLOCK(env, nhp->mtx_hash);
+       }
++      /* 
++       * __memp_purge_dead_files() must be called when the hash bucket is
++       * unlocked.
++       */
++      if (purge_dead)
++              (void)__memp_purge_dead_files(env);
+       return (ret);
+ }
+ 
+ /*
+  * __memp_ftruncate __
+diff -U 5 -r db-5.3.21.old/src/mp/mp_sync.c db-5.3.21/src/mp/mp_sync.c
+--- db-5.3.21.old/src/mp/mp_sync.c     2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_sync.c 2016-10-25 17:26:58.000000000 +0800
+@@ -24,10 +24,11 @@
+ static int __bhcmp __P((const void *, const void *));
+ static int __memp_close_flush_files __P((ENV *, int));
+ static int __memp_sync_files __P((ENV *));
+ static int __memp_sync_file __P((ENV *,
+               MPOOLFILE *, void *, u_int32_t *, u_int32_t));
++static inline void __update_err_ret(int, int*);
+ 
+ /*
+  * __memp_walk_files --
+  * PUBLIC: int __memp_walk_files __P((ENV *, MPOOL *,
+  * PUBLIC:    int (*) __P((ENV *, MPOOLFILE *, void *,
+@@ -961,5 +962,125 @@
+               return (-1);
+       if (bhp1->track_pgno > bhp2->track_pgno)
+               return (1);
+       return (0);
+ }
++
++/*
++ * __memp_purge_dead_files --
++ *    Remove all dead files and their buffers from the mpool. The caller
++ *    cannot hold any lock on the dead MPOOLFILE handles, their buffers
++ *    or their hash buckets.
++ *
++ * PUBLIC: int __memp_purge_dead_files __P((ENV *));
++ */
++int
++__memp_purge_dead_files(env)
++      ENV *env;
++{
++      BH *bhp;
++      DB_MPOOL *dbmp;
++      DB_MPOOL_HASH *hp, *hp_end;
++      REGINFO *infop;
++      MPOOL *c_mp, *mp;
++      MPOOLFILE *mfp;
++      u_int32_t i_cache;
++      int ret, t_ret, h_lock;
++
++      if (!MPOOL_ON(env))
++              return (0);
++
++      dbmp = env->mp_handle;
++      mp = dbmp->reginfo[0].primary;
++      ret = t_ret = h_lock = 0;
++
++      /*
++       * Walk each cache's list of buffers and free all buffers whose
++       * MPOOLFILE is marked as dead.
++       */
++      for (i_cache = 0; i_cache < mp->nreg; i_cache++) {
++              infop = &dbmp->reginfo[i_cache]; 
++              c_mp = infop->primary;
++
++              hp = R_ADDR(infop, c_mp->htab);
++              hp_end = &hp[c_mp->htab_buckets];
++              for (; hp < hp_end; hp++) {
++                      /* Skip empty buckets. */
++                      if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
++                              continue;
++
++                      /* 
++                       * Search for a dead buffer. Other places that call
++                       * __memp_bhfree() acquire the buffer lock before the
++                       * hash bucket lock. Even though we acquire the two
++                       * locks in reverse order, we cannot deadlock here
++                       * because we don't block waiting for the locks.
++                       */
++                      t_ret = MUTEX_TRYLOCK(env, hp->mtx_hash);
++                      if (t_ret != 0) {
++                              __update_err_ret(t_ret, &ret);
++                              continue;
++                      }
++                      h_lock = 1;
++                      SH_TAILQ_FOREACH(bhp, &hp->hash_bucket, hq, __bh) {
++                              /* Skip buffers that are being used. */
++                              if (BH_REFCOUNT(bhp) > 0)
++                                      continue;
++
++                              mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
++                              if (!mfp->deadfile)
++                                      continue;
++
++                              /* Found a dead buffer. Prepare to free it. */
++                              t_ret = MUTEX_TRYLOCK(env, bhp->mtx_buf);
++                              if (t_ret != 0) {
++                                      __update_err_ret(t_ret, &ret);
++                                      continue;
++                              }
++
++                              DB_ASSERT(env, (!F_ISSET(bhp, BH_EXCLUSIVE) &&
++                                  BH_REFCOUNT(bhp) == 0));
++                              F_SET(bhp, BH_EXCLUSIVE);
++                              (void)atomic_inc(env, &bhp->ref);
++
++                              __memp_bh_clear_dirty(env, hp, bhp);
++
++                              /*
++                               * Free the buffer. The buffer and hash bucket
++                               * are unlocked by __memp_bhfree.
++                               */
++                              if ((t_ret = __memp_bhfree(dbmp, infop, mfp,
++                                  hp, bhp, BH_FREE_FREEMEM)) == 0)
++                                      /*
++                                       * Decrement hp, so the next turn will
++                                       * search the same bucket again.
++                                       */
++                                      hp--;
++                              else
++                                      __update_err_ret(t_ret, &ret);
++
++                              /*
++                               * The hash bucket is unlocked, we need to
++                               * start over again.
++                               */
++                              h_lock = 0;
++                              break;
++                      }
++
++                      if (h_lock) {
++                              MUTEX_UNLOCK(env, hp->mtx_hash);
++                              h_lock = 0;
++                      }
++              }
++      }
++
++      return (ret);
++}
++
++static inline void
++__update_err_ret(t_ret, retp)
++      int t_ret;
++      int *retp;
++{
++      if (t_ret != 0 && t_ret != DB_LOCK_NOTGRANTED && *retp == 0)
++              *retp = t_ret;
++}
+diff -U 5 -r db-5.3.21.old/src/mp/mp_trickle.c db-5.3.21/src/mp/mp_trickle.c
+--- db-5.3.21.old/src/mp/mp_trickle.c  2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_trickle.c      2016-10-25 17:27:57.000000000 +0800
+@@ -65,10 +65,14 @@
+           "DB_ENV->memp_trickle: %d: percent must be between 1 and 100",
+                   "%d"), pct);
+               return (EINVAL);
+       }
+ 
++      /* First we purge all dead files and their buffers. */
++      if ((ret = __memp_purge_dead_files(env)) != 0)
++              return (ret);
++
+       /*
+        * Loop through the caches counting total/dirty buffers.
+        *
+        * XXX
+        * Using hash_page_dirty is our only choice at the moment, but it's not
+diff -U 5 -r db-5.3.21.old/src/mutex/mut_region.c db-5.3.21/src/mutex/mut_region.c
+--- db-5.3.21.old/src/mutex/mut_region.c       2012-05-12 01:57:54.000000000 +0800
++++ db-5.3.21/src/mutex/mut_region.c   2016-10-25 17:34:22.000000000 +0800
+@@ -15,11 +15,11 @@
+ #include "dbinc/txn.h"
+ 
+ static db_size_t __mutex_align_size __P((ENV *));
+ static int __mutex_region_init __P((ENV *, DB_MUTEXMGR *));
+ static size_t __mutex_region_size __P((ENV *));
+-static size_t __mutex_region_max __P((ENV *));
++static size_t __mutex_region_max __P((ENV *, u_int32_t));
+ 
+ /*
+  * __mutex_open --
+  *    Open a mutex region.
+  *
+@@ -32,11 +32,11 @@
+ {
+       DB_ENV *dbenv;
+       DB_MUTEXMGR *mtxmgr;
+       DB_MUTEXREGION *mtxregion;
+       size_t size;
+-      u_int32_t cpu_count;
++      u_int32_t cpu_count, mutex_needed;
+       int ret;
+ #ifndef HAVE_ATOMIC_SUPPORT
+       u_int i;
+ #endif
+ 
+@@ -59,23 +59,24 @@
+                   cpu_count : cpu_count * MUTEX_SPINS_PER_PROCESSOR)) != 0)
+                       return (ret);
+       }
+ 
+       /*
+-       * If the user didn't set an absolute value on the number of mutexes
+-       * we'll need, figure it out.  We're conservative in our allocation,
+-       * we need mutexes for DB handles, group-commit queues and other things
+-       * applications allocate at run-time.  The application may have kicked
+-       * up our count to allocate its own mutexes, add that in.
++       * Figure out the number of mutexes we'll need.  We're conservative in
++       * our allocation, we need mutexes for DB handles, group-commit queues
++       * and other things applications allocate at run-time.  The application
++       * may have kicked up our count to allocate its own mutexes, add that
++       * in.
+        */
++      mutex_needed =
++          __lock_region_mutex_count(env) +
++          __log_region_mutex_count(env) +
++          __memp_region_mutex_count(env) +
++          __txn_region_mutex_count(env);
+       if (dbenv->mutex_cnt == 0 &&
+           F_ISSET(env, ENV_PRIVATE | ENV_THREAD) != ENV_PRIVATE)
+-              dbenv->mutex_cnt =
+-                  __lock_region_mutex_count(env) +
+-                  __log_region_mutex_count(env) +
+-                  __memp_region_mutex_count(env) +
+-                  __txn_region_mutex_count(env);
++              dbenv->mutex_cnt = mutex_needed;
+ 
+       if (dbenv->mutex_max != 0 && dbenv->mutex_cnt > dbenv->mutex_max)
+               dbenv->mutex_cnt = dbenv->mutex_max;
+ 
+       /* Create/initialize the mutex manager structure. */
+@@ -88,12 +89,12 @@
+       mtxmgr->reginfo.id = INVALID_REGION_ID;
+       mtxmgr->reginfo.flags = REGION_JOIN_OK;
+       size = __mutex_region_size(env);
+       if (create_ok)
+               F_SET(&mtxmgr->reginfo, REGION_CREATE_OK);
+-      if ((ret = __env_region_attach(env,
+-          &mtxmgr->reginfo, size, size + __mutex_region_max(env))) != 0)
++      if ((ret = __env_region_attach(env, &mtxmgr->reginfo,
++          size, size + __mutex_region_max(env, mutex_needed))) != 0)
+               goto err;
+ 
+       /* If we created the region, initialize it. */
+       if (F_ISSET(&mtxmgr->reginfo, REGION_CREATE))
+               if ((ret = __mutex_region_init(env, mtxmgr)) != 0)
+@@ -350,44 +351,62 @@
+ 
+       dbenv = env->dbenv;
+ 
+       s = sizeof(DB_MUTEXMGR) + 1024;
+ 
+-      /* We discard one mutex for the OOB slot. */
++      /* 
++       * We discard one mutex for the OOB slot. Make sure mutex_cnt doesn't
++       * overflow.
++       */
+       s += __env_alloc_size(
+-          (dbenv->mutex_cnt + 1) *__mutex_align_size(env));
++          (dbenv->mutex_cnt + (dbenv->mutex_cnt == UINT32_MAX ? 0 : 1)) *
++          __mutex_align_size(env));
+ 
+       return (s);
+ }
+ 
+ /*
+  * __mutex_region_max --
+  *     Return the amount of space needed to reach the maximum size.
+  */
+ static size_t
+-__mutex_region_max(env)
++__mutex_region_max(env, mutex_needed)
+       ENV *env;
++      u_int32_t mutex_needed;
+ {
+       DB_ENV *dbenv;
+-      u_int32_t max;
++      u_int32_t max, mutex_cnt;
+ 
+       dbenv = env->dbenv;
++      mutex_cnt = dbenv->mutex_cnt;
+ 
+-      if ((max = dbenv->mutex_max) == 0) {
++      /*
++       * We want to limit the region size to accommodate at most UINT32_MAX
++       * mutexes. If mutex_cnt is UINT32_MAX, no more space is allowed.
++       */
++      if ((max = dbenv->mutex_max) == 0 && mutex_cnt != UINT32_MAX)
+               if (F_ISSET(env, ENV_PRIVATE | ENV_THREAD) == ENV_PRIVATE)
+-                      max = dbenv->mutex_inc + 1;
+-              else
++                      if (dbenv->mutex_inc + 1 < UINT32_MAX - mutex_cnt)
++                              max = dbenv->mutex_inc + 1 + mutex_cnt;
++                      else
++                              max = UINT32_MAX;
++              else {
+                       max = __lock_region_mutex_max(env) +
+                           __txn_region_mutex_max(env) +
+                           __log_region_mutex_max(env) +
+                           dbenv->mutex_inc + 100;
+-      } else if (max <= dbenv->mutex_cnt)
++                      if (max < UINT32_MAX - mutex_needed)
++                              max += mutex_needed;
++                      else
++                              max = UINT32_MAX;
++              }
++
++      if (max <= mutex_cnt)
+               return (0);
+       else
+-              max -= dbenv->mutex_cnt;
+-
+-      return ( __env_alloc_size(max * __mutex_align_size(env)));
++              return (__env_alloc_size(
++                  (max - mutex_cnt) * __mutex_align_size(env)));
+ }
+ 
+ #ifdef        HAVE_MUTEX_SYSTEM_RESOURCES
+ /*
+  * __mutex_resource_return
+
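
The core of this patch is the new __memp_purge_dead_files(): when closing, removing or truncating files would leave "dead" MPOOLFILEs holding a large share of a nearly full mutex region, the cache is scanned and their buffers are freed. The scan only ever uses MUTEX_TRYLOCK and skips whatever is busy, which is why it can take the bucket and buffer locks in the opposite order from other code paths without risking deadlock. A rough pthread sketch of that non-blocking sweep; the bucket layout and names are illustrative, not the mpool structures:

#include <pthread.h>
#include <stddef.h>

struct bucket {
	pthread_mutex_t lock;
	int dead;		/* stand-in for mfp->deadfile */
};

/* Visit every bucket but never block: trylock and skip busy ones. Because this
 * path never waits on a lock, acquiring locks in a different order than other
 * threads cannot deadlock; busy buckets are simply left for a later pass. */
static size_t purge_dead(struct bucket *tab, size_t n)
{
	size_t i, purged;

	purged = 0;
	for (i = 0; i < n; i++) {
		if (pthread_mutex_trylock(&tab[i].lock) != 0)
			continue;
		if (tab[i].dead) {
			tab[i].dead = 0;	/* release the associated resources here */
			purged++;
		}
		pthread_mutex_unlock(&tab[i].lock);
	}
	return (purged);
}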

Copied: db5.3/repos/testing-x86_64/db-5.3.28-atomic_compare_exchange.patch (from rev 467229, db5.3/trunk/db-5.3.28-atomic_compare_exchange.patch)
===================================================================
--- testing-x86_64/db-5.3.28-atomic_compare_exchange.patch                             (rev 0)
+++ testing-x86_64/db-5.3.28-atomic_compare_exchange.patch      2023-01-23 10:23:40 UTC (rev 467230)
@@ -0,0 +1,20 @@
+--- db-5.3.28/src/dbinc/atomic.h.old   2018-05-23 09:20:04.216914922 +0200
++++ db-5.3.28/src/dbinc/atomic.h       2018-05-23 09:20:49.510057897 +0200
+@@ -144,7 +144,7 @@
+ #define       atomic_inc(env, p)      __atomic_inc(p)
+ #define       atomic_dec(env, p)      __atomic_dec(p)
+ #define       atomic_compare_exchange(env, p, o, n)   \
+-      __atomic_compare_exchange((p), (o), (n))
++      __db_atomic_compare_exchange((p), (o), (n))
+ static inline int __atomic_inc(db_atomic_t *p)
+ {
+       int     temp;
+@@ -176,7 +176,7 @@
+  * http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
+  * which configure could be changed to use.
+  */
+-static inline int __atomic_compare_exchange(
++static inline int __db_atomic_compare_exchange(
+       db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval)
+ {
+       atomic_value_t was;
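
This patch only renames the function so it no longer collides with the __atomic_compare_exchange builtin that newer GCC reserves; the compare-and-swap logic itself is unchanged. On current toolchains the same operation can be written with the standard C11 atomics that the comment in atomic.h alludes to; a sketch for illustration, not the code db5.3 actually ships:

#include <stdatomic.h>
#include <stdbool.h>

/* Set *p to newval only if it still equals oldval and report success, which is
 * what __db_atomic_compare_exchange's return value conveys as well. */
static bool cas_int(atomic_int *p, int oldval, int newval)
{
	/* atomic_compare_exchange_strong rewrites oldval on failure;
	 * only the success flag matters for this illustration. */
	return atomic_compare_exchange_strong(p, &oldval, newval);
}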

Copied: db5.3/repos/testing-x86_64/db-5.3.28-lemon_hash.patch (from rev 467229, db5.3/trunk/db-5.3.28-lemon_hash.patch)
===================================================================
--- testing-x86_64/db-5.3.28-lemon_hash.patch                           (rev 0)
+++ testing-x86_64/db-5.3.28-lemon_hash.patch   2023-01-23 10:23:40 UTC (rev 467230)
@@ -0,0 +1,20 @@
+--- db-5.3.28/lang/sql/sqlite/tool/lemon.c.lemon_hash  2013-09-09 17:35:07.000000000 +0200
++++ db-5.3.28/lang/sql/sqlite/tool/lemon.c     2017-02-22 13:12:08.564106051 +0100
+@@ -3428,7 +3428,7 @@
+   int maxdtlength;          /* Maximum length of any ".datatype" field. */
+   char *stddt;              /* Standardized name for a datatype */
+   int i,j;                  /* Loop counters */
+-  int hash;                 /* For hashing the name of a type */
++  unsigned hash;            /* For hashing the name of a type */
+   const char *name;         /* Name of the parser */
+ 
+   /* Allocate and initialize types[] and allocate stddt[] */
+@@ -3491,7 +3491,7 @@
+         break;
+       }
+       hash++;
+-      if( hash>=arraysize ) hash = 0;
++      if( hash>=(unsigned)arraysize ) hash = 0;
+     }
+     if( types[hash]==0 ){
+       sp->dtnum = hash + 1;
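
The lemon.c fix makes the ".datatype" hash accumulator unsigned: with a signed int the repeated multiply-and-add eventually overflows, which is undefined behaviour and can leave hash negative, so the hash>=arraysize wrap-around check and the table index can misbehave. A small illustration of the well-defined unsigned form; the multiplier is illustrative, not necessarily the constant lemon uses:

/* Multiply-and-add string hash reduced into a table of arraysize slots.
 * Unsigned arithmetic wraps around instead of overflowing, so the
 * "hash >= arraysize" style reduction stays well defined. */
static unsigned type_hash(const char *s, unsigned arraysize)
{
	unsigned hash = 0;

	while (*s)
		hash = hash * 53 + (unsigned char)*s++;
	return hash % arraysize;
}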

Copied: db5.3/repos/testing-x86_64/db-5.3.28-mmap-high-cpu-usage.patch (from rev 467229, db5.3/trunk/db-5.3.28-mmap-high-cpu-usage.patch)
===================================================================
--- testing-x86_64/db-5.3.28-mmap-high-cpu-usage.patch                          (rev 0)
+++ testing-x86_64/db-5.3.28-mmap-high-cpu-usage.patch  2023-01-23 10:23:40 UTC (rev 467230)
@@ -0,0 +1,19 @@
+Author: Filip Januš <[email protected]>
+Date: 6 Sep 2021
+Related: https://bugzilla.redhat.com/show_bug.cgi?id=1992402
+Patch was created based on the discussion in the previous link
+diff -ur db-5.3.28/src/os/os_map.c db_patch/src/os/os_map.c
+--- db-5.3.28/src/os/os_map.c  2013-09-09 17:35:09.000000000 +0200
++++ db_patch/src/os/os_map.c   2021-09-09 07:33:12.027328265 +0200
+@@ -213,7 +213,10 @@
+       if (rp->max < rp->size)
+               rp->max = rp->size;
+       if (ret == 0 && F_ISSET(infop, REGION_CREATE)) {
+-              if (F_ISSET(dbenv, DB_ENV_REGION_INIT))
++
++              rp->size = rp->max;
++
++        if (F_ISSET(dbenv, DB_ENV_REGION_INIT))
+                       ret = __db_file_write(env, infop->fhp,
+                           rp->size / MEGABYTE, rp->size % MEGABYTE, 0x00);
+               else
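
The change above sizes a freshly created region up to its maximum (rp->size = rp->max) instead of letting the backing file grow on demand; per the Red Hat discussion linked in the patch header, the repeated growth was what caused the high CPU usage. A hedged POSIX sketch of the same idea, pre-sizing a file before mapping it; the path and sizes are made up for illustration:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Create a shared region already sized to its maximum so later use never has
 * to extend and remap the backing file. Returns the mapping or MAP_FAILED. */
static void *create_region(const char *path, size_t max_size)
{
	void *p;
	int fd;

	if ((fd = open(path, O_RDWR | O_CREAT, 0600)) == -1)
		return (MAP_FAILED);
	if (ftruncate(fd, (off_t)max_size) == -1) {
		(void)close(fd);
		return (MAP_FAILED);
	}
	p = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	(void)close(fd);
	return (p);
}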

Copied: db5.3/repos/testing-x86_64/db-5.3.28_cve-2019-2708.patch (from rev 467229, db5.3/trunk/db-5.3.28_cve-2019-2708.patch)
===================================================================
--- testing-x86_64/db-5.3.28_cve-2019-2708.patch                                (rev 0)
+++ testing-x86_64/db-5.3.28_cve-2019-2708.patch        2023-01-23 10:23:40 UTC (rev 467230)
@@ -0,0 +1,694 @@
+--- db-18.1.32/src/btree/bt_cursor.c   2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/btree/bt_cursor.c   2020-05-29 23:28:22.000000000 +0530
+@@ -282,6 +282,8 @@
+        *
+        * Recno uses the btree bt_ovflsize value -- it's close enough.
+        */
++      if (t->bt_minkey == 0)
++              return (DB_RECOVER);
+       cp->ovflsize = B_MINKEY_TO_OVFLSIZE(
+           dbp,  F_ISSET(dbc, DBC_OPD) ? 2 : t->bt_minkey, dbp->pgsize);
+ 
+--- db-18.1.32/src/btree/bt_verify.c   2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/btree/bt_verify.c   2020-05-29 23:28:22.000000000 +0530
+@@ -700,7 +700,11 @@
+                       isbad = 1;
+                       goto err;
+               default:
++                      if (ret == 0) {
++                              isbad = 1;
++                              ret = DB_VERIFY_FATAL;
++                              goto err;
++                      }
+-                      DB_ASSERT(env, ret != 0);
+                       break;
+               }
+ 
+@@ -1074,7 +1078,7 @@
+       DBT dbta, dbtb, dup_1, dup_2, *p1, *p2, *tmp;
+       ENV *env;
+       PAGE *child;
++      db_pgno_t cpgno, grandparent;
+-      db_pgno_t cpgno;
+       VRFY_PAGEINFO *pip;
+       db_indx_t i, *inp;
+       int adj, cmp, freedup_1, freedup_2, isbad, ret, t_ret;
+@@ -1106,7 +1110,8 @@
+ 
+       buf1 = buf2 = NULL;
+ 
++      if (LF_ISSET(DB_NOORDERCHK))
++              return (EINVAL);
+-      DB_ASSERT(env, !LF_ISSET(DB_NOORDERCHK));
+ 
+       dupfunc = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+       if (TYPE(h) == P_LDUP)
+@@ -1115,6 +1120,7 @@
+               func = __bam_defcmp;
+               if (dbp->bt_internal != NULL) {
+                       bt = (BTREE *)dbp->bt_internal;
++                      grandparent = bt->bt_root;
+                       if (TYPE(h) == P_IBTREE && (bt->bt_compare != NULL ||
+                           dupfunc != __bam_defcmp)) {
+                               /*
+@@ -974,8 +980,24 @@
+                                */
+                               mpf = dbp->mpf;
+                               child = h;
++                              cpgno = pgno;
+                               while (TYPE(child) == P_IBTREE) {
++                                      if (NUM_ENT(child) == 0) {
++                                              EPRINT((env, DB_STR_A("1088",
++                  "Page %lu: internal page is empty and should not be",
++                                          "%lu"), (u_long)cpgno));
++                                              ret = DB_VERIFY_BAD;
++                                              goto err;
++                                      }
+                                       bi = GET_BINTERNAL(dbp, child, 0);
++                                      if (grandparent == bi->pgno) {
++                                              EPRINT((env, DB_STR_A("5552",
++                                            "Page %lu: found twice in the btree",
++                                        "%lu"), (u_long)grandparent));
++                                              ret = DB_VERIFY_FATAL;
++                                              goto err;
++                                      } else
++                                              grandparent = cpgno;
+                                       cpgno = bi->pgno;
+                                       if (child != h &&
+                                           (ret = __memp_fput(mpf,
+@@ -1402,7 +1416,10 @@
+                                        */
+                                       if (dup_1.data == NULL ||
+                                           dup_2.data == NULL) {
++                                              if (ovflok) {
++                                                      isbad = 1;
++                                                      goto err;
++                                              }
+-                                              DB_ASSERT(env, !ovflok);
+                                               if (pip != NULL)
+                                                       F_SET(pip,
+                                                           VRFY_INCOMPLETE);
+@@ -1747,9 +1764,10 @@
+                           (ret = __db_vrfy_ovfl_structure(dbp, vdp,
+                           child->pgno, child->tlen,
+                           flags | DB_ST_OVFL_LEAF)) != 0) {
++                              if (ret == DB_VERIFY_BAD) {
+-                              if (ret == DB_VERIFY_BAD)
+                                       isbad = 1;
++                                      break;
++                              } else
+-                              else
+                                       goto done;
+                       }
+ 
+@@ -1823,9 +1841,10 @@
+                                                   stflags | DB_ST_TOPLEVEL,
+                                                   NULL, NULL, NULL)) != 0) {
+                                                       if (ret ==
++                                                          DB_VERIFY_BAD) {
+-                                                          DB_VERIFY_BAD)
+                                                               isbad = 1;
++                                                              break;
++                                                      } else
+-                                                      else
+                                                               goto err;
+                                               }
+                                       }
+@@ -1969,7 +1988,10 @@
+                        */
+ 
+                       /* Otherwise, __db_vrfy_childput would be broken. */
++                      if (child->refcnt < 1) {
++                              isbad = 1;
++                              goto err;
++                      }
+-                      DB_ASSERT(env, child->refcnt >= 1);
+ 
+                       /*
+                        * An overflow referenced more than twice here
+@@ -1986,9 +2008,10 @@
+                                       if ((ret = __db_vrfy_ovfl_structure(dbp,
+                                           vdp, child->pgno, child->tlen,
+                                           flags)) != 0) {
++                                              if (ret == DB_VERIFY_BAD) {
+-                                              if (ret == DB_VERIFY_BAD)
+                                                       isbad = 1;
++                                                      break;
++                                              } else
+-                                              else
+                                                       goto done;
+                                       }
+               }
+@@ -2026,9 +2049,10 @@
+               if ((ret = __bam_vrfy_subtree(dbp, vdp, li->pgno,
+                   i == 0 ? NULL : li, ri, flags, &child_level,
+                   &child_nrecs, NULL)) != 0) {
++                      if (ret == DB_VERIFY_BAD) {
+-                      if (ret == DB_VERIFY_BAD)
+                               isbad = 1;
++                              break;
++                      } else
+-                      else
+                               goto done;
+               }
+ 
+@@ -2929,7 +2953,11 @@
+       db_pgno_t current, p;
+       int err_ret, ret;
+ 
++      if (pgset == NULL) {
++              EPRINT((dbp->env, DB_STR("5542",
++                      "Error, database contains no visible pages.")));
++              return (DB_RUNRECOVERY);
++      }
+-      DB_ASSERT(dbp->env, pgset != NULL);
+ 
+       mpf = dbp->mpf;
+       h = NULL;
+--- db-18.1.32/src/db/db_conv.c        2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/db/db_conv.c        2020-05-29 23:28:22.000000000 +0530
+@@ -493,8 +493,11 @@
+       db_indx_t i, *inp, len, tmp;
+       u_int8_t *end, *p, *pgend;
+ 
+-      if (pagesize == 0)
+-              return (0);
++      /* This function is also used to byteswap logs, so
++       * the pagesize might not be an actual page size.
++       */
++      if (!(pagesize >= 24 && pagesize <= DB_MAX_PGSIZE))
++              return (EINVAL);
+ 
+       if (pgin) {
+               M_32_SWAP(h->lsn.file);
+@@ -513,26 +516,41 @@
+       pgend = (u_int8_t *)h + pagesize;
+ 
+       inp = P_INP(dbp, h);
+-      if ((u_int8_t *)inp >= pgend)
+-              goto out;
++      if ((u_int8_t *)inp > pgend)
++              return (__db_pgfmt(env, pg));
+ 
+       switch (TYPE(h)) {
+       case P_HASH_UNSORTED:
+       case P_HASH:
+               for (i = 0; i < NUM_ENT(h); i++) {
++                      if ((u_int8_t*)(inp + i) >= pgend)
++                              return (__db_pgfmt(env, pg));
++                      if (inp[i] == 0)
++                              continue;
+                       if (pgin)
+                               M_16_SWAP(inp[i]);
++                      if (inp[i] >= pagesize)
++                              return (__db_pgfmt(env, pg));
+ 
+-                      if (P_ENTRY(dbp, h, i) >= pgend)
+-                              continue;
++                      if (P_ENTRY(dbp, h, i) >= pgend)
++                              return (__db_pgfmt(env, pg));
+ 
+                       switch (HPAGE_TYPE(dbp, h, i)) {
+                       case H_KEYDATA:
+                               break;
+                       case H_DUPLICATE:
++                              if (LEN_HITEM(dbp, h, pagesize, i) < 
++                                  HKEYDATA_SIZE(0))
++                                      return (__db_pgfmt(env, pg));
++
+                               len = LEN_HKEYDATA(dbp, h, pagesize, i);
+                               p = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
+-                              for (end = p + len; p < end;) {
++
++                              end = p + len;
++                              if (end > pgend)
++                                      return (__db_pgfmt(env, pg));
++
++                              while (p < end) {
+                                       if (pgin) {
+                                               P_16_SWAP(p);
+                                               memcpy(&tmp,
+@@ -544,14 +562,20 @@
+                                               SWAP16(p);
+                                       }
+                                       p += tmp;
++                                      if (p >= end)
++                                              return (__db_pgfmt(env, pg));
+                                       SWAP16(p);
+                               }
+                               break;
+                       case H_OFFDUP:
++                              if ((inp[i] + HOFFDUP_SIZE) > pagesize)
++                                      return (__db_pgfmt(env, pg));
+                               p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
+                               SWAP32(p);                      /* pgno */
+                               break;
+                       case H_OFFPAGE:
++                              if ((inp[i] + HOFFPAGE_SIZE) > pagesize)
++                                      return (__db_pgfmt(env, pg));
+                               p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
+                               SWAP32(p);                      /* pgno */
+                               SWAP32(p);                      /* tlen */
+@@ -559,7 +583,6 @@
+                       default:
+                               return (__db_pgfmt(env, pg));
+                       }
+-
+               }
+ 
+               /*
+@@ -576,8 +599,12 @@
+       case P_LDUP:
+       case P_LRECNO:
+               for (i = 0; i < NUM_ENT(h); i++) {
++                      if ((u_int8_t *)(inp + i) >= pgend)
++                              return (__db_pgfmt(env, pg));
+                       if (pgin)
+                               M_16_SWAP(inp[i]);
++                      if (inp[i] >= pagesize)
++                              return (__db_pgfmt(env, pg));
+ 
+                       /*
+                        * In the case of on-page duplicates, key information
+@@ -597,7 +624,7 @@
+ 
+                       bk = GET_BKEYDATA(dbp, h, i);
+                       if ((u_int8_t *)bk >= pgend)
+-                              continue;
++                              return (__db_pgfmt(env, pg));
+                       switch (B_TYPE(bk->type)) {
+                       case B_KEYDATA:
+                               M_16_SWAP(bk->len);
+@@ -605,6 +632,8 @@
+                       case B_DUPLICATE:
+                       case B_OVERFLOW:
+                               bo = (BOVERFLOW *)bk;
++                              if (((u_int8_t *)bo + BOVERFLOW_SIZE) > pgend)
++                                      return (__db_pgfmt(env, pg));
+                               M_32_SWAP(bo->pgno);
+                               M_32_SWAP(bo->tlen);
+                               break;
+@@ -618,12 +647,17 @@
+               break;
+       case P_IBTREE:
+               for (i = 0; i < NUM_ENT(h); i++) {
++                      if ((u_int8_t *)(inp + i) > pgend)
++                              return (__db_pgfmt(env, pg));
+                       if (pgin)
+                               M_16_SWAP(inp[i]);
++                      if ((u_int16_t)(inp[i] + 
++                          BINTERNAL_SIZE(0) - 1) > pagesize)
++                              break;
+ 
+                       bi = GET_BINTERNAL(dbp, h, i);
+-                      if ((u_int8_t *)bi >= pgend)
+-                              continue;
++                      if (((u_int8_t *)bi + BINTERNAL_SIZE(0)) > pgend)
++                              return (__db_pgfmt(env, pg));
+ 
+                       M_16_SWAP(bi->len);
+                       M_32_SWAP(bi->pgno);
+@@ -634,6 +668,10 @@
+                               break;
+                       case B_DUPLICATE:
+                       case B_OVERFLOW:
++                              if ((u_int16_t)(inp[i] + 
++                                  BINTERNAL_SIZE(BOVERFLOW_SIZE) - 1) >
++                                  pagesize)
++                                      goto out;
+                               bo = (BOVERFLOW *)bi->data;
+                               M_32_SWAP(bo->pgno);
+                               M_32_SWAP(bo->tlen);
+@@ -648,12 +686,16 @@
+               break;
+       case P_IRECNO:
+               for (i = 0; i < NUM_ENT(h); i++) {
++                      if ((u_int8_t *)(inp + i) >= pgend)
++                              return (__db_pgfmt(env, pg));
+                       if (pgin)
+                               M_16_SWAP(inp[i]);
++                      if (inp[i] >= pagesize)
++                              return (__db_pgfmt(env, pg));
+ 
+                       ri = GET_RINTERNAL(dbp, h, i);
+-                      if ((u_int8_t *)ri >= pgend)
+-                              continue;
++                      if ((((u_int8_t *)ri) + RINTERNAL_SIZE) > pgend)
++                              return (__db_pgfmt(env, pg));
+ 
+                       M_32_SWAP(ri->pgno);
+                       M_32_SWAP(ri->nrecs);
+--- db-18.1.32/src/db/db_vrfy.c        2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/db/db_vrfy.c        2020-05-29 23:28:22.000000000 +0530
+@@ -381,8 +381,10 @@
+                   vdp, name, 0, lp, rp, flags)) != 0) {
+                       if (t_ret == DB_VERIFY_BAD)
+                               isbad = 1;
++                      else {
++                          ret = t_ret;
++                          goto err;
++                      }
+-                      else
+-                              goto err;
+               }
+ 
+       /*
+@@ -771,9 +773,10 @@
+                */
+               if ((t_ret = __memp_fget(mpf, &i,
+                   vdp->thread_info, NULL, 0, &h)) != 0) {
++                      if ((dbp->type == DB_HASH ||
+-                      if (dbp->type == DB_HASH ||
+                           (dbp->type == DB_QUEUE &&
++                          F_ISSET(dbp, DB_AM_INMEM))) &&
++                          t_ret != DB_RUNRECOVERY) {
+-                          F_ISSET(dbp, DB_AM_INMEM))) {
+                               if ((t_ret =
+                                   __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
+                                       goto err1;
+@@ -945,6 +948,8 @@
+                       return (ret == 0 ? t_ret : ret);
+       }
+ 
++      if (ret == DB_PAGE_NOTFOUND && isbad == 1)
++              ret = 0;
+       return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+ }
+ 
+@@ -1581,7 +1586,7 @@
+       if (pgno == PGNO_BASE_MD &&
+           dbtype != DB_QUEUE && meta->last_pgno != vdp->last_pgno) {
+ #ifdef HAVE_FTRUNCATE
++              ret = DB_VERIFY_FATAL;
+-              isbad = 1;
+               EPRINT((env, DB_STR_A("0552",
+                   "Page %lu: last_pgno is not correct: %lu != %lu",
+                   "%lu %lu %lu"), (u_long)pgno,
+@@ -1622,7 +1627,11 @@
+ 
+       env = dbp->env;
+       pgset = vdp->pgset;
++      if (pgset == NULL) {
++              EPRINT((env, DB_STR("5543",
++                      "Error, database contains no visible pages.")));
++              return (DB_RUNRECOVERY);
++      }
+-      DB_ASSERT(env, pgset != NULL);
+ 
+       if ((ret = __db_vrfy_getpageinfo(vdp, meta, &pip)) != 0)
+               return (ret);
+@@ -2014,7 +2023,8 @@
+       int keyflag, ret, t_ret;
+ 
+       env = dbp->env;
++      if (!LF_ISSET(DB_SALVAGE))
++              return (EINVAL);
+-      DB_ASSERT(env, LF_ISSET(DB_SALVAGE));
+ 
+       /*
+        * !!!
+@@ -2126,10 +2136,8 @@
+       int (*callback) __P((void *, const void *));
+       u_int32_t flags;
+ {
+-      ENV *env;
+-
+-      env = dbp->env;
+-      DB_ASSERT(env, LF_ISSET(DB_SALVAGE));
++      if (!LF_ISSET(DB_SALVAGE))
++              return (EINVAL);
+ 
+       /* If we got this page in the subdb pass, we can safely skip it. */
+       if (__db_salvage_isdone(vdp, pgno))
+@@ -2242,8 +2253,8 @@
+                               ret = t_ret;
+                       break;
+               case SALVAGE_OVERFLOW:
++                      EPRINT((env, DB_STR("5544", "Invalid page type to 
salvage.")));
++                      return (EINVAL);
+-                      DB_ASSERT(env, 0);      /* Shouldn't ever happen. */
+-                      break;
+               case SALVAGE_HASH:
+                       if ((t_ret = __ham_salvage(dbp, vdp,
+                           pgno, h, handle, callback, flags)) != 0 && ret == 0)
+@@ -2256,8 +2267,8 @@
+                        * Shouldn't happen, but if it does, just do what the
+                        * nice man says.
+                        */
++                      EPRINT((env, DB_STR("5545", "Invalid page type to 
salvage.")));
++                      return (EINVAL);
+-                      DB_ASSERT(env, 0);
+-                      break;
+               }
+               if ((t_ret = __memp_fput(mpf,
+                   vdp->thread_info, h, dbp->priority)) != 0 && ret == 0)
+@@ -2303,8 +2314,8 @@
+                                       ret = t_ret;
+                       break;
+               default:
++                      EPRINT((env, DB_STR("5546", "Invalid page type to 
salvage.")));
++                      return (EINVAL);
+-                      DB_ASSERT(env, 0);      /* Shouldn't ever happen. */
+-                      break;
+               }
+               if ((t_ret = __memp_fput(mpf,
+                   vdp->thread_info, h, dbp->priority)) != 0 && ret == 0)
+@@ -2361,7 +2372,10 @@
+ 
+       env = dbp->env;
+ 
++      if (himarkp == NULL) {
++              __db_msg(env, "Page %lu index has no end.", (u_long)pgno);
++              return (DB_VERIFY_FATAL);
++      }
+-      DB_ASSERT(env, himarkp != NULL);
+       inp = P_INP(dbp, h);
+ 
+       /*
+@@ -2783,7 +2797,11 @@
+                                       goto err;
+                               ovfl_bufsz = bkkey->len + 1;
+                       }
++                      if (subdbname == NULL) {
++                              EPRINT((env, DB_STR("5547", "Subdatabase cannot 
be null.")));
++                              ret = EINVAL;
++                              goto err;
++                      }
+-                      DB_ASSERT(env, subdbname != NULL);
+                       memcpy(subdbname, bkkey->data, bkkey->len);
+                       subdbname[bkkey->len] = '\0';
+               }
+--- db-18.1.32/src/db/db_vrfyutil.c    2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/db/db_vrfyutil.c    2020-05-29 23:28:22.000000000 +0530
+@@ -214,7 +214,8 @@
+       if ((ret = __db_get(pgdbp,
+           vdp->thread_info, vdp->txn, &key, &data, 0)) == 0) {
+               /* Found it. */
++              if (data.size != sizeof(VRFY_PAGEINFO))
++                      return (DB_VERIFY_FATAL);
+-              DB_ASSERT(env, data.size == sizeof(VRFY_PAGEINFO));
+               pip = data.data;
+               LIST_INSERT_HEAD(&vdp->activepips, pip, links);
+               goto found;
+@@ -342,7 +343,8 @@
+       F_SET(&data, DB_DBT_USERMEM);
+ 
+       if ((ret = __db_get(dbp, ip, txn, &key, &data, 0)) == 0) {
++              if (data.size != sizeof(int))
++                      return (EINVAL);
+-              DB_ASSERT(dbp->env, data.size == sizeof(int));
+       } else if (ret == DB_NOTFOUND)
+               val = 0;
+       else
+@@ -382,7 +384,8 @@
+       F_SET(&data, DB_DBT_USERMEM);
+ 
+       if ((ret = __db_get(dbp, ip, txn, &key, &data, 0)) == 0) {
++              if (data.size != sizeof(int))
++                      return (DB_VERIFY_FATAL);
+-              DB_ASSERT(dbp->env, data.size == sizeof(int));
+       } else if (ret != DB_NOTFOUND)
+               return (ret);
+ 
+@@ -419,7 +422,8 @@
+       if ((ret = __dbc_get(dbc, &key, &data, DB_NEXT)) != 0)
+               return (ret);
+ 
++      if (key.size != sizeof(db_pgno_t))
++              return (DB_VERIFY_FATAL);
+-      DB_ASSERT(dbc->env, key.size == sizeof(db_pgno_t));
+       *pgnop = pgno;
+ 
+       return (0);
+@@ -566,7 +570,8 @@
+       if ((ret = __dbc_get(dbc, &key, &data, DB_SET)) != 0)
+               return (ret);
+ 
++      if (data.size != sizeof(VRFY_CHILDINFO))
++              return (DB_VERIFY_FATAL);
+-      DB_ASSERT(dbc->env, data.size == sizeof(VRFY_CHILDINFO));
+       *cipp = (VRFY_CHILDINFO *)data.data;
+ 
+       return (0);
+@@ -594,7 +599,8 @@
+       if ((ret = __dbc_get(dbc, &key, &data, DB_NEXT_DUP)) != 0)
+               return (ret);
+ 
++      if (data.size != sizeof(VRFY_CHILDINFO))
++              return (DB_VERIFY_FATAL);
+-      DB_ASSERT(dbc->env, data.size == sizeof(VRFY_CHILDINFO));
+       *cipp = (VRFY_CHILDINFO *)data.data;
+ 
+       return (0);
+@@ -721,7 +727,8 @@
+               return (ret);
+ 
+       while ((ret = __dbc_get(*dbcp, &key, &data, DB_NEXT)) == 0) {
++              if (data.size != sizeof(u_int32_t))
++                      return (DB_VERIFY_FATAL);
+-              DB_ASSERT(dbp->env, data.size == sizeof(u_int32_t));
+               memcpy(&pgtype, data.data, sizeof(pgtype));
+ 
+               if (skip_overflow && pgtype == SALVAGE_OVERFLOW)
+@@ -730,8 +737,9 @@
+               if ((ret = __dbc_del(*dbcp, 0)) != 0)
+                       return (ret);
+               if (pgtype != SALVAGE_IGNORE) {
++                      if (key.size != sizeof(db_pgno_t)
++                              || data.size != sizeof(u_int32_t))
++                              return (DB_VERIFY_FATAL);
+-                      DB_ASSERT(dbp->env, key.size == sizeof(db_pgno_t));
+-                      DB_ASSERT(dbp->env, data.size == sizeof(u_int32_t));
+ 
+                       *pgnop = *(db_pgno_t *)key.data;
+                       *pgtypep = *(u_int32_t *)data.data;
+--- db-18.1.32/src/db/partition.c      2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/db/partition.c      2020-05-29 23:28:22.000000000 +0530
+@@ -461,9 +461,19 @@
+               } else
+                       part->nparts = meta->nparts;
+       } else if (meta->nparts != 0 && part->nparts != meta->nparts) {
++              ret = EINVAL;
+               __db_errx(env, DB_STR("0656",
+                   "Number of partitions does not match."));
+-              ret = EINVAL;
++              goto err;
++      }
++      /*
++       * There is no limit on the number of partitions, but I cannot imagine a real
++       * database having more than 10000.
++       */
++      if (meta->nparts > 10000) {
++              ret = EINVAL;
++              __db_errx(env, DB_STR_A("5553",
++                      "Too many partitions %lu", "%lu"), 
(u_long)(meta->nparts));
+               goto err;
+       }
+ 
+@@ -2106,10 +2116,13 @@
+                       memcpy(rp->data, key->data, key->size);
+                       B_TSET(rp->type, B_KEYDATA);
+               }
++vrfy:   if ((t_ret = __db_verify(*pdbp, ip, (*pdbp)->fname,
++            NULL, handle, callback,
++            lp, rp, flags | DB_VERIFY_PARTITION)) != 0 && ret == 0) {
++              ret = t_ret;
++            if (ret == ENOENT)
++                break;
++          }
+-vrfy:         if ((t_ret = __db_verify(*pdbp, ip, (*pdbp)->fname,
+-                  NULL, handle, callback,
+-                  lp, rp, flags | DB_VERIFY_PARTITION)) != 0 && ret == 0)
+-                      ret = t_ret;
+       }
+ 
+ err:  if (lp != NULL)
+--- db-18.1.32/src/hash/hash_page.c    2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/hash/hash_page.c    2020-05-29 23:28:22.000000000 +0530
+@@ -869,7 +869,11 @@
+       /* Validate that next, prev pointers are OK */
+       n = NUM_ENT(p);
+       dbp = dbc->dbp;
++      if (n % 2 != 0) {
++              __db_errx(dbp->env, DB_STR_A("5549",
++                "Odd number of entries on page: %lu", "%lu"), 
(u_long)(p->pgno));
++              return (DB_VERIFY_FATAL);
++      }
+-      DB_ASSERT(dbp->env, n%2 == 0 );
+ 
+       env = dbp->env;
+       t = dbp->h_internal;
+@@ -940,7 +944,12 @@
+                       if ((ret = __db_prpage(dbp, p, DB_PR_PAGE)) != 0)
+                               return (ret);
+ #endif
++                      if (res >= 0) {
++                              __db_errx(env, DB_STR_A("5550",
++                                      "Odd number of entries on page: %lu", 
"%lu"),
++                                      (u_long)p->pgno);
++                              return (DB_VERIFY_FATAL);
++                      }
+-                      DB_ASSERT(dbp->env, res < 0);
+               }
+ 
+               prev = curr;
+--- db-18.1.32/src/hash/hash_verify.c  2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/hash/hash_verify.c  2020-05-29 23:28:22.000000000 +0530
+@@ -615,7 +615,7 @@
+                               isbad = 1;
+                       else
+                               goto err;
++              }
+-                  }
+ 
+       /*
+        * There may be unused hash pages corresponding to buckets
+@@ -746,7 +746,7 @@
+                   "Page %lu: impossible first page in bucket %lu", "%lu %lu"),
+                   (u_long)pgno, (u_long)bucket));
+               /* Unsafe to continue. */
++              ret = DB_VERIFY_FATAL;
+-              isbad = 1;
+               goto err;
+       }
+ 
+@@ -776,7 +776,7 @@
+                       EPRINT((env, DB_STR_A("1116",
+                           "Page %lu: hash page referenced twice", "%lu"),
+                           (u_long)pgno));
++                      ret = DB_VERIFY_FATAL;
+-                      isbad = 1;
+                       /* Unsafe to continue. */
+                       goto err;
+               } else if ((ret = __db_vrfy_pgset_inc(vdp->pgset,
+@@ -1307,7 +1307,11 @@
+       COMPQUIET(flags, 0);
+       ip = vdp->thread_info;
+ 
++      if (pgset == NULL) {
++              EPRINT((dbp->env, DB_STR("5548",
++                      "Error, database contains no visible pages.")));
++              return (DB_VERIFY_FATAL);
++      }
+-      DB_ASSERT(dbp->env, pgset != NULL);
+ 
+       mpf = dbp->mpf;
+       totpgs = 0;
+--- db-18.1.32/src/qam/qam_verify.c    2019-02-20 03:21:20.000000000 +0530
++++ db-18.1.40/src/qam/qam_verify.c    2020-05-29 23:28:22.000000000 +0530
+@@ -465,7 +465,14 @@
+       /* Verify/salvage each page. */
+       if ((ret = __db_cursor(dbp, vdp->thread_info, NULL, &dbc, 0)) != 0)
+               return (ret);
+-begin:        for (; i <= stop; i++) {
++begin:        if ((stop - i) > 100000) {
++              EPRINT((env, DB_STR_A("5551",
++"Warning, many possible extends files (%lu), will take a long time to verify",
++          "%lu"), (u_long)(stop - i)));
++      }
++      for (; i <= stop; i++) {
++              if (i == UINT32_MAX)
++                      break;
+               /*
+                * If DB_SALVAGE is set, we inspect our database of completed
+                * pages, and skip any we've already printed in the subdb pass.
