RPM Package Manager, CVS Repository
  http://rpm5.org/cvs/
  ____________________________________________________________________________

  Server: rpm5.org                         Name:   Jeff Johnson
  Root:   /v/rpm/cvs                       Email:  j...@rpm5.org
  Module: rpm                              Date:   05-Jun-2017 10:25:52
  Branch: rpm-5_4                          Handle: 2017060508255101

  Added files:              (Branch: rpm-5_4)
    rpm/rpmio               xxhash.c xxhash.h
  Modified files:           (Branch: rpm-5_4)
    rpm/rpmio               Makefile.am digest.c rpmzstd.c

  Log:
    - zstd: turn on benchmark options (which need xxhash to verify blocks).

  Summary:
    Revision    Changes     Path
    1.293.2.88  +3  -2      rpm/rpmio/Makefile.am
    2.93.2.12   +3  -0      rpm/rpmio/digest.c
    1.1.2.7     +71 -59     rpm/rpmio/rpmzstd.c
    1.1.2.1     +876 -0     rpm/rpmio/xxhash.c
    1.1.2.1     +305 -0     rpm/rpmio/xxhash.h
  ____________________________________________________________________________
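
  For context: the benchmark code enabled below uses xxhash to check that a
  block which has been compressed and decompressed still matches its source.
  A minimal sketch of that idea (not the committed code; the zstd round trip
  is replaced by a plain copy, and the xxhash.c/xxhash.h added in this commit
  are assumed to be compiled in):

      #include <stdio.h>
      #include <stdlib.h>
      #include <string.h>
      #include "xxhash.h"            /* one-shot XXH64(), added below */

      /* Hash the source, run it through a (placeholder) round trip, hash the
       * result; equal digests mean the block survived intact.  memcpy()
       * stands in here for the real compress+decompress pair. */
      static int verify_block(const void *src, size_t len)
      {
          unsigned long long const srcHash = XXH64(src, len, 0);
          void *copy = malloc(len);
          if (copy == NULL) return -1;
          memcpy(copy, src, len);    /* placeholder round trip */
          unsigned long long const outHash = XXH64(copy, len, 0);
          free(copy);
          return (srcHash == outHash) ? 0 : 1;
      }

      int main(void)
      {
          static const char block[] = "block to verify";
          printf("verify: %s\n", verify_block(block, sizeof block) ? "MISMATCH" : "ok");
          return 0;
      }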

  patch -p0 <<'@@ .'
  Index: rpm/rpmio/Makefile.am
  ============================================================================
  $ cvs diff -u -r1.293.2.87 -r1.293.2.88 Makefile.am
  --- rpm/rpmio/Makefile.am     3 Jun 2017 17:39:04 -0000       1.293.2.87
  +++ rpm/rpmio/Makefile.am     5 Jun 2017 08:25:51 -0000       1.293.2.88
  @@ -14,9 +14,10 @@
        getdate.y html-parse.c html-parse.h libsqlio.c \
        rpmcpio.c rpmcpio.h rpmgenbasedir.c rpmgenpkglist.c rpmgensrclist.c \
        rpmjsio.msg rpmtar.c rpmtar.h \
  -     tdir.c tfts.c tget.c tgfs.c tgit.c tglob.c thkp.c thtml.c tinv.c tkey.c \
  -     tmire.c tmq.c tmqtt.c todbc.c tput.c tpython.c trpmio.c tsexp.c \
  +     tdir.c tfts.c tget.c tgfs.c tgit.c tglob.c thkp.c thtml.c tinv.c \
  +     tkey.c tmire.c tmq.c tmqtt.c todbc.c tput.c tpython.c trpmio.c tsexp.c \
        tsvn.c tsw.c lookup3.c duktape.c tjsmn.c tjson.c yajl.c testit.sh \
  +     xxhash.h xxhash.c \
        microjson.c mongoc-counters.defs
   
   EXTRA_PROGRAMS = rpmcpio rpmdpkg rpmtar rpmz
  @@ .
  patch -p0 <<'@@ .'
  Index: rpm/rpmio/digest.c
  ============================================================================
  $ cvs diff -u -r2.93.2.11 -r2.93.2.12 digest.c
  --- rpm/rpmio/digest.c        26 May 2017 20:49:22 -0000      2.93.2.11
  +++ rpm/rpmio/digest.c        5 Jun 2017 08:25:51 -0000       2.93.2.12
  @@ -121,6 +121,9 @@
   #define      _JLU3_jlu32l
   #include "lookup3.c"
   
  +/* Include Yann Collet XXH hash */
  +#define XXH_NAMESPACE   RPMZSTD_
  +#include "xxhash.c"
   
   /**
    * Digest private data.
  @@ .
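
  digest.c defines XXH_NAMESPACE as RPMZSTD_ before including xxhash.c, so
  xxhash.h token-pastes that prefix onto every public symbol and this copy
  cannot collide with the ZSTD_-prefixed copy built inside rpmzstd.c.  A
  minimal standalone sketch of the same mechanism (not the committed code):

      #include <stdio.h>
      #include <string.h>

      /* Same trick as digest.c: define the namespace before pulling in the
       * implementation, and xxhash.h renames every public entry point
       * (XXH64 -> RPMZSTD_XXH64, XXH64_reset -> RPMZSTD_XXH64_reset, ...). */
      #define XXH_NAMESPACE   RPMZSTD_
      #include "xxhash.c"            /* brings xxhash.h along, prefixed */

      int main(void)
      {
          static const char msg[] = "namespace demo";
          /* Source still says XXH64(); the object file exports RPMZSTD_XXH64. */
          printf("%016llx\n", XXH64(msg, strlen(msg), 0));
          return 0;
      }

  Callers keep writing XXH64(); only the exported symbol name changes.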
  patch -p0 <<'@@ .'
  Index: rpm/rpmio/rpmzstd.c
  ============================================================================
  $ cvs diff -u -r1.1.2.6 -r1.1.2.7 rpmzstd.c
  --- rpm/rpmio/rpmzstd.c       5 Jun 2017 01:04:42 -0000       1.1.2.6
  +++ rpm/rpmio/rpmzstd.c       5 Jun 2017 08:25:51 -0000       1.1.2.7
  @@ -8,10 +8,9 @@
    * of patent rights can be found in the PATENTS file in the same directory.
    */
   
  -#define ZSTD_NOBENCH 1
   #define ZSTD_NODICT  1
   
  -#undef       POPTIO_OPTIONS          /* XXX NOTYET: <poptIO.h> option/arg processing. */
  +#undef       POPTIO_OPTIONS  /* XXX NOTYET: <poptIO.h> option/arg processing. */
   #define      USE_RPMIO
   #define      _FPIO   ""
   
  @@ -41,21 +40,21 @@
   #include <rpmdir.h>
   #else
   typedef      FILE * FD_t;
  -#define Fopen        fopen
  -#define      Fread   fread
  -#define      Fwrite  fwrite
  -#define      Fseek   fseek
  -#define      Fclose  fclose
  -#define      Ferror  ferror
  -#define      Stat    stat
  -#define      Lstat   lstat
  -#define Unlink       remove
  -#define Chmod        chmod
  -#define Chown        chown
  -#define      Utime   utime
  -#define      Opendir opendir
  -#define      Readdir readdir
  -#define      Closedir closedir
  +#define Fopen(...)   fopen(__VA_ARGS__)
  +#define      Fread(...)      fread(__VA_ARGS__)
  +#define      Fwrite(...)     fwrite(__VA_ARGS__)
  +#define      Fseek(...)      fseek(__VA_ARGS__)
  +#define      Fclose(...)     fclose(__VA_ARGS__)
  +#define      Ferror(...)     ferror(__VA_ARGS__)
  +#define      Stat(...)       stat(__VA_ARGS__)
  +#define      Lstat(...)      lstat(__VA_ARGS__)
  +#define Unlink(...)  remove(__VA_ARGS__)
  +#define Chmod(...)   chmod(__VA_ARGS__)
  +#define Chown(...)   chown(__VA_ARGS__)
  +#define      Utime(...)      utime(__VA_ARGS__)
  +#define      Opendir(...)    opendir(__VA_ARGS__)
  +#define      Readdir(...)    readdir(__VA_ARGS__)
  +#define      Closedir(...)   closedir(__VA_ARGS__)
   #endif
   
   #include <rpmmacro.h>
  @@ -761,7 +760,7 @@
   /* --- ../lib/compress/zstdmt_compress.h */
   
   #if defined (__cplusplus)
  -extern "C" {
  + extern "C" {
   #endif
   
   
  @@ -2002,6 +2001,10 @@
   
   #ifndef ZSTD_NOBENCH
   
  +/* --- lib/common/xxhash.c */
  +#define      XXH_NAMESPACE   ZSTD_
  +#include "xxhash.c"
  +
   /* **************************************
   *  Tuning parameters
   ****************************************/
  @@ -2040,15 +2043,15 @@
   *  console display
   ***************************************/
   #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
  -#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
  -static int g_displayLevel = 2;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
  +#define DISPLAYLEVEL(l, ...) if (g_BMK_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
  +static int g_BMK_displayLevel = 2;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
   
  -#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
  -            if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \
  -            { g_time = clock(); DISPLAY(__VA_ARGS__); \
  -            if (g_displayLevel>=4) fflush(stderr); } }
  -static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
  -static clock_t g_time = 0;
  +#define DISPLAYUPDATE(l, ...) if (g_BMK_displayLevel>=l) { \
  +            if ((clock() - g_BMK_time > BMK_refreshRate) || (g_BMK_displayLevel>=4)) \
  +            { g_BMK_time = clock(); DISPLAY(__VA_ARGS__); \
  +            if (g_BMK_displayLevel>=4) fflush(stderr); } }
  +static const clock_t BMK_refreshRate = CLOCKS_PER_SEC * 15 / 100;
  +static clock_t g_BMK_time = 0;
   
   
   /* *************************************
  @@ -2074,7 +2077,7 @@
   static int g_additionalParam = 0;
   static U32 g_decodeOnly = 0;
   
  -void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; }
  +void BMK_setNotificationLevel(unsigned level) { g_BMK_displayLevel=level; }
   
   void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; }
   
  @@ -2086,24 +2089,24 @@
       DISPLAYLEVEL(3, "- test >= %u seconds per compression / decompression - \n", g_nbSeconds);
   }
   
  -static size_t g_blockSize = 0;
  +static size_t g_BMK_blockSize = 0;
   void BMK_setBlockSize(size_t blockSize)
   {
   SPEW("-->\t%s()\n", __FUNCTION__);
  -    g_blockSize = blockSize;
  -    g_blockSize = blockSize;
  -    if (g_blockSize) DISPLAYLEVEL(2, "using blocks of size %u KB \n", (U32)(blockSize>>10));
  +    g_BMK_blockSize = blockSize;
  +    if (g_BMK_blockSize) DISPLAYLEVEL(2, "using blocks of size %u KB \n", (U32)(blockSize>>10));
   }
   
   void BMK_setDecodeOnlyMode(unsigned decodeFlag) { g_decodeOnly = (decodeFlag>0); }
   
  -static U32 g_nbThreads = 1;
  +static U32 g_BMK_nbThreads = 1;
   void BMK_setNbThreads(unsigned nbThreads)
   {
   SPEW("-->\t%s()\n", __FUNCTION__);
   #ifndef ZSTD_MULTITHREAD
       if (nbThreads > 1) DISPLAYLEVEL(2, "Note : multi-threading is disabled \n");
   #endif
  -    g_nbThreads = nbThreads;
  +    g_BMK_nbThreads = nbThreads;
   }
   
   
  @@ -2127,14 +2130,14 @@
                           const ZSTD_compressionParameters* comprParams)
   {
   SPEW("-->\t%s()\n", __FUNCTION__);
  -    size_t const blockSize = ((g_blockSize>=32 && !g_decodeOnly) ? g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
  +    size_t const blockSize = ((g_BMK_blockSize>=32 && !g_decodeOnly) ? g_BMK_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
       size_t const avgSize = MIN(blockSize, (srcSize / nbFiles));
       U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
       blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));
       size_t const maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024);   /* add some room for safety */
       void * const compressedBuffer = malloc(maxCompressedSize);
       void * resultBuffer = malloc(srcSize);
  -    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(g_nbThreads);
  +    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(g_BMK_nbThreads);
       ZSTD_CCtx* const ctx = ZSTD_createCCtx();
       ZSTD_DCtx* const dctx = ZSTD_createDCtx();
       size_t const loadedCompressedSize = srcSize;
  @@ -2374,7 +2377,7 @@
   #endif
           }   /* for (testNb = 1; testNb <= (g_nbSeconds + !g_nbSeconds); testNb++) */
   
  -        if (g_displayLevel == 1) {
  +        if (g_BMK_displayLevel == 1) {
               double cSpeed = (double)srcSize / fastestC;
               double dSpeed = (double)srcSize / fastestD;
               if (g_additionalParam)
  @@ -2413,7 +2416,7 @@
       } while (!testmem);
   
       free(testmem);
  -SPEW("<--\t%s() rc %llu\n", __FUNCTION__, requiredMem);
  +SPEW("<--\t%s() rc %llu\n", __FUNCTION__, (unsigned long long)requiredMem);
       return (size_t)(requiredMem);
   }
   
  @@ -2435,8 +2438,8 @@
           SET_REALTIME_PRIORITY;
       }
   
  -    if (g_displayLevel == 1 && !g_additionalParam)
  -        DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", 
ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, (U32)benchedSize, g_nbSeconds, 
(U32)(g_blockSize>>10));
  +    if (g_BMK_displayLevel == 1 && !g_additionalParam)
  +        DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", 
ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, (U32)benchedSize, g_nbSeconds, 
(U32)(g_BMK_blockSize>>10));
   
       if (cLevelLast < cLevel) cLevelLast = cLevel;
   
  @@ -2459,7 +2462,7 @@
   SPEW("-->\t%s()\n", __FUNCTION__);
       size_t totalSize = 0;
       size_t pos = 0;
  -    for (unsigned n = 0; n < ac; n++) {
  +    for (int n = 0; n < ac; n++) {
           FD_t fd;
           U64 fileSize = UTIL_getFileSize(av[n]);
           if (UTIL_isDirectory(av[n])) {
  @@ -2468,7 +2471,7 @@
               continue;
           }
           fd = Fopen(av[n], "rb");
  -        if (fp == NULL) EXM_THROW(10, "impossible to open file %s", av[n]);
  +        if (fd == NULL) EXM_THROW(10, "impossible to open file %s", av[n]);
           DISPLAYUPDATE(2, "Loading %s...       \r", av[n]);
        /* buffer too small - stop after this file */
           if (fileSize > bufferSize-pos) {
  @@ -2607,16 +2610,19 @@
   /*-*************************************
   *  Console display
   ***************************************/
  +#undef       DISPLAY
   #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
  -#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
  -static int g_displayLevel = 0;   /* 0 : no display;   1: errors;   2: default;  4: full information */
  +#undef       DISPLAYLEVEL
  +#define DISPLAYLEVEL(l, ...) if (g_DiB_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
  +static int g_DiB_displayLevel = 0;   /* 0 : no display;   1: errors;   2: default;  4: full information */
   
  -#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
  -            if ((DIB_clockSpan(g_time) > refreshRate) || (g_displayLevel>=4)) \
  -            { g_time = clock(); DISPLAY(__VA_ARGS__); \
  -            if (g_displayLevel>=4) fflush(stderr); } }
  -static const clock_t refreshRate = CLOCKS_PER_SEC * 2 / 10;
  -static clock_t g_time = 0;
  +#undef       DISPLAYUPDATE
  +#define DISPLAYUPDATE(l, ...) if (g_DiB_displayLevel>=l) { \
  +            if ((DIB_clockSpan(g_DiB_time) > DiB_refreshRate) || (g_DiB_displayLevel>=4)) \
  +            { g_DiB_time = clock(); DISPLAY(__VA_ARGS__); \
  +            if (g_DiB_displayLevel>=4) fflush(stderr); } }
  +static const clock_t DiB_refreshRate = CLOCKS_PER_SEC * 2 / 10;
  +static clock_t g_DiB_time = 0;
   
   static clock_t DIB_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
   
  @@ -2629,6 +2635,7 @@
   #endif
   #define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__);
   
  +#undef       EXM_THROW
   #define EXM_THROW(error, ...)                                             \
   {                                                                         \
       DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
  @@ -2642,8 +2649,10 @@
   /* ********************************************************
   *  Helper functions
   **********************************************************/
  +RPM_GNUC_CONST
   unsigned DiB_isError(size_t errorCode) { return ERR_isError(errorCode); }
   
  +RPM_GNUC_CONST
  +const char * DiB_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
   
   
  @@ -2659,7 +2668,7 @@
   SPEW("-->\t%s()\n", __FUNCTION__);
       char * const buff = (char *)buffer;
       size_t pos = 0;
  -    unsigned n;
  +    int n = 0;
   
       for (n = 0; n < ac; n++) {
           const char * const fn = av[n];
  @@ -2668,7 +2677,7 @@
           if (fileSize > *bufferSizePtr-pos) break;
           {   
               FD_t fd = Fopen(fn, "rb");
  -            if (fp == NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fn, strerror(errno));
  +            if (fd == NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fn, strerror(errno));
               DISPLAYUPDATE(2, "Loading %s...       \r", fn);
               { size_t const readSize = Fread(buff+pos, 1, fileSize, fd);
                 if (readSize != fileSize) EXM_THROW(11, "Pb reading %s", fn);
  @@ -2769,13 +2778,13 @@
   {
   SPEW("-->\t%s()\n", __FUNCTION__);
       U64 total = 0;
  -    for (unsigned n = 0; n < ac; n++) {
  +    for (int n = 0; n < ac; n++) {
           U64 const fileSize = UTIL_getFileSize(av[n]);
           U64 const cappedFileSize = MIN(fileSize, SAMPLESIZE_MAX);
           total += cappedFileSize;
           g_tooLargeSamples |= (fileSize > 2*SAMPLESIZE_MAX);
       }
  -SPEW("<--\t%s() rc %llu\n", __FUNCTION__, total);
  +SPEW("<--\t%s() rc %llu\n", __FUNCTION__, (unsigned long long)total);
       return total;
   }
   
  @@ -2808,8 +2817,8 @@
       int result = 0;
   
       /* Checks */
  -    if (params) g_displayLevel = params->notificationLevel;
  -    else if (coverParams) g_displayLevel = coverParams->notificationLevel;
  +    if (params) g_DiB_displayLevel = params->notificationLevel;
  +    else if (coverParams) g_DiB_displayLevel = coverParams->notificationLevel;
       else EXM_THROW(13, "Neither dictionary algorith selected");   /* should not happen */
       if ((!fileSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles");   /* should not happen */
       if (g_tooLargeSamples) {
  @@ -2868,7 +2877,7 @@
       free(srcBuffer);
       free(dictBuffer);
       free(fileSizes);
  -SPEW("<--\t%s() rc %p\n", __FUNCTION__, result);
  +SPEW("<--\t%s() rc %d\n", __FUNCTION__, result);
       return result;
   }
   
  @@ -2942,18 +2951,20 @@
   ***************************************/
   
   #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
  +#undef       DISPLAYLEVEL
   #define DISPLAYLEVEL(l, ...) { if (g_FIO_displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
   
   static int g_FIO_displayLevel = 2;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
   
   void FIO_setNotificationLevel(unsigned level) { g_FIO_displayLevel=level; }
   
  +#undef       DISPLAYUPDATE
   #define DISPLAYUPDATE(l, ...) { if (g_FIO_displayLevel>=l) { \
  -            if ((clock() - g_time > refreshRate) || (g_FIO_displayLevel>=4)) \
  -            { g_time = clock(); DISPLAY(__VA_ARGS__); \
  +            if ((clock() - g_FIO_time > FIO_refreshRate) || (g_FIO_displayLevel>=4)) \
  +            { g_FIO_time = clock(); DISPLAY(__VA_ARGS__); \
               if (g_FIO_displayLevel>=4) fflush(stderr); } } }
  -static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
  -static clock_t g_time = 0;
  +static const clock_t FIO_refreshRate = CLOCKS_PER_SEC * 15 / 100;
  +static clock_t g_FIO_time = 0;
   
   /* ************************************************************
   * Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW
  @@ -3039,6 +3050,7 @@
   #  define DEBUG 0
   #endif
   #define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__);
  +#undef       EXM_THROW
   #define EXM_THROW(error, ...)                                             \
   {                                                                         \
       DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
  @@ .
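
  Two patterns recur in the hunk above: the file-scope benchmark,
  dictionary-builder and fileio globals gain BMK_/DiB_/FIO_ prefixes so the
  amalgamated sections can coexist in one translation unit now that
  ZSTD_NOBENCH is gone, and the non-rpmio fallback macros become
  function-like.  A small standalone sketch of the macro change (not the
  committed code; the file it opens is arbitrary):

      #include <stdio.h>

      /* Function-like macros expand only at call sites, so an identifier
       * such as "Fread" used elsewhere as a declaration or struct member is
       * left alone; the old object-like "#define Fread fread" rewrote every
       * occurrence of the name. */
      typedef FILE * FD_t;
      #define Fopen(...)      fopen(__VA_ARGS__)
      #define Fread(...)      fread(__VA_ARGS__)
      #define Fclose(...)     fclose(__VA_ARGS__)

      int main(int argc, char *argv[])
      {
          (void)argc;
          char buf[256];
          FD_t fd = Fopen(argv[0], "rb");     /* any readable file will do */
          if (fd == NULL) return 1;
          size_t n = Fread(buf, 1, sizeof buf, fd);
          printf("read %zu bytes from %s\n", n, argv[0]);
          return Fclose(fd);
      }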
  patch -p0 <<'@@ .'
  Index: rpm/rpmio/xxhash.c
  ============================================================================
  $ cvs diff -u -r0 -r1.1.2.1 xxhash.c
  --- /dev/null 2017-06-05 10:22:00.000000000 +0200
  +++ xxhash.c  2017-06-05 10:25:52.177912666 +0200
  @@ -0,0 +1,876 @@
  +/*
  +*  xxHash - Fast Hash algorithm
  +*  Copyright (C) 2012-2016, Yann Collet
  +*
  +*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
  +*
  +*  Redistribution and use in source and binary forms, with or without
  +*  modification, are permitted provided that the following conditions are
  +*  met:
  +*
  +*  * Redistributions of source code must retain the above copyright
  +*  notice, this list of conditions and the following disclaimer.
  +*  * Redistributions in binary form must reproduce the above
  +*  copyright notice, this list of conditions and the following disclaimer
  +*  in the documentation and/or other materials provided with the
  +*  distribution.
  +*
  +*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  +*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  +*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  +*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  +*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  +*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  +*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  +*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  +*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  +*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  +*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  +*
  +*  You can contact the author at :
  +*  - xxHash homepage: http://www.xxhash.com
  +*  - xxHash source repository : https://github.com/Cyan4973/xxHash
  +*/
  +
  +
  +/* *************************************
  +*  Tuning parameters
  +***************************************/
  +/*!XXH_FORCE_MEMORY_ACCESS :
  + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
  + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
  + * The below switch allow to select different access method for improved performance.
  + * Method 0 (default) : use `memcpy()`. Safe and portable.
  + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
  + *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
  + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
  + *            It can generate buggy code on targets which do not support unaligned memory accesses.
  + *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
  + * See http://stackoverflow.com/a/32095106/646947 for details.
  + * Prefer these methods in priority order (0 > 1 > 2)
  + */
  +#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
  +#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
  +#    define XXH_FORCE_MEMORY_ACCESS 2
  +#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  +  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
  +#    define XXH_FORCE_MEMORY_ACCESS 1
  +#  endif
  +#endif
  +
  +/*!XXH_ACCEPT_NULL_INPUT_POINTER :
  + * If the input pointer is a null pointer, xxHash default behavior is to 
trigger a memory access error, since it is a bad pointer.
  + * When this option is enabled, xxHash output for null input pointers will 
be the same as a null-length input.
  + * By default, this option is disabled. To enable it, uncomment below define 
:
  + */
  +/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
  +
  +/*!XXH_FORCE_NATIVE_FORMAT :
  + * By default, xxHash library provides endian-independant Hash values, based 
on little-endian convention.
  + * Results are therefore identical for little-endian and big-endian CPU.
  + * This comes at a performance cost for big-endian CPU, since some swapping 
is required to emulate little-endian format.
  + * Should endian-independance be of no importance for your application, you 
may set the #define below to 1,
  + * to improve speed for Big-endian CPU.
  + * This option has no impact on Little_Endian CPU.
  + */
  +#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
  +#  define XXH_FORCE_NATIVE_FORMAT 0
  +#endif
  +
  +/*!XXH_FORCE_ALIGN_CHECK :
  + * This is a minor performance trick, only useful with lots of very small 
keys.
  + * It means : check for aligned/unaligned input.
  + * The check costs one initial branch per hash; set to 0 when the input data
  + * is guaranteed to be aligned.
  + */
  +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
  +#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
  +#    define XXH_FORCE_ALIGN_CHECK 0
  +#  else
  +#    define XXH_FORCE_ALIGN_CHECK 1
  +#  endif
  +#endif
  +
  +
  +/* *************************************
  +*  Includes & Memory related functions
  +***************************************/
  +/* Modify the local functions below should you wish to use some other memory 
routines */
  +/* for malloc(), free() */
  +#include <stdlib.h>
  +static void* XXH_malloc(size_t s) { return malloc(s); }
  +static void  XXH_free  (void* p)  { free(p); }
  +/* for memcpy() */
  +#include <string.h>
  +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
  +
  +#ifndef XXH_STATIC_LINKING_ONLY
  +#  define XXH_STATIC_LINKING_ONLY
  +#endif
  +#include "xxhash.h"
  +
  +
  +/* *************************************
  +*  Compiler Specific Options
  +***************************************/
  +#ifdef _MSC_VER    /* Visual Studio */
  +#  pragma warning(disable : 4127)      /* disable: C4127: conditional 
expression is constant */
  +#  define FORCE_INLINE static __forceinline
  +#else
  +#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
  +#    ifdef __GNUC__
  +#      define FORCE_INLINE static inline __attribute__((always_inline))
  +#    else
  +#      define FORCE_INLINE static inline
  +#    endif
  +#  else
  +#    define FORCE_INLINE static
  +#  endif /* __STDC_VERSION__ */
  +#endif
  +
  +
  +/* *************************************
  +*  Basic Types
  +***************************************/
  +#ifndef MEM_MODULE
  +# define MEM_MODULE
  +# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  +#   include <stdint.h>
  +    typedef uint8_t  BYTE;
  +    typedef uint16_t U16;
  +    typedef uint32_t U32;
  +    typedef  int32_t S32;
  +    typedef uint64_t U64;
  +#  else
  +    typedef unsigned char      BYTE;
  +    typedef unsigned short     U16;
  +    typedef unsigned int       U32;
  +    typedef   signed int       S32;
  +    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
  +#  endif
  +#endif
  +
  +
  +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
  +
  +/* Force direct memory access. Only works on CPU which support unaligned 
memory access in hardware */
  +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
  +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
  +
  +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
  +
  +/* __pack instructions are safer, but compiler specific, hence potentially 
problematic for some compilers */
  +/* currently only defined for gcc and icc */
  +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
  +
  +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
  +static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
  +
  +#else
  +
  +/* portable and safe solution. Generally efficient.
  + * see : http://stackoverflow.com/a/32095106/646947
  + */
  +
  +static U32 XXH_read32(const void* memPtr)
  +{
  +    U32 val;
  +    memcpy(&val, memPtr, sizeof(val));
  +    return val;
  +}
  +
  +static U64 XXH_read64(const void* memPtr)
  +{
  +    U64 val;
  +    memcpy(&val, memPtr, sizeof(val));
  +    return val;
  +}
  +
  +#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
  +
  +
  +/* ****************************************
  +*  Compiler-specific Functions and Macros
  +******************************************/
  +#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
  +
  +/* Note : although _rotl exists for minGW (GCC under windows), performance 
seems poor */
  +#if defined(_MSC_VER)
  +#  define XXH_rotl32(x,r) _rotl(x,r)
  +#  define XXH_rotl64(x,r) _rotl64(x,r)
  +#else
  +#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
  +#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
  +#endif
  +
  +#if defined(_MSC_VER)     /* Visual Studio */
  +#  define XXH_swap32 _byteswap_ulong
  +#  define XXH_swap64 _byteswap_uint64
  +#elif GCC_VERSION >= 403
  +#  define XXH_swap32 __builtin_bswap32
  +#  define XXH_swap64 __builtin_bswap64
  +#else
  +static U32 XXH_swap32 (U32 x)
  +{
  +    return  ((x << 24) & 0xff000000 ) |
  +            ((x <<  8) & 0x00ff0000 ) |
  +            ((x >>  8) & 0x0000ff00 ) |
  +            ((x >> 24) & 0x000000ff );
  +}
  +static U64 XXH_swap64 (U64 x)
  +{
  +    return  ((x << 56) & 0xff00000000000000ULL) |
  +            ((x << 40) & 0x00ff000000000000ULL) |
  +            ((x << 24) & 0x0000ff0000000000ULL) |
  +            ((x << 8)  & 0x000000ff00000000ULL) |
  +            ((x >> 8)  & 0x00000000ff000000ULL) |
  +            ((x >> 24) & 0x0000000000ff0000ULL) |
  +            ((x >> 40) & 0x000000000000ff00ULL) |
  +            ((x >> 56) & 0x00000000000000ffULL);
  +}
  +#endif
  +
  +
  +/* *************************************
  +*  Architecture Macros
  +***************************************/
  +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
  +
  +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the 
compiler command line */
  +#ifndef XXH_CPU_LITTLE_ENDIAN
  +    static const int g_one = 1;
  +#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
  +#endif
  +
  +
  +/* ***************************
  +*  Memory reads
  +*****************************/
  +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
  +
  +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
  +{
  +    if (align==XXH_unaligned)
  +        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
  +    else
  +        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
  +}
  +
  +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
  +{
  +    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
  +}
  +
  +static U32 XXH_readBE32(const void* ptr)
  +{
  +    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : 
XXH_read32(ptr);
  +}
  +
  +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
  +{
  +    if (align==XXH_unaligned)
  +        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
  +    else
  +        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
  +}
  +
  +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
  +{
  +    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
  +}
  +
  +static U64 XXH_readBE64(const void* ptr)
  +{
  +    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : 
XXH_read64(ptr);
  +}
  +
  +
  +/* *************************************
  +*  Macros
  +***************************************/
  +#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) 
}; }    /* use only *after* variable declarations */
  +
  +
  +/* *************************************
  +*  Constants
  +***************************************/
  +static const U32 PRIME32_1 = 2654435761U;
  +static const U32 PRIME32_2 = 2246822519U;
  +static const U32 PRIME32_3 = 3266489917U;
  +static const U32 PRIME32_4 =  668265263U;
  +static const U32 PRIME32_5 =  374761393U;
  +
  +static const U64 PRIME64_1 = 11400714785074694791ULL;
  +static const U64 PRIME64_2 = 14029467366897019727ULL;
  +static const U64 PRIME64_3 =  1609587929392839161ULL;
  +static const U64 PRIME64_4 =  9650029242287828579ULL;
  +static const U64 PRIME64_5 =  2870177450012600261ULL;
  +
  +__attribute__((__const__))
  +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return 
XXH_VERSION_NUMBER; }
  +
  +
  +/* **************************
  +*  Utils
  +****************************/
  +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const 
XXH32_state_t* restrict srcState)
  +{
  +    memcpy(dstState, srcState, sizeof(*dstState));
  +}
  +
  +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const 
XXH64_state_t* restrict srcState)
  +{
  +    memcpy(dstState, srcState, sizeof(*dstState));
  +}
  +
  +
  +/* ***************************
  +*  Simple Hash Functions
  +*****************************/
  +
  +static U32 XXH32_round(U32 seed, U32 input)
  +{
  +    seed += input * PRIME32_2;
  +    seed  = XXH_rotl32(seed, 13);
  +    seed *= PRIME32_1;
  +    return seed;
  +}
  +
  +FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, 
XXH_endianess endian, XXH_alignment align)
  +{
  +    const BYTE* p = (const BYTE*)input;
  +    const BYTE* bEnd = p + len;
  +    U32 h32;
  +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
  +
  +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
  +    if (p==NULL) {
  +        len=0;
  +        bEnd=p=(const BYTE*)(size_t)16;
  +    }
  +#endif
  +
  +    if (len>=16) {
  +        const BYTE* const limit = bEnd - 16;
  +        U32 v1 = seed + PRIME32_1 + PRIME32_2;
  +        U32 v2 = seed + PRIME32_2;
  +        U32 v3 = seed + 0;
  +        U32 v4 = seed - PRIME32_1;
  +
  +        do {
  +            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
  +            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
  +            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
  +            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
  +        } while (p<=limit);
  +
  +        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + 
XXH_rotl32(v4, 18);
  +    } else {
  +        h32  = seed + PRIME32_5;
  +    }
  +
  +    h32 += (U32) len;
  +
  +    while (p+4<=bEnd) {
  +        h32 += XXH_get32bits(p) * PRIME32_3;
  +        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
  +        p+=4;
  +    }
  +
  +    while (p<bEnd) {
  +        h32 += (*p) * PRIME32_5;
  +        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
  +        p++;
  +    }
  +
  +    h32 ^= h32 >> 15;
  +    h32 *= PRIME32_2;
  +    h32 ^= h32 >> 13;
  +    h32 *= PRIME32_3;
  +    h32 ^= h32 >> 16;
  +
  +    return h32;
  +}
  +
  +
  +__attribute__((__pure__))
  +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned 
int seed)
  +{
  +#if 0
  +    /* Simple version, good for code maintenance, but unfortunately slow for 
small inputs */
  +    XXH32_CREATESTATE_STATIC(state);
  +    XXH32_reset(state, seed);
  +    XXH32_update(state, input, len);
  +    return XXH32_digest(state);
  +#else
  +    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
  +
  +    if (XXH_FORCE_ALIGN_CHECK) {
  +        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
  +            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
  +            else
  +                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
  +    }   }
  +
  +    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
  +    else
  +        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
  +#endif
  +}
  +
  +
  +static U64 XXH64_round(U64 acc, U64 input)
  +{
  +    acc += input * PRIME64_2;
  +    acc  = XXH_rotl64(acc, 31);
  +    acc *= PRIME64_1;
  +    return acc;
  +}
  +
  +static U64 XXH64_mergeRound(U64 acc, U64 val)
  +{
  +    val  = XXH64_round(0, val);
  +    acc ^= val;
  +    acc  = acc * PRIME64_1 + PRIME64_4;
  +    return acc;
  +}
  +
  +FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, 
XXH_endianess endian, XXH_alignment align)
  +{
  +    const BYTE* p = (const BYTE*)input;
  +    const BYTE* const bEnd = p + len;
  +    U64 h64;
  +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
  +
  +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
  +    if (p==NULL) {
  +        len=0;
  +        bEnd=p=(const BYTE*)(size_t)32;
  +    }
  +#endif
  +
  +    if (len>=32) {
  +        const BYTE* const limit = bEnd - 32;
  +        U64 v1 = seed + PRIME64_1 + PRIME64_2;
  +        U64 v2 = seed + PRIME64_2;
  +        U64 v3 = seed + 0;
  +        U64 v4 = seed - PRIME64_1;
  +
  +        do {
  +            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
  +            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
  +            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
  +            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
  +        } while (p<=limit);
  +
  +        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + 
XXH_rotl64(v4, 18);
  +        h64 = XXH64_mergeRound(h64, v1);
  +        h64 = XXH64_mergeRound(h64, v2);
  +        h64 = XXH64_mergeRound(h64, v3);
  +        h64 = XXH64_mergeRound(h64, v4);
  +
  +    } else {
  +        h64  = seed + PRIME64_5;
  +    }
  +
  +    h64 += (U64) len;
  +
  +    while (p+8<=bEnd) {
  +        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
  +        h64 ^= k1;
  +        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
  +        p+=8;
  +    }
  +
  +    if (p+4<=bEnd) {
  +        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
  +        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
  +        p+=4;
  +    }
  +
  +    while (p<bEnd) {
  +        h64 ^= (*p) * PRIME64_5;
  +        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
  +        p++;
  +    }
  +
  +    h64 ^= h64 >> 33;
  +    h64 *= PRIME64_2;
  +    h64 ^= h64 >> 29;
  +    h64 *= PRIME64_3;
  +    h64 ^= h64 >> 32;
  +
  +    return h64;
  +}
  +
  +
  +__attribute__((__pure__))
  +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, 
unsigned long long seed)
  +{
  +#if 0
  +    /* Simple version, good for code maintenance, but unfortunately slow for 
small inputs */
  +    XXH64_CREATESTATE_STATIC(state);
  +    XXH64_reset(state, seed);
  +    XXH64_update(state, input, len);
  +    return XXH64_digest(state);
  +#else
  +    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
  +
  +    if (XXH_FORCE_ALIGN_CHECK) {
  +        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
  +            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
  +            else
  +                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
  +    }   }
  +
  +    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
  +    else
  +        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
  +#endif
  +}
  +
  +
  +/* **************************************************
  +*  Advanced Hash Functions
  +****************************************************/
  +
  +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
  +{
  +    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
  +}
  +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
  +{
  +    XXH_free(statePtr);
  +    return XXH_OK;
  +}
  +
  +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
  +{
  +    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
  +}
  +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
  +{
  +    XXH_free(statePtr);
  +    return XXH_OK;
  +}
  +
  +
  +/*** Hash feed ***/
  +
  +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned 
int seed)
  +{
  +    XXH32_state_t state;   /* using a local state to memcpy() in order to 
avoid strict-aliasing warnings */
  +    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for 
future removal */
  +    state.v1 = seed + PRIME32_1 + PRIME32_2;
  +    state.v2 = seed + PRIME32_2;
  +    state.v3 = seed + 0;
  +    state.v4 = seed - PRIME32_1;
  +    memcpy(statePtr, &state, sizeof(state));
  +    return XXH_OK;
  +}
  +
  +
  +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned 
long long seed)
  +{
  +    XXH64_state_t state;   /* using a local state to memcpy() in order to 
avoid strict-aliasing warnings */
  +    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for 
future removal */
  +    state.v1 = seed + PRIME64_1 + PRIME64_2;
  +    state.v2 = seed + PRIME64_2;
  +    state.v3 = seed + 0;
  +    state.v4 = seed - PRIME64_1;
  +    memcpy(statePtr, &state, sizeof(state));
  +    return XXH_OK;
  +}
  +
  +
  +FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const 
void* input, size_t len, XXH_endianess endian)
  +{
  +    const BYTE* p = (const BYTE*)input;
  +    const BYTE* const bEnd = p + len;
  +
  +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
  +    if (input==NULL) return XXH_ERROR;
  +#endif
  +
  +    state->total_len_32 += (unsigned)len;
  +    state->large_len |= (len>=16) | (state->total_len_32>=16);
  +
  +    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
  +        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
  +        state->memsize += (unsigned)len;
  +        return XXH_OK;
  +    }
  +
  +    if (state->memsize) {   /* some data left from previous update */
  +        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 
16-state->memsize);
  +        {   const U32* p32 = state->mem32;
  +            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); 
p32++;
  +            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); 
p32++;
  +            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); 
p32++;
  +            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); 
p32++;
  +        }
  +        p += 16-state->memsize;
  +        state->memsize = 0;
  +    }
  +
  +    if (p <= bEnd-16) {
  +        const BYTE* const limit = bEnd - 16;
  +        U32 v1 = state->v1;
  +        U32 v2 = state->v2;
  +        U32 v3 = state->v3;
  +        U32 v4 = state->v4;
  +
  +        do {
  +            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
  +            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
  +            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
  +            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
  +        } while (p<=limit);
  +
  +        state->v1 = v1;
  +        state->v2 = v2;
  +        state->v3 = v3;
  +        state->v4 = v4;
  +    }
  +
  +    if (p < bEnd) {
  +        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
  +        state->memsize = (unsigned)(bEnd-p);
  +    }
  +
  +    return XXH_OK;
  +}
  +
  +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const 
void* input, size_t len)
  +{
  +    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
  +
  +    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
  +    else
  +        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
  +}
  +
  +
  +
  +FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, 
XXH_endianess endian)
  +{
  +    const BYTE * p = (const BYTE*)state->mem32;
  +    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
  +    U32 h32;
  +
  +    if (state->large_len) {
  +        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + 
XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
  +    } else {
  +        h32 = state->v3 /* == seed */ + PRIME32_5;
  +    }
  +
  +    h32 += state->total_len_32;
  +
  +    while (p+4<=bEnd) {
  +        h32 += XXH_readLE32(p, endian) * PRIME32_3;
  +        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
  +        p+=4;
  +    }
  +
  +    while (p<bEnd) {
  +        h32 += (*p) * PRIME32_5;
  +        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
  +        p++;
  +    }
  +
  +    h32 ^= h32 >> 15;
  +    h32 *= PRIME32_2;
  +    h32 ^= h32 >> 13;
  +    h32 *= PRIME32_3;
  +    h32 ^= h32 >> 16;
  +
  +    return h32;
  +}
  +
  +
  +__attribute__((__pure__))
  +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
  +{
  +    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
  +
  +    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +        return XXH32_digest_endian(state_in, XXH_littleEndian);
  +    else
  +        return XXH32_digest_endian(state_in, XXH_bigEndian);
  +}
  +
  +
  +
  +/* **** XXH64 **** */
  +
  +FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const 
void* input, size_t len, XXH_endianess endian)
  +{
  +    const BYTE* p = (const BYTE*)input;
  +    const BYTE* const bEnd = p + len;
  +
  +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
  +    if (input==NULL) return XXH_ERROR;
  +#endif
  +
  +    state->total_len += len;
  +
  +    if (state->memsize + len < 32) {  /* fill in tmp buffer */
  +        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
  +        state->memsize += (U32)len;
  +        return XXH_OK;
  +    }
  +
  +    if (state->memsize) {   /* tmp buffer is full */
  +        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 
32-state->memsize);
  +        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, 
endian));
  +        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, 
endian));
  +        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, 
endian));
  +        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, 
endian));
  +        p += 32-state->memsize;
  +        state->memsize = 0;
  +    }
  +
  +    if (p+32 <= bEnd) {
  +        const BYTE* const limit = bEnd - 32;
  +        U64 v1 = state->v1;
  +        U64 v2 = state->v2;
  +        U64 v3 = state->v3;
  +        U64 v4 = state->v4;
  +
  +        do {
  +            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
  +            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
  +            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
  +            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
  +        } while (p<=limit);
  +
  +        state->v1 = v1;
  +        state->v2 = v2;
  +        state->v3 = v3;
  +        state->v4 = v4;
  +    }
  +
  +    if (p < bEnd) {
  +        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
  +        state->memsize = (unsigned)(bEnd-p);
  +    }
  +
  +    return XXH_OK;
  +}
  +
  +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const 
void* input, size_t len)
  +{
  +    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
  +
  +    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
  +    else
  +        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
  +}
  +
  +
  +
  +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, 
XXH_endianess endian)
  +{
  +    const BYTE * p = (const BYTE*)state->mem64;
  +    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
  +    U64 h64;
  +
  +    if (state->total_len >= 32) {
  +        U64 const v1 = state->v1;
  +        U64 const v2 = state->v2;
  +        U64 const v3 = state->v3;
  +        U64 const v4 = state->v4;
  +
  +        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + 
XXH_rotl64(v4, 18);
  +        h64 = XXH64_mergeRound(h64, v1);
  +        h64 = XXH64_mergeRound(h64, v2);
  +        h64 = XXH64_mergeRound(h64, v3);
  +        h64 = XXH64_mergeRound(h64, v4);
  +    } else {
  +        h64  = state->v3 + PRIME64_5;
  +    }
  +
  +    h64 += (U64) state->total_len;
  +
  +    while (p+8<=bEnd) {
  +        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
  +        h64 ^= k1;
  +        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
  +        p+=8;
  +    }
  +
  +    if (p+4<=bEnd) {
  +        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
  +        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
  +        p+=4;
  +    }
  +
  +    while (p<bEnd) {
  +        h64 ^= (*p) * PRIME64_5;
  +        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
  +        p++;
  +    }
  +
  +    h64 ^= h64 >> 33;
  +    h64 *= PRIME64_2;
  +    h64 ^= h64 >> 29;
  +    h64 *= PRIME64_3;
  +    h64 ^= h64 >> 32;
  +
  +    return h64;
  +}
  +
  +
  +__attribute__((__pure__))
  +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* 
state_in)
  +{
  +    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
  +
  +    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
  +        return XXH64_digest_endian(state_in, XXH_littleEndian);
  +    else
  +        return XXH64_digest_endian(state_in, XXH_bigEndian);
  +}
  +
  +
  +/* **************************
  +*  Canonical representation
  +****************************/
  +
  +/*! Default XXH result types are basic unsigned 32 and 64 bits.
  +*   The canonical representation follows human-readable write convention, 
aka big-endian (large digits first).
  +*   These functions allow transformation of hash result into and from its 
canonical format.
  +*   This way, hash values can be written into a file or buffer, and remain 
comparable across different systems and programs.
  +*/
  +
  +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
  +{
  +    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
  +    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
  +    memcpy(dst, &hash, sizeof(*dst));
  +}
  +
  +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
  +{
  +    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
  +    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
  +    memcpy(dst, &hash, sizeof(*dst));
  +}
  +
  +__attribute__((__pure__))
  +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
  +{
  +    return XXH_readBE32(src);
  +}
  +
  +__attribute__((__pure__))
  +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
  +{
  +    return XXH_readBE64(src);
  +}
  @@ .
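
  Besides the one-shot and streaming hashes, xxhash.c provides canonical
  (big-endian) serialization helpers for digests that have to be stored or
  exchanged.  A minimal round-trip sketch (standalone, not the committed
  code, assuming the files added in this commit):

      #include <stdio.h>
      #include <string.h>
      #include "xxhash.h"

      /* Serialize a digest in its canonical big-endian form and read it
       * back; the stored bytes compare equal across little- and big-endian
       * hosts. */
      int main(void)
      {
          static const char msg[] = "canonical round trip";
          XXH64_hash_t const h = XXH64(msg, strlen(msg), 0);

          XXH64_canonical_t wire;            /* 8 bytes, big-endian on the wire */
          XXH64_canonicalFromHash(&wire, h);

          XXH64_hash_t const back = XXH64_hashFromCanonical(&wire);
          printf("%016llx %s\n", back, back == h ? "ok" : "MISMATCH");
          return 0;
      }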
  patch -p0 <<'@@ .'
  Index: rpm/rpmio/xxhash.h
  ============================================================================
  $ cvs diff -u -r0 -r1.1.2.1 xxhash.h
  --- /dev/null 2017-06-05 10:22:00.000000000 +0200
  +++ xxhash.h  2017-06-05 10:25:52.188912760 +0200
  @@ -0,0 +1,305 @@
  +/*
  +   xxHash - Extremely Fast Hash algorithm
  +   Header File
  +   Copyright (C) 2012-2016, Yann Collet.
  +
  +   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
  +
  +   Redistribution and use in source and binary forms, with or without
  +   modification, are permitted provided that the following conditions are
  +   met:
  +
  +       * Redistributions of source code must retain the above copyright
  +   notice, this list of conditions and the following disclaimer.
  +       * Redistributions in binary form must reproduce the above
  +   copyright notice, this list of conditions and the following disclaimer
  +   in the documentation and/or other materials provided with the
  +   distribution.
  +
  +   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  +   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  +   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  +   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  +   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  +   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  +   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  +   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  +   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  +   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  +   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  +
  +   You can contact the author at :
  +   - xxHash source repository : https://github.com/Cyan4973/xxHash
  +*/
  +
  +/* Notice extracted from xxHash homepage :
  +
  +xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
  +It also successfully passes all tests from the SMHasher suite.
  +
  +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
  +
  +Name            Speed       Q.Score   Author
  +xxHash          5.4 GB/s     10
  +CrapWow         3.2 GB/s      2       Andrew
  +MumurHash 3a    2.7 GB/s     10       Austin Appleby
  +SpookyHash      2.0 GB/s     10       Bob Jenkins
  +SBox            1.4 GB/s      9       Bret Mulvey
  +Lookup3         1.2 GB/s      9       Bob Jenkins
  +SuperFastHash   1.2 GB/s      1       Paul Hsieh
  +CityHash64      1.05 GB/s    10       Pike & Alakuijala
  +FNV             0.55 GB/s     5       Fowler, Noll, Vo
  +CRC32           0.43 GB/s     9
  +MD5-32          0.33 GB/s    10       Ronald L. Rivest
  +SHA1-32         0.28 GB/s    10
  +
  +Q.Score is a measure of quality of the hash function.
  +It depends on successfully passing SMHasher test set.
  +10 is a perfect score.
  +
  +A 64-bits version, named XXH64, is available since r35.
  +It offers much better speed, but for 64-bits applications only.
  +Name     Speed on 64 bits    Speed on 32 bits
  +XXH64       13.8 GB/s            1.9 GB/s
  +XXH32        6.8 GB/s            6.0 GB/s
  +*/
  +
  +#if defined (__cplusplus)
  +extern "C" {
  +#endif
  +
  +#ifndef XXHASH_H_5627135585666179
  +#define XXHASH_H_5627135585666179 1
  +
  +
  +/* ****************************
  +*  Definitions
  +******************************/
  +#include <stddef.h>   /* size_t */
  +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
  +
  +
  +/* ****************************
  +*  API modifier
  +******************************/
  +/** XXH_PRIVATE_API
  +*   This is useful if you want to include xxhash functions in `static` mode
  +*   in order to inline them, and remove their symbol from the public list.
  +*   Methodology :
  +*     #define XXH_PRIVATE_API
  +*     #include "xxhash.h"
  +*   `xxhash.c` is automatically included.
  +*   It's not useful to compile and link it as a separate module anymore.
  +*/
  +#ifdef XXH_PRIVATE_API
  +#  ifndef XXH_STATIC_LINKING_ONLY
  +#    define XXH_STATIC_LINKING_ONLY
  +#  endif
  +#  if defined(__GNUC__)
  +#    define XXH_PUBLIC_API static __inline __attribute__((unused))
  +#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
  +#    define XXH_PUBLIC_API static inline
  +#  elif defined(_MSC_VER)
  +#    define XXH_PUBLIC_API static __inline
  +#  else
  +#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
  +#  endif
  +#else
  +#  define XXH_PUBLIC_API   /* do nothing */
  +#endif /* XXH_PRIVATE_API */
  +
  +/*!XXH_NAMESPACE, aka Namespace Emulation :
  +
  +If you want to include _and expose_ xxHash functions from within your own library,
  +but also want to avoid symbol collisions with another library which also includes xxHash,
  +
  +you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
  +with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values).
  +
  +Note that no change is required within the calling program as long as it includes `xxhash.h` :
  +regular symbol name will be automatically translated by this header.
  +*/
  +#ifdef XXH_NAMESPACE
  +#  define XXH_CAT(A,B) A##B
  +#  define XXH_NAME2(A,B) XXH_CAT(A,B)
  +#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
  +#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
  +#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
  +#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
  +#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
  +#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
  +#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
  +#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
  +#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
  +#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
  +#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
  +#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
  +#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
  +#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
  +#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
  +#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
  +#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
  +#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
  +#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
  +#endif
  +
  +
  +/* *************************************
  +*  Version
  +***************************************/
  +#define XXH_VERSION_MAJOR    0
  +#define XXH_VERSION_MINOR    6
  +#define XXH_VERSION_RELEASE  2
  +#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
  +XXH_PUBLIC_API unsigned XXH_versionNumber (void);
  +
  +
  +/* ****************************
  +*  Simple Hash Functions
  +******************************/
  +typedef unsigned int       XXH32_hash_t;
  +typedef unsigned long long XXH64_hash_t;
  +
  +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
  +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
  +
  +/*!
  +XXH32() :
  +    Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input".
  +    The memory between input & input+length must be valid (allocated and read-accessible).
  +    "seed" can be used to alter the result predictably.
  +    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
  +XXH64() :
  +    Calculate the 64-bits hash of sequence of length "len" stored at memory address "input".
  +    "seed" can be used to alter the result predictably.
  +    This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark).
  +*/
  +
  +
  +/* ****************************
  +*  Streaming Hash Functions
  +******************************/
  +typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
  +typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
  +
  +/*! State allocation, compatible with dynamic libraries */
  +
  +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
  +XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
  +
  +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
  +XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
  +
  +
  +/* hash streaming */
  +
  +XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
  +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
  +XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
  +
  +XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
  +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
  +XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
  +
  +/*
  +These functions generate the xxHash of an input provided in multiple segments.
  +Note that, for small input, they are slower than single-call functions, due to state management.
  +For small input, prefer `XXH32()` and `XXH64()` .
  +
  +XXH state must first be allocated, using XXH*_createState() .
  +
  +Start a new hash by initializing state with a seed, using XXH*_reset().
  +
  +Then, feed the hash state by calling XXH*_update() as many times as necessary.
  +Obviously, input must be allocated and read accessible.
  +The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
  +
  +Finally, a hash value can be produced anytime, by using XXH*_digest().
  +This function returns the nn-bits hash as an int or long long.
  +
  +It's still possible to continue inserting input into the hash state after a digest,
  +and generate some new hashes later on, by calling again XXH*_digest().
  +
  +When done, free XXH state space if it was allocated dynamically.
  +*/
  +
  +
  +/* **************************
  +*  Utils
  +****************************/
  +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* ! C99 */
  +#  define restrict   /* disable restrict */
  +#endif
  +
  +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
  +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);
  +
  +
  +/* **************************
  +*  Canonical representation
  +****************************/
  +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
  +*  The canonical representation uses human-readable write convention, aka big-endian (large digits first).
  +*  These functions allow transformation of hash result into and from its canonical format.
  +*  This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
  +*/
  +typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
  +typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
  +
  +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
  +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
  +
  +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
  +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
  +
  +#endif /* XXHASH_H_5627135585666179 */
  +
  +
  +
  +/* ================================================================================================
  +   This section contains definitions which are not guaranteed to remain stable.
  +   They may change in future versions, becoming incompatible with a different version of the library.
  +   They shall only be used with static linking.
  +   Never use these definitions in association with dynamic linking !
  +=================================================================================================== */
  +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
  +#define XXH_STATIC_H_3543687687345
  +
  +/* These definitions are only meant to allow allocation of XXH state
  +   statically, on stack, or in a struct for example.
  +   Do not use members directly. */
  +
  +   struct XXH32_state_s {
  +       unsigned total_len_32;
  +       unsigned large_len;
  +       unsigned v1;
  +       unsigned v2;
  +       unsigned v3;
  +       unsigned v4;
  +       unsigned mem32[4];   /* buffer defined as U32 for alignment */
  +       unsigned memsize;
  +       unsigned reserved;   /* never read nor write, will be removed in a future version */
  +   };   /* typedef'd to XXH32_state_t */
  +
  +   struct XXH64_state_s {
  +       unsigned long long total_len;
  +       unsigned long long v1;
  +       unsigned long long v2;
  +       unsigned long long v3;
  +       unsigned long long v4;
  +       unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
  +       unsigned memsize;
  +       unsigned reserved[2];          /* never read nor write, will be removed in a future version */
  +   };   /* typedef'd to XXH64_state_t */
  +
  +
  +#  ifdef XXH_PRIVATE_API
  +#    include "xxhash.c"   /* include xxhash functions as `static`, for 
inlining */
  +#  endif
  +
  +#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */
  +
  +
  +#if defined (__cplusplus)
  +}
  +#endif
  @@ .
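
  The streaming interface declared above follows a create / reset / update /
  digest / free protocol.  A minimal usage sketch (standalone, not part of
  the commit):

      #include <stdio.h>
      #include <string.h>
      #include "xxhash.h"

      /* Hash an input delivered in several segments: allocate state, reset
       * with a seed, update per segment, digest, then free the state. */
      int main(void)
      {
          static const char *parts[] = { "hello, ", "xxhash ", "streaming" };
          XXH64_state_t *st = XXH64_createState();
          if (st == NULL) return 1;

          XXH64_reset(st, 0);                          /* seed 0 */
          for (size_t i = 0; i < sizeof parts / sizeof parts[0]; i++)
              XXH64_update(st, parts[i], strlen(parts[i]));

          XXH64_hash_t const h = XXH64_digest(st);     /* same as one-shot XXH64() of the whole input */
          XXH64_freeState(st);
          printf("xxh64 = %016llx\n", h);
          return 0;
      }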
______________________________________________________________________
RPM Package Manager                                    http://rpm5.org
CVS Sources Repository                                rpm-cvs@rpm5.org
