Script 'mail_helper' called by obssrc Hello community, here is the log from the commit of package redis for openSUSE:Factory checked in at 2023-07-14 15:35:43 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/redis (Old) and /work/SRC/openSUSE:Factory/.redis.new.3193 (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "redis" Fri Jul 14 15:35:43 2023 rev:90 rq:1098399 version:7.0.12 Changes: -------- --- /work/SRC/openSUSE:Factory/redis/redis.changes 2023-05-21 19:08:31.530372465 +0200 +++ /work/SRC/openSUSE:Factory/.redis.new.3193/redis.changes 2023-07-14 15:35:46.453982547 +0200 @@ -1,0 +2,20 @@ +Wed Jul 12 14:10:43 UTC 2023 - Danilo Spinella <danilo.spine...@suse.com> + +- redis 7.0.12: + * (CVE-2022-24834) A specially crafted Lua script executing in Redis can trigger + a heap overflow in the cjson and cmsgpack libraries, and result in heap + corruption and potentially remote code execution. The problem exists in all + versions of Redis with Lua scripting support, starting from 2.6, and affects + only authenticated and authorized users. (bsc#1213193) + * (CVE-2023-36824) Extracting key names from a command and a list of arguments + may, in some cases, trigger a heap overflow and result in reading random heap + memory, heap corruption and potentially remote code execution. Specifically: + using COMMAND GETKEYS* and validation of key names in ACL rules. 
(bsc#1213249) + * Re-enable downscale rehashing while there is a fork child + * Fix possible hang in HRANDFIELD, SRANDMEMBER, ZRANDMEMBER when used with <count> + * Improve fairness issue in RANDOMKEY, HRANDFIELD, SRANDMEMBER, ZRANDMEMBER, + SPOP, and eviction + * Fix WAIT to be effective after a blocked module command being unblocked + * Avoid unnecessary full sync after master restart in a rare case + +------------------------------------------------------------------- Old: ---- redis-7.0.11.tar.gz New: ---- redis-7.0.12.tar.gz ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ redis.spec ++++++ --- /var/tmp/diff_new_pack.ISj57q/_old 2023-07-14 15:35:47.125986456 +0200 +++ /var/tmp/diff_new_pack.ISj57q/_new 2023-07-14 15:35:47.129986479 +0200 @@ -20,7 +20,7 @@ %define _log_dir %{_localstatedir}/log/%{name} %define _conf_dir %{_sysconfdir}/%{name} Name: redis -Version: 7.0.11 +Version: 7.0.12 Release: 0 Summary: Persistent key-value database License: BSD-3-Clause ++++++ redis-7.0.11.tar.gz -> redis-7.0.12.tar.gz ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/00-RELEASENOTES new/redis-7.0.12/00-RELEASENOTES --- old/redis-7.0.11/00-RELEASENOTES 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/00-RELEASENOTES 2023-07-10 13:39:42.000000000 +0200 @@ -13,6 +13,34 @@ ================================================================================ +Redis 7.0.12 Released Mon July 10 12:00:00 IDT 2023 +================================================================================ + +Upgrade urgency SECURITY: See security fixes below. + +Security Fixes: +* (CVE-2022-24834) A specially crafted Lua script executing in Redis can trigger + a heap overflow in the cjson and cmsgpack libraries, and result in heap + corruption and potentially remote code execution. 
The problem exists in all + versions of Redis with Lua scripting support, starting from 2.6, and affects + only authenticated and authorized users. +* (CVE-2023-36824) Extracting key names from a command and a list of arguments + may, in some cases, trigger a heap overflow and result in reading random heap + memory, heap corruption and potentially remote code execution. Specifically: + using COMMAND GETKEYS* and validation of key names in ACL rules. + + +Bug Fixes +========= + +* Re-enable downscale rehashing while there is a fork child (#12276) +* Fix possible hang in HRANDFIELD, SRANDMEMBER, ZRANDMEMBER when used with `<count>` (#12276) +* Improve fairness issue in RANDOMKEY, HRANDFIELD, SRANDMEMBER, ZRANDMEMBER, SPOP, and eviction (#12276) +* Fix WAIT to be effective after a blocked module command being unblocked (#12220) +* Avoid unnecessary full sync after master restart in a rare case (#12088) + + +================================================================================ Redis 7.0.11 Released Mon Apr 17 16:00:00 IST 2023 ================================================================================ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/deps/Makefile new/redis-7.0.12/deps/Makefile --- old/redis-7.0.11/deps/Makefile 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/deps/Makefile 2023-07-10 13:39:42.000000000 +0200 @@ -3,6 +3,7 @@ uname_S:= $(shell sh -c 'uname -s 2>/dev/null || echo not') LUA_DEBUG?=no +LUA_COVERAGE?=no CCCOLOR="\033[34m" LINKCOLOR="\033[34;1m" @@ -78,6 +79,11 @@ else LUA_CFLAGS+= -O2 endif +ifeq ($(LUA_COVERAGE),yes) + LUA_CFLAGS += -fprofile-arcs -ftest-coverage + LUA_LDFLAGS += -fprofile-arcs -ftest-coverage +endif + # lua's Makefile defines AR="ar rcu", which is unusual, and makes it more # challenging to cross-compile lua (and redis). These defines make it easier # to fit redis into cross-compilation environments, which typically set AR. 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/deps/lua/src/lua_cjson.c new/redis-7.0.12/deps/lua/src/lua_cjson.c --- old/redis-7.0.11/deps/lua/src/lua_cjson.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/deps/lua/src/lua_cjson.c 2023-07-10 13:39:42.000000000 +0200 @@ -39,6 +39,7 @@ #include <assert.h> #include <string.h> #include <math.h> +#include <stdint.h> #include <limits.h> #include "lua.h" #include "lauxlib.h" @@ -141,13 +142,13 @@ typedef struct { json_token_type_t type; - int index; + size_t index; union { const char *string; double number; int boolean; } value; - int string_len; + size_t string_len; } json_token_t; static const char *char2escape[256] = { @@ -473,6 +474,8 @@ * This buffer is reused constantly for small strings * If there are any excess pages, they won't be hit anyway. * This gains ~5% speedup. */ + if (len > SIZE_MAX / 6 - 3) + abort(); /* Overflow check */ strbuf_ensure_empty_length(json, len * 6 + 2); strbuf_append_char_unsafe(json, '\"'); @@ -706,7 +709,7 @@ strbuf_t local_encode_buf; strbuf_t *encode_buf; char *json; - int len; + size_t len; luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/deps/lua/src/lua_cmsgpack.c new/redis-7.0.12/deps/lua/src/lua_cmsgpack.c --- old/redis-7.0.11/deps/lua/src/lua_cmsgpack.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/deps/lua/src/lua_cmsgpack.c 2023-07-10 13:39:42.000000000 +0200 @@ -117,7 +117,9 @@ void mp_buf_append(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { if (buf->free < len) { - size_t newsize = (buf->len+len)*2; + size_t newsize = buf->len+len; + if (newsize < buf->len || newsize >= SIZE_MAX/2) abort(); + newsize *= 2; buf->b = (unsigned char*)mp_realloc(L, buf->b, buf->len + buf->free, newsize); buf->free = newsize - buf->len; @@ -173,7 +175,7 @@ void 
mp_encode_bytes(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { unsigned char hdr[5]; - int hdrlen; + size_t hdrlen; if (len < 32) { hdr[0] = 0xa0 | (len&0xff); /* fix raw */ @@ -220,7 +222,7 @@ void mp_encode_int(lua_State *L, mp_buf *buf, int64_t n) { unsigned char b[9]; - int enclen; + size_t enclen; if (n >= 0) { if (n <= 127) { @@ -290,9 +292,9 @@ mp_buf_append(L,buf,b,enclen); } -void mp_encode_array(lua_State *L, mp_buf *buf, int64_t n) { +void mp_encode_array(lua_State *L, mp_buf *buf, uint64_t n) { unsigned char b[5]; - int enclen; + size_t enclen; if (n <= 15) { b[0] = 0x90 | (n & 0xf); /* fix array */ @@ -313,7 +315,7 @@ mp_buf_append(L,buf,b,enclen); } -void mp_encode_map(lua_State *L, mp_buf *buf, int64_t n) { +void mp_encode_map(lua_State *L, mp_buf *buf, uint64_t n) { unsigned char b[5]; int enclen; @@ -791,7 +793,7 @@ } } -int mp_unpack_full(lua_State *L, int limit, int offset) { +int mp_unpack_full(lua_State *L, lua_Integer limit, lua_Integer offset) { size_t len; const char *s; mp_cur c; @@ -803,10 +805,10 @@ if (offset < 0 || limit < 0) /* requesting negative off or lim is invalid */ return luaL_error(L, "Invalid request to unpack with offset of %d and limit of %d.", - offset, len); + (int) offset, (int) len); else if (offset > len) return luaL_error(L, - "Start offset %d greater than input length %d.", offset, len); + "Start offset %d greater than input length %d.", (int) offset, (int) len); if (decode_all) limit = INT_MAX; @@ -828,12 +830,13 @@ /* c->left is the remaining size of the input buffer. * subtract the entire buffer size from the unprocessed size * to get our next start offset */ - int offset = len - c.left; + size_t new_offset = len - c.left; + if (new_offset > LONG_MAX) abort(); luaL_checkstack(L, 1, "in function mp_unpack_full"); /* Return offset -1 when we have have processed the entire buffer. */ - lua_pushinteger(L, c.left == 0 ? -1 : offset); + lua_pushinteger(L, c.left == 0 ? 
-1 : (lua_Integer) new_offset); /* Results are returned with the arg elements still * in place. Lua takes care of only returning * elements above the args for us. @@ -852,15 +855,15 @@ } int mp_unpack_one(lua_State *L) { - int offset = luaL_optinteger(L, 2, 0); + lua_Integer offset = luaL_optinteger(L, 2, 0); /* Variable pop because offset may not exist */ lua_pop(L, lua_gettop(L)-1); return mp_unpack_full(L, 1, offset); } int mp_unpack_limit(lua_State *L) { - int limit = luaL_checkinteger(L, 2); - int offset = luaL_optinteger(L, 3, 0); + lua_Integer limit = luaL_checkinteger(L, 2); + lua_Integer offset = luaL_optinteger(L, 3, 0); /* Variable pop because offset may not exist */ lua_pop(L, lua_gettop(L)-1); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/deps/lua/src/strbuf.c new/redis-7.0.12/deps/lua/src/strbuf.c --- old/redis-7.0.11/deps/lua/src/strbuf.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/deps/lua/src/strbuf.c 2023-07-10 13:39:42.000000000 +0200 @@ -26,6 +26,7 @@ #include <stdlib.h> #include <stdarg.h> #include <string.h> +#include <stdint.h> #include "strbuf.h" @@ -38,22 +39,22 @@ va_end(arg); fprintf(stderr, "\n"); - exit(-1); + abort(); } -void strbuf_init(strbuf_t *s, int len) +void strbuf_init(strbuf_t *s, size_t len) { - int size; + size_t size; - if (len <= 0) + if (!len) size = STRBUF_DEFAULT_SIZE; else - size = len + 1; /* \0 terminator */ - + size = len + 1; + if (size < len) + die("Overflow, len: %zu", len); s->buf = NULL; s->size = size; s->length = 0; - s->increment = STRBUF_DEFAULT_INCREMENT; s->dynamic = 0; s->reallocs = 0; s->debug = 0; @@ -65,7 +66,7 @@ strbuf_ensure_null(s); } -strbuf_t *strbuf_new(int len) +strbuf_t *strbuf_new(size_t len) { strbuf_t *s; @@ -81,20 +82,10 @@ return s; } -void strbuf_set_increment(strbuf_t *s, int increment) -{ - /* Increment > 0: Linear buffer growth rate - * Increment < -1: Exponential buffer growth rate */ - if (increment == 0 || 
increment == -1) - die("BUG: Invalid string increment"); - - s->increment = increment; -} - static inline void debug_stats(strbuf_t *s) { if (s->debug) { - fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %d, size: %d\n", + fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %zd, size: %zd\n", (long)s, s->reallocs, s->length, s->size); } } @@ -113,7 +104,7 @@ free(s); } -char *strbuf_free_to_string(strbuf_t *s, int *len) +char *strbuf_free_to_string(strbuf_t *s, size_t *len) { char *buf; @@ -131,57 +122,62 @@ return buf; } -static int calculate_new_size(strbuf_t *s, int len) +static size_t calculate_new_size(strbuf_t *s, size_t len) { - int reqsize, newsize; + size_t reqsize, newsize; if (len <= 0) die("BUG: Invalid strbuf length requested"); /* Ensure there is room for optional NULL termination */ reqsize = len + 1; + if (reqsize < len) + die("Overflow, len: %zu", len); /* If the user has requested to shrink the buffer, do it exactly */ if (s->size > reqsize) return reqsize; newsize = s->size; - if (s->increment < 0) { + if (reqsize >= SIZE_MAX / 2) { + newsize = reqsize; + } else { /* Exponential sizing */ while (newsize < reqsize) - newsize *= -s->increment; - } else { - /* Linear sizing */ - newsize = ((newsize + s->increment - 1) / s->increment) * s->increment; + newsize *= 2; } + if (newsize < reqsize) + die("BUG: strbuf length would overflow, len: %zu", len); + return newsize; } /* Ensure strbuf can handle a string length bytes long (ignoring NULL * optional termination). 
*/ -void strbuf_resize(strbuf_t *s, int len) +void strbuf_resize(strbuf_t *s, size_t len) { - int newsize; + size_t newsize; newsize = calculate_new_size(s, len); if (s->debug > 1) { - fprintf(stderr, "strbuf(%lx) resize: %d => %d\n", + fprintf(stderr, "strbuf(%lx) resize: %zd => %zd\n", (long)s, s->size, newsize); } s->size = newsize; s->buf = realloc(s->buf, s->size); if (!s->buf) - die("Out of memory"); + die("Out of memory, len: %zu", len); s->reallocs++; } void strbuf_append_string(strbuf_t *s, const char *str) { - int space, i; + int i; + size_t space; space = strbuf_empty_length(s); @@ -197,55 +193,6 @@ } } -/* strbuf_append_fmt() should only be used when an upper bound - * is known for the output string. */ -void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...) -{ - va_list arg; - int fmt_len; - - strbuf_ensure_empty_length(s, len); - - va_start(arg, fmt); - fmt_len = vsnprintf(s->buf + s->length, len, fmt, arg); - va_end(arg); - - if (fmt_len < 0) - die("BUG: Unable to convert number"); /* This should never happen.. */ - - s->length += fmt_len; -} - -/* strbuf_append_fmt_retry() can be used when the there is no known - * upper bound for the output string. */ -void strbuf_append_fmt_retry(strbuf_t *s, const char *fmt, ...) -{ - va_list arg; - int fmt_len, try; - int empty_len; - - /* If the first attempt to append fails, resize the buffer appropriately - * and try again */ - for (try = 0; ; try++) { - va_start(arg, fmt); - /* Append the new formatted string */ - /* fmt_len is the length of the string required, excluding the - * trailing NULL */ - empty_len = strbuf_empty_length(s); - /* Add 1 since there is also space to store the terminating NULL. 
*/ - fmt_len = vsnprintf(s->buf + s->length, empty_len + 1, fmt, arg); - va_end(arg); - - if (fmt_len <= empty_len) - break; /* SUCCESS */ - if (try > 0) - die("BUG: length of formatted string changed"); - - strbuf_resize(s, s->length + fmt_len); - } - - s->length += fmt_len; -} /* vi:ai et sw=4 ts=4: */ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/deps/lua/src/strbuf.h new/redis-7.0.12/deps/lua/src/strbuf.h --- old/redis-7.0.11/deps/lua/src/strbuf.h 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/deps/lua/src/strbuf.h 2023-07-10 13:39:42.000000000 +0200 @@ -27,15 +27,13 @@ /* Size: Total bytes allocated to *buf * Length: String length, excluding optional NULL terminator. - * Increment: Allocation increments when resizing the string buffer. * Dynamic: True if created via strbuf_new() */ typedef struct { char *buf; - int size; - int length; - int increment; + size_t size; + size_t length; int dynamic; int reallocs; int debug; @@ -44,32 +42,26 @@ #ifndef STRBUF_DEFAULT_SIZE #define STRBUF_DEFAULT_SIZE 1023 #endif -#ifndef STRBUF_DEFAULT_INCREMENT -#define STRBUF_DEFAULT_INCREMENT -2 -#endif /* Initialise */ -extern strbuf_t *strbuf_new(int len); -extern void strbuf_init(strbuf_t *s, int len); -extern void strbuf_set_increment(strbuf_t *s, int increment); +extern strbuf_t *strbuf_new(size_t len); +extern void strbuf_init(strbuf_t *s, size_t len); /* Release */ extern void strbuf_free(strbuf_t *s); -extern char *strbuf_free_to_string(strbuf_t *s, int *len); +extern char *strbuf_free_to_string(strbuf_t *s, size_t *len); /* Management */ -extern void strbuf_resize(strbuf_t *s, int len); -static int strbuf_empty_length(strbuf_t *s); -static int strbuf_length(strbuf_t *s); -static char *strbuf_string(strbuf_t *s, int *len); -static void strbuf_ensure_empty_length(strbuf_t *s, int len); +extern void strbuf_resize(strbuf_t *s, size_t len); +static size_t strbuf_empty_length(strbuf_t *s); +static size_t 
strbuf_length(strbuf_t *s); +static char *strbuf_string(strbuf_t *s, size_t *len); +static void strbuf_ensure_empty_length(strbuf_t *s, size_t len); static char *strbuf_empty_ptr(strbuf_t *s); -static void strbuf_extend_length(strbuf_t *s, int len); +static void strbuf_extend_length(strbuf_t *s, size_t len); /* Update */ -extern void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...); -extern void strbuf_append_fmt_retry(strbuf_t *s, const char *format, ...); -static void strbuf_append_mem(strbuf_t *s, const char *c, int len); +static void strbuf_append_mem(strbuf_t *s, const char *c, size_t len); extern void strbuf_append_string(strbuf_t *s, const char *str); static void strbuf_append_char(strbuf_t *s, const char c); static void strbuf_ensure_null(strbuf_t *s); @@ -87,12 +79,12 @@ /* Return bytes remaining in the string buffer * Ensure there is space for a NULL terminator. */ -static inline int strbuf_empty_length(strbuf_t *s) +static inline size_t strbuf_empty_length(strbuf_t *s) { return s->size - s->length - 1; } -static inline void strbuf_ensure_empty_length(strbuf_t *s, int len) +static inline void strbuf_ensure_empty_length(strbuf_t *s, size_t len) { if (len > strbuf_empty_length(s)) strbuf_resize(s, s->length + len); @@ -103,12 +95,12 @@ return s->buf + s->length; } -static inline void strbuf_extend_length(strbuf_t *s, int len) +static inline void strbuf_extend_length(strbuf_t *s, size_t len) { s->length += len; } -static inline int strbuf_length(strbuf_t *s) +static inline size_t strbuf_length(strbuf_t *s) { return s->length; } @@ -124,14 +116,14 @@ s->buf[s->length++] = c; } -static inline void strbuf_append_mem(strbuf_t *s, const char *c, int len) +static inline void strbuf_append_mem(strbuf_t *s, const char *c, size_t len) { strbuf_ensure_empty_length(s, len); memcpy(s->buf + s->length, c, len); s->length += len; } -static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, int len) +static inline void 
strbuf_append_mem_unsafe(strbuf_t *s, const char *c, size_t len) { memcpy(s->buf + s->length, c, len); s->length += len; @@ -142,7 +134,7 @@ s->buf[s->length] = 0; } -static inline char *strbuf_string(strbuf_t *s, int *len) +static inline char *strbuf_string(strbuf_t *s, size_t *len) { if (len) *len = s->length; diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/src/db.c new/redis-7.0.12/src/db.c --- old/redis-7.0.11/src/db.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/src/db.c 2023-07-10 13:39:42.000000000 +0200 @@ -1770,8 +1770,9 @@ * found in other valid keyspecs. */ int getKeysUsingKeySpecs(struct redisCommand *cmd, robj **argv, int argc, int search_flags, getKeysResult *result) { - int j, i, k = 0, last, first, step; + int j, i, last, first, step; keyReference *keys; + result->numkeys = 0; for (j = 0; j < cmd->key_specs_num; j++) { keySpec *spec = cmd->key_specs + j; @@ -1836,7 +1837,7 @@ } int count = ((last - first)+1); - keys = getKeysPrepareResult(result, count); + keys = getKeysPrepareResult(result, result->numkeys + count); /* First or last is out of bounds, which indicates a syntax error */ if (last >= argc || last < first || first >= argc) { @@ -1857,8 +1858,9 @@ serverPanic("Redis built-in command declared keys positions not matching the arity requirements."); } } - keys[k].pos = i; - keys[k++].flags = spec->flags; + keys[result->numkeys].pos = i; + keys[result->numkeys].flags = spec->flags; + result->numkeys++; } /* Handle incomplete specs (only after we added the current spec @@ -1879,8 +1881,7 @@ } } - result->numkeys = k; - return k; + return result->numkeys; } /* Return all the arguments that are keys in the command passed via argc / argv. 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/src/dict.c new/redis-7.0.12/src/dict.c --- old/redis-7.0.11/src/dict.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/src/dict.c 2023-07-10 13:39:42.000000000 +0200 @@ -210,9 +210,12 @@ * work it does would be unbound and the function may block for a long time. */ int dictRehash(dict *d, int n) { int empty_visits = n*10; /* Max number of empty buckets to visit. */ + unsigned long s0 = DICTHT_SIZE(d->ht_size_exp[0]); + unsigned long s1 = DICTHT_SIZE(d->ht_size_exp[1]); if (dict_can_resize == DICT_RESIZE_FORBID || !dictIsRehashing(d)) return 0; if (dict_can_resize == DICT_RESIZE_AVOID && - (DICTHT_SIZE(d->ht_size_exp[1]) / DICTHT_SIZE(d->ht_size_exp[0]) < dict_force_resize_ratio)) + ((s1 > s0 && s1 / s0 < dict_force_resize_ratio) || + (s1 < s0 && s0 / s1 < dict_force_resize_ratio))) { return 0; } @@ -759,19 +762,30 @@ } else { emptylen = 0; while (he) { - /* Collect all the elements of the buckets found non - * empty while iterating. */ - *des = he; - des++; + /* Collect all the elements of the buckets found non empty while iterating. + * To avoid the issue of being unable to sample the end of a long chain, + * we utilize the Reservoir Sampling algorithm to optimize the sampling process. + * This means that even when the maximum number of samples has been reached, + * we continue sampling until we reach the end of the chain. + * See https://en.wikipedia.org/wiki/Reservoir_sampling. */ + if (stored < count) { + des[stored] = he; + } else { + unsigned long r = randomULong() % (stored + 1); + if (r < count) des[r] = he; + } + he = he->next; stored++; - if (stored == count) return stored; } + if (stored >= count) goto end; } } i = (i+1) & maxsizemask; } - return stored; + +end: + return stored > count ? 
count : stored; } /* This is like dictGetRandomKey() from the POV of the API, but will do more @@ -1114,7 +1128,9 @@ if (d->ht_used[htidx] == 0) { return snprintf(buf,bufsize, - "No stats available for empty dictionaries\n"); + "Hash table %d stats (%s):\n" + "No stats available for empty dictionaries\n", + htidx, (htidx == 0) ? "main hash table" : "rehashing target"); } /* Compute stats. */ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/src/module.c new/redis-7.0.12/src/module.c --- old/redis-7.0.11/src/module.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/src/module.c 2023-07-10 13:39:42.000000000 +0200 @@ -577,7 +577,7 @@ * Helpers for modules API implementation * -------------------------------------------------------------------------- */ -client *moduleAllocTempClient(user *user) { +client *moduleAllocTempClient(void) { client *c = NULL; if (moduleTempClientCount > 0) { @@ -587,10 +587,8 @@ } else { c = createClient(NULL); c->flags |= CLIENT_MODULE; + c->user = NULL; /* Root user */ } - - c->user = user; - return c; } @@ -774,7 +772,7 @@ out_ctx->module = module; out_ctx->flags = ctx_flags; if (ctx_flags & REDISMODULE_CTX_TEMP_CLIENT) - out_ctx->client = moduleAllocTempClient(NULL); + out_ctx->client = moduleAllocTempClient(); else if (ctx_flags & REDISMODULE_CTX_NEW_CLIENT) out_ctx->client = createClient(NULL); @@ -5816,20 +5814,7 @@ error_as_call_replies = flags & REDISMODULE_ARGV_CALL_REPLIES_AS_ERRORS; va_end(ap); - user *user = NULL; - if (flags & REDISMODULE_ARGV_RUN_AS_USER) { - user = ctx->user ? 
ctx->user->user : ctx->client->user; - if (!user) { - errno = ENOTSUP; - if (error_as_call_replies) { - sds msg = sdsnew("cannot run as user, no user directly attached to context or context's client"); - reply = callReplyCreateError(msg, ctx); - } - return reply; - } - } - - c = moduleAllocTempClient(user); + c = moduleAllocTempClient(); /* We do not want to allow block, the module do not expect it */ c->flags |= CLIENT_DENY_BLOCKING; @@ -5846,6 +5831,20 @@ } if (ctx->module) ctx->module->in_call++; + user *user = NULL; + if (flags & REDISMODULE_ARGV_RUN_AS_USER) { + user = ctx->user ? ctx->user->user : ctx->client->user; + if (!user) { + errno = ENOTSUP; + if (error_as_call_replies) { + sds msg = sdsnew("cannot run as user, no user directly attached to context or context's client"); + reply = callReplyCreateError(msg, ctx); + } + goto cleanup; + } + c->user = user; + } + /* We handle the above format error only when the client is setup so that * we can free it normally. */ if (argv == NULL) { @@ -7227,8 +7226,8 @@ bc->disconnect_callback = NULL; /* Set by RM_SetDisconnectCallback() */ bc->free_privdata = free_privdata; bc->privdata = privdata; - bc->reply_client = moduleAllocTempClient(NULL); - bc->thread_safe_ctx_client = moduleAllocTempClient(NULL); + bc->reply_client = moduleAllocTempClient(); + bc->thread_safe_ctx_client = moduleAllocTempClient(); if (bc->client) bc->reply_client->resp = bc->client->resp; bc->dbid = c->db->id; @@ -7560,6 +7559,11 @@ * properly unblocked by the module. */ bc->disconnect_callback = NULL; unblockClient(c); + + /* Update the wait offset, we don't know if this blocked client propagated anything, + * currently we rather not add any API for that, so we just assume it did. */ + c->woff = server.master_repl_offset; + /* Put the client in the list of clients that need to write * if there are pending replies here. This is needed since * during a non blocking command the client may receive output. 
*/ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/src/replication.c new/redis-7.0.12/src/replication.c --- old/redis-7.0.11/src/replication.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/src/replication.c 2023-07-10 13:39:42.000000000 +0200 @@ -410,13 +410,13 @@ } if (add_new_block) { createReplicationBacklogIndex(listLast(server.repl_buffer_blocks)); + + /* It is important to trim after adding replication data to keep the backlog size close to + * repl_backlog_size in the common case. We wait until we add a new block to avoid repeated + * unnecessary trimming attempts when small amounts of data are added. See comments in + * freeMemoryGetNotCountedMemory() for details on replication backlog memory tracking. */ + incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL); } - /* Try to trim replication backlog since replication backlog may exceed - * our setting when we add replication stream. Note that it is important to - * try to trim at least one node since in the common case this is where one - * new backlog node is added and one should be removed. See also comments - * in freeMemoryGetNotCountedMemory for details. 
*/ - incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL); } } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/src/script.c new/redis-7.0.12/src/script.c --- old/redis-7.0.11/src/script.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/src/script.c 2023-07-10 13:39:42.000000000 +0200 @@ -299,6 +299,7 @@ if (mustObeyClient(curr_run_ctx->original_client)) { addReplyError(c, "-UNKILLABLE The busy script was sent by a master instance in the context of replication and cannot be killed."); + return; } if (curr_run_ctx->flags & SCRIPT_WRITE_DIRTY) { addReplyError(c, diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/src/server.c new/redis-7.0.12/src/server.c --- old/redis-7.0.11/src/server.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/src/server.c 2023-07-10 13:39:42.000000000 +0200 @@ -3060,6 +3060,7 @@ return NULL; } + serverAssert(argc > 0); /* Avoid warning `-Wmaybe-uninitialized` in lookupCommandLogic() */ robj objects[argc]; robj *argv[argc]; for (j = 0; j < argc; j++) { @@ -6591,6 +6592,7 @@ serverLog(LL_NOTICE, "DB loaded from append only file: %.3f seconds", (float)(ustime()-start)/1000000); } else { rdbSaveInfo rsi = RDB_SAVE_INFO_INIT; + int rsi_is_valid = 0; errno = 0; /* Prevent a stale value from affecting error checking */ int rdb_flags = RDBFLAGS_NONE; if (iAmMaster()) { @@ -6611,6 +6613,7 @@ * information in function rdbPopulateSaveInfo. */ rsi.repl_stream_db != -1) { + rsi_is_valid = 1; if (!iAmMaster()) { memcpy(server.replid,rsi.repl_id,sizeof(server.replid)); server.master_repl_offset = rsi.repl_offset; @@ -6644,7 +6647,7 @@ * if RDB doesn't have replication info or there is no rdb, it is not * possible to support partial resynchronization, to avoid extra memory * of replication backlog, we drop it. 
*/ - if (server.master_repl_offset == 0 && server.repl_backlog) + if (!rsi_is_valid && server.repl_backlog) freeReplicationBacklog(); } } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/src/version.h new/redis-7.0.12/src/version.h --- old/redis-7.0.11/src/version.h 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/src/version.h 2023-07-10 13:39:42.000000000 +0200 @@ -1,2 +1,2 @@ -#define REDIS_VERSION "7.0.11" -#define REDIS_VERSION_NUM 0x0007000b +#define REDIS_VERSION "7.0.12" +#define REDIS_VERSION_NUM 0x0007000c diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/integration/psync2-master-restart.tcl new/redis-7.0.12/tests/integration/psync2-master-restart.tcl --- old/redis-7.0.11/tests/integration/psync2-master-restart.tcl 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/integration/psync2-master-restart.tcl 2023-07-10 13:39:42.000000000 +0200 @@ -11,6 +11,20 @@ set sub_replica [srv -2 client] + # Make sure the server saves an RDB on shutdown + $master config set save "3600 1" + + # Because we will test partial resync later, we don't want a timeout to cause + # the master-replica disconnect, then the extra reconnections will break the + # sync_partial_ok stat test + $master config set repl-timeout 3600 + $replica config set repl-timeout 3600 + $sub_replica config set repl-timeout 3600 + + # Avoid PINGs + $master config set repl-ping-replica-period 3600 + $master config rewrite + # Build replication chain $replica replicaof $master_host $master_port $sub_replica replicaof $replica_host $replica_port @@ -22,14 +36,43 @@ fail "Replication not started."
} - # Avoid PINGs - $master config set repl-ping-replica-period 3600 - $master config rewrite + test "PSYNC2: Partial resync after Master restart using RDB aux fields when offset is 0" { + assert {[status $master master_repl_offset] == 0} + + set replid [status $master master_replid] + $replica config resetstat + + catch { + restart_server 0 true false true now + set master [srv 0 client] + } + wait_for_condition 50 1000 { + [status $replica master_link_status] eq {up} && + [status $sub_replica master_link_status] eq {up} + } else { + fail "Replicas didn't sync after master restart" + } + + # Make sure master restore replication info correctly + assert {[status $master master_replid] != $replid} + assert {[status $master master_repl_offset] == 0} + assert {[status $master master_replid2] eq $replid} + assert {[status $master second_repl_offset] == 1} + + # Make sure master set replication backlog correctly + assert {[status $master repl_backlog_active] == 1} + assert {[status $master repl_backlog_first_byte_offset] == 1} + assert {[status $master repl_backlog_histlen] == 0} + + # Partial resync after Master restart + assert {[status $master sync_partial_ok] == 1} + assert {[status $replica sync_partial_ok] == 1} + } # Generate some data createComplexDataset $master 1000 - test "PSYNC2: Partial resync after Master restart using RDB aux fields" { + test "PSYNC2: Partial resync after Master restart using RDB aux fields with data" { wait_for_condition 500 100 { [status $master master_repl_offset] == [status $replica master_repl_offset] && [status $master master_repl_offset] == [status $sub_replica master_repl_offset] diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/modules/blockonkeys.c new/redis-7.0.12/tests/modules/blockonkeys.c --- old/redis-7.0.11/tests/modules/blockonkeys.c 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/modules/blockonkeys.c 2023-07-10 13:39:42.000000000 +0200 @@ -113,6 
+113,8 @@ fsl->list[fsl->length++] = ele; RedisModule_SignalKeyAsReady(ctx, argv[1]); + RedisModule_ReplicateVerbatim(ctx); + return RedisModule_ReplyWithSimpleString(ctx, "OK"); } @@ -126,6 +128,9 @@ return REDISMODULE_ERR; RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]); + + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */ + RedisModule_ReplicateVerbatim(ctx); return REDISMODULE_OK; } @@ -161,6 +166,8 @@ NULL, timeout, &argv[1], 1, NULL); } else { RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]); + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */ + RedisModule_ReplicateVerbatim(ctx); } return REDISMODULE_OK; @@ -180,6 +187,8 @@ return REDISMODULE_ERR; RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]); + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */ + RedisModule_ReplicateVerbatim(ctx); return REDISMODULE_OK; } @@ -220,6 +229,8 @@ bpopgt_free_privdata, timeout, &argv[1], 1, pgt); } else { RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]); + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */ + RedisModule_ReplicateVerbatim(ctx); } return REDISMODULE_OK; @@ -242,6 +253,8 @@ long long ele = src->list[--src->length]; dst->list[dst->length++] = ele; RedisModule_SignalKeyAsReady(ctx, dst_keyname); + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */ + RedisModule_ReplicateVerbatim(ctx); return RedisModule_ReplyWithLongLong(ctx, ele); } @@ -284,6 +297,8 @@ dst->list[dst->length++] = ele; RedisModule_SignalKeyAsReady(ctx, argv[2]); RedisModule_ReplyWithLongLong(ctx, ele); + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. 
*/ + RedisModule_ReplicateVerbatim(ctx); } return REDISMODULE_OK; @@ -320,6 +335,8 @@ RedisModule_ReplyWithString(ctx, elem); RedisModule_FreeString(ctx, elem); } + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */ + RedisModule_ReplicateVerbatim(ctx); RedisModule_ReplySetArrayLength(ctx, len); } else { RedisModule_ReplyWithError(ctx, "ERR Not a list"); @@ -385,6 +402,7 @@ if (!strncasecmp(str, "blockonkeys.lpush_unblock", len)) { RedisModule_SignalKeyAsReady(ctx, argv[1]); } + RedisModule_ReplicateVerbatim(ctx); return RedisModule_ReplyWithSimpleString(ctx, "OK"); } @@ -403,6 +421,8 @@ RedisModule_ReplyWithString(ctx, elem); RedisModule_FreeString(ctx, elem); } + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */ + RedisModule_ReplicateVerbatim(ctx); result = REDISMODULE_OK; } else if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_LIST || RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_EMPTY) { @@ -446,6 +466,8 @@ RedisModule_ReplyWithString(ctx, elem); RedisModule_FreeString(ctx, elem); } + /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. 
*/ + RedisModule_ReplicateVerbatim(ctx); } else { RedisModule_BlockClientOnKeys(ctx, blockonkeys_blpopn_reply_callback, blockonkeys_blpopn_timeout_callback, diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/support/util.tcl new/redis-7.0.12/tests/support/util.tcl --- old/redis-7.0.11/tests/support/util.tcl 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/support/util.tcl 2023-07-10 13:39:42.000000000 +0200 @@ -634,7 +634,7 @@ } proc process_is_alive pid { - if {[catch {exec ps -p $pid} err]} { + if {[catch {exec ps -p $pid -f} err]} { return 0 } else { if {[string match "*<defunct>*" $err]} { return 0 } @@ -642,6 +642,20 @@ } } +proc pause_process pid { + exec kill -SIGSTOP $pid + wait_for_condition 50 100 { + [string match {*T*} [lindex [exec ps j $pid] 16]] + } else { + puts [exec ps j $pid] + fail "process didn't stop" + } +} + +proc resume_process pid { + exec kill -SIGCONT $pid +} + proc cmdrstat {cmd r} { if {[regexp "\r\ncmdstat_$cmd:(.*?)\r\n" [$r info commandstats] _ value]} { set _ $value @@ -877,17 +891,17 @@ r $level debug digest } -proc wait_for_blocked_client {} { +proc wait_for_blocked_client {{idx 0}} { wait_for_condition 50 100 { - [s blocked_clients] ne 0 + [s $idx blocked_clients] ne 0 } else { fail "no blocked clients" } } -proc wait_for_blocked_clients_count {count {maxtries 100} {delay 10}} { +proc wait_for_blocked_clients_count {count {maxtries 100} {delay 10} {idx 0}} { wait_for_condition $maxtries $delay { - [s blocked_clients] == $count + [s $idx blocked_clients] == $count } else { fail "Timeout waiting for blocked clients" } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/unit/introspection-2.tcl new/redis-7.0.12/tests/unit/introspection-2.tcl --- old/redis-7.0.11/tests/unit/introspection-2.tcl 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/unit/introspection-2.tcl 2023-07-10 
13:39:42.000000000 +0200 @@ -117,6 +117,21 @@ assert_equal {key1 key2} [r command getkeys lcs key1 key2] } + test {COMMAND GETKEYS MORE THAN 256 KEYS} { + set all_keys [list] + set numkeys 260 + for {set i 1} {$i <= $numkeys} {incr i} { + lappend all_keys "key$i" + } + set all_keys_with_target [linsert $all_keys 0 target] + # we are using ZUNIONSTORE command since in order to reproduce allocation of a new buffer in getKeysPrepareResult + # when numkeys in result > 0 + # we need a command that the final number of keys is not known in the first call to getKeysPrepareResult + # before the fix in that case data of old buffer was not copied to the new result buffer + # causing all previous keys (numkeys) data to be uninitialize + assert_equal $all_keys_with_target [r command getkeys ZUNIONSTORE target $numkeys {*}$all_keys] + } + test "COMMAND LIST syntax error" { assert_error "ERR syntax error*" {r command list bad_arg} assert_error "ERR syntax error*" {r command list filterby bad_arg} diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/unit/moduleapi/blockedclient.tcl new/redis-7.0.12/tests/unit/moduleapi/blockedclient.tcl --- old/redis-7.0.11/tests/unit/moduleapi/blockedclient.tcl 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/unit/moduleapi/blockedclient.tcl 2023-07-10 13:39:42.000000000 +0200 @@ -247,6 +247,30 @@ assert_match {*calls=2,*,rejected_calls=0,failed_calls=2} [cmdrstat do_bg_rm_call r] } + set master [srv 0 client] + set master_host [srv 0 host] + set master_port [srv 0 port] + start_server [list overrides [list loadmodule "$testmodule"]] { + set replica [srv 0 client] + set replica_host [srv 0 host] + set replica_port [srv 0 port] + + # Start the replication process... + $replica replicaof $master_host $master_port + wait_for_sync $replica + + test {WAIT command on module blocked client} { + pause_process [srv 0 pid] + + $master do_bg_rm_call_format ! 
hset bk1 foo bar + + assert_equal [$master wait 1 1000] 0 + resume_process [srv 0 pid] + assert_equal [$master wait 1 1000] 1 + assert_equal [$replica hget bk1 foo] bar + } + } + test "Unload the module - blockedclient" { assert_equal {OK} [r module unload blockedclient] } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/unit/moduleapi/blockonkeys.tcl new/redis-7.0.12/tests/unit/moduleapi/blockonkeys.tcl --- old/redis-7.0.11/tests/unit/moduleapi/blockonkeys.tcl 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/unit/moduleapi/blockonkeys.tcl 2023-07-10 13:39:42.000000000 +0200 @@ -268,4 +268,39 @@ assert_equal {gg ff ee dd cc} [$rd read] $rd close } + + set master [srv 0 client] + set master_host [srv 0 host] + set master_port [srv 0 port] + start_server [list overrides [list loadmodule "$testmodule"]] { + set replica [srv 0 client] + set replica_host [srv 0 host] + set replica_port [srv 0 port] + + # Start the replication process... 
+ $replica replicaof $master_host $master_port + wait_for_sync $replica + + test {WAIT command on module blocked client on keys} { + set rd [redis_deferring_client -1] + $rd set x y + $rd read + + pause_process [srv 0 pid] + + $master del k + $rd fsl.bpop k 0 + wait_for_blocked_client -1 + $master fsl.push k 34 + $master fsl.push k 35 + assert_equal {34} [$rd read] + + assert_equal [$master wait 1 1000] 0 + resume_process [srv 0 pid] + assert_equal [$master wait 1 1000] 1 + $rd close + assert_equal {35} [$replica fsl.getall k] + } + } + } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/unit/scripting.tcl new/redis-7.0.12/tests/unit/scripting.tcl --- old/redis-7.0.11/tests/unit/scripting.tcl 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/unit/scripting.tcl 2023-07-10 13:39:42.000000000 +0200 @@ -316,6 +316,66 @@ } 0 } {a b} + test {EVAL - JSON smoke test} { + run_script { + local some_map = { + s1="Some string", + n1=100, + a1={"Some","String","Array"}, + nil1=nil, + b1=true, + b2=false} + local encoded = cjson.encode(some_map) + local decoded = cjson.decode(encoded) + assert(table.concat(some_map) == table.concat(decoded)) + + cjson.encode_keep_buffer(false) + encoded = cjson.encode(some_map) + decoded = cjson.decode(encoded) + assert(table.concat(some_map) == table.concat(decoded)) + + -- Table with numeric keys + local table1 = {one="one", [1]="one"} + encoded = cjson.encode(table1) + decoded = cjson.decode(encoded) + assert(decoded["one"] == table1["one"]) + assert(decoded["1"] == table1[1]) + + -- Array + local array1 = {[1]="one", [2]="two"} + encoded = cjson.encode(array1) + decoded = cjson.decode(encoded) + assert(table.concat(array1) == table.concat(decoded)) + + -- Invalid keys + local invalid_map = {} + invalid_map[false] = "false" + local ok, encoded = pcall(cjson.encode, invalid_map) + assert(ok == false) + + -- Max depth + cjson.encode_max_depth(1) + ok, encoded = 
pcall(cjson.encode, some_map) + assert(ok == false) + + cjson.decode_max_depth(1) + ok, decoded = pcall(cjson.decode, '{"obj": {"array": [1,2,3,4]}}') + assert(ok == false) + + -- Invalid numbers + ok, encoded = pcall(cjson.encode, {num1=0/0}) + assert(ok == false) + cjson.encode_invalid_numbers(true) + ok, encoded = pcall(cjson.encode, {num1=0/0}) + assert(ok == true) + + -- Restore defaults + cjson.decode_max_depth(1000) + cjson.encode_max_depth(1000) + cjson.encode_invalid_numbers(false) + } 0 + } + test {EVAL - cmsgpack can pack double?} { run_script {local encoded = cmsgpack.pack(0.1) local h = "" @@ -336,6 +396,68 @@ } 0 } {d3ffffff0000000000} + test {EVAL - cmsgpack pack/unpack smoke test} { + run_script { + local str_lt_32 = string.rep("x", 30) + local str_lt_255 = string.rep("x", 250) + local str_lt_65535 = string.rep("x", 65530) + local str_long = string.rep("x", 100000) + local array_lt_15 = {1, 2, 3, 4, 5} + local array_lt_65535 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18} + local array_big = {} + for i=1, 100000 do + array_big[i] = i + end + local map_lt_15 = {a=1, b=2} + local map_big = {} + for i=1, 100000 do + map_big[tostring(i)] = i + end + local some_map = { + s1=str_lt_32, + s2=str_lt_255, + s3=str_lt_65535, + s4=str_long, + d1=0.1, + i1=1, + i2=250, + i3=65530, + i4=100000, + i5=2^40, + i6=-1, + i7=-120, + i8=-32000, + i9=-100000, + i10=-3147483648, + a1=array_lt_15, + a2=array_lt_65535, + a3=array_big, + m1=map_lt_15, + m2=map_big, + b1=false, + b2=true, + n=nil + } + local encoded = cmsgpack.pack(some_map) + local decoded = cmsgpack.unpack(encoded) + assert(table.concat(some_map) == table.concat(decoded)) + local offset, decoded_one = cmsgpack.unpack_one(encoded, 0) + assert(table.concat(some_map) == table.concat(decoded_one)) + assert(offset == -1) + + local encoded_multiple = cmsgpack.pack(str_lt_32, str_lt_255, str_lt_65535, str_long) + local offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, 0) + assert(obj == 
str_lt_32) + offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, offset) + assert(obj == str_lt_255) + offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, offset) + assert(obj == str_lt_65535) + offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, offset) + assert(obj == str_long) + assert(offset == -1) + } 0 + } + test {EVAL - cmsgpack can pack and unpack circular references?} { run_script {local a = {x=nil,y=5} local b = {x=a} @@ -517,6 +639,7 @@ } ;# is_eval test {EVAL does not leak in the Lua stack} { + r script flush ;# reset Lua VM r set x 0 # Use a non blocking client to speedup the loop. set rd [redis_deferring_client] diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/redis-7.0.11/tests/unit/type/set.tcl new/redis-7.0.12/tests/unit/type/set.tcl --- old/redis-7.0.11/tests/unit/type/set.tcl 2023-04-17 14:54:03.000000000 +0200 +++ new/redis-7.0.12/tests/unit/type/set.tcl 2023-07-10 13:39:42.000000000 +0200 @@ -810,6 +810,130 @@ } } + proc is_rehashing {myset} { + set htstats [r debug HTSTATS-KEY $myset] + return [string match {*rehashing target*} $htstats] + } + + proc rem_hash_set_top_N {myset n} { + set cursor 0 + set members {} + set enough 0 + while 1 { + set res [r sscan $myset $cursor] + set cursor [lindex $res 0] + set k [lindex $res 1] + foreach m $k { + lappend members $m + if {[llength $members] >= $n} { + set enough 1 + break + } + } + if {$enough || $cursor == 0} { + break + } + } + r srem $myset {*}$members + } + + test "SRANDMEMBER with a dict containing long chain" { + set origin_save [config_get_set save ""] + set origin_max_is [config_get_set set-max-intset-entries 0] + set origin_save_delay [config_get_set rdb-key-save-delay 2147483647] + + # 1) Create a hash set with 100000 members. + set members {} + for {set i 0} {$i < 100000} {incr i} { + lappend members [format "m:%d" $i] + } + create_set myset $members + + # 2) Wait for the hash set rehashing to finish. 
+ while {[is_rehashing myset]} { + r srandmember myset 100 + } + + # 3) Turn off the rehashing of this set, and remove the members to 500. + r bgsave + rem_hash_set_top_N myset [expr {[r scard myset] - 500}] + assert_equal [r scard myset] 500 + + # 4) Kill RDB child process to restart rehashing. + set pid1 [get_child_pid 0] + catch {exec kill -9 $pid1} + waitForBgsave r + + # 5) Let the set hash to start rehashing + r spop myset 1 + assert [is_rehashing myset] + + # 6) Verify that when rdb saving is in progress, rehashing will still be performed (because + # the ratio is extreme) by waiting for it to finish during an active bgsave. + r bgsave + + while {[is_rehashing myset]} { + r srandmember myset 1 + } + if {$::verbose} { + puts [r debug HTSTATS-KEY myset] + } + + set pid1 [get_child_pid 0] + catch {exec kill -9 $pid1} + waitForBgsave r + + # 7) Check that eventually, SRANDMEMBER returns all elements. + array set allmyset {} + foreach ele [r smembers myset] { + set allmyset($ele) 1 + } + unset -nocomplain auxset + set iterations 1000 + while {$iterations != 0} { + incr iterations -1 + set res [r srandmember myset -10] + foreach ele $res { + set auxset($ele) 1 + } + if {[lsort [array names allmyset]] eq + [lsort [array names auxset]]} { + break; + } + } + assert {$iterations != 0} + + # 8) Remove the members to 30 in order to calculate the value of Chi-Square Distribution, + # otherwise we would need more iterations. + rem_hash_set_top_N myset [expr {[r scard myset] - 30}] + assert_equal [r scard myset] 30 + assert {[is_rehashing myset]} + + # Now that we have a hash set with only one long chain bucket. + set htstats [r debug HTSTATS-KEY myset] + assert {[regexp {different slots: ([0-9]+)} $htstats - different_slots]} + assert {[regexp {max chain length: ([0-9]+)} $htstats - max_chain_length]} + assert {$different_slots == 1 && $max_chain_length == 30} + + # 9) Use positive count (PATH 4) to get 10 elements (out of 30) each time. 
+ unset -nocomplain allkey + set iterations 1000 + while {$iterations != 0} { + incr iterations -1 + set res [r srandmember myset 10] + foreach ele $res { + lappend allkey $ele + } + } + # validate even distribution of random sampling (df = 29, 73 means 0.00001 probability) + assert_lessthan [chi_square_value $allkey] 73 + + r config set save $origin_save + r config set set-max-intset-entries $origin_max_is + r config set rdb-key-save-delay $origin_save_delay + r save + } {OK} {needs:debug slow} + proc setup_move {} { r del myset3{t} myset4{t} create_set myset1{t} {1 a b} ++++++ redis.hashes ++++++ --- /var/tmp/diff_new_pack.ISj57q/_old 2023-07-14 15:35:47.733989992 +0200 +++ /var/tmp/diff_new_pack.ISj57q/_new 2023-07-14 15:35:47.737990015 +0200 @@ -159,4 +159,8 @@ hash redis-6.2.12.tar.gz sha256 75352eef41e97e84bfa94292cbac79e5add5345fc79787df5cbdff703353fb1b http://download.redis.io/releases/redis-6.2.12.tar.gz hash redis-6.0.19.tar.gz sha256 55e26318c3d9c53a77a6e802f60524afdddd057a2e965cebcf781a0a72f0e3e6 http://download.redis.io/releases/redis-6.0.19.tar.gz hash redis-7.2-rc2.tar.gz sha256 4e075e79ad18f16c41e18b14ab60e1edfdb6633907fe9a39a34c62f4a758740b http://download.redis.io/releases/redis-7.2-rc2.tar.gz +hash redis-6.0.20.tar.gz sha256 173d4c5f44b5d7186da96c4adc5cb20e8018b50ec3a8dfe0d191dbbab53952f0 http://download.redis.io/releases/redis-6.0.20.tar.gz +hash redis-6.2.13.tar.gz sha256 89ff27c80d420456a721ccfb3beb7cc628d883c53059803513749e13214a23d1 http://download.redis.io/releases/redis-6.2.13.tar.gz +hash redis-7.0.12.tar.gz sha256 9dd83d5b278bb2bf0e39bfeb75c3e8170024edbaf11ba13b7037b2945cf48ab7 http://download.redis.io/releases/redis-7.0.12.tar.gz +hash redis-7.2-rc3.tar.gz sha256 4035e2b146ca1eb43b4188ca30a6d7be1a4d40ac2dfdf58db8f885517bbab41a http://download.redis.io/releases/redis-7.2-rc3.tar.gz