On Tue, Nov 25, 2014 at 3:33 PM, Michael Paquier
<[email protected]> wrote:
> For now here are the patches either way, so feel free to comment.
And of course the patches are incorrect...
--
Michael
From f0f4f8789c774d6b6fe69e66df0efffb63a9de52 Mon Sep 17 00:00:00 2001
From: Michael Paquier <[email protected]>
Date: Tue, 25 Nov 2014 14:05:59 +0900
Subject: [PATCH 1/2] Move pg_lzcompress.c to src/port
Exposing the compression and decompression APIs of pglz makes it
possible for extensions and contrib modules to use them.
pglz_decompress contained a call to elog to report an error in case
of corrupted data; it now returns a boolean status so that its
callers can raise the error themselves.
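
For instance, the decompression callers in tuptoaster.c now follow this
pattern (a minimal sketch; tmp and attr are named as in the existing
code):

    attr = (struct varlena *) palloc(PGLZ_RAW_SIZE(tmp) + VARHDRSZ);
    SET_VARSIZE(attr, PGLZ_RAW_SIZE(tmp) + VARHDRSZ);
    if (!pglz_decompress(tmp, VARDATA(attr)))
        elog(ERROR, "compressed data is corrupted");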
---
src/backend/access/heap/tuptoaster.c | 9 +-
src/backend/utils/adt/Makefile | 4 +-
src/backend/utils/adt/pg_lzcompress.c | 779 ---------------------------------
src/include/utils/pg_lzcompress.h | 2 +-
src/port/Makefile | 4 +-
src/port/pg_lzcompress.c | 781 ++++++++++++++++++++++++++++++++++
src/tools/msvc/Mkvcbuild.pm | 4 +-
7 files changed, 794 insertions(+), 789 deletions(-)
delete mode 100644 src/backend/utils/adt/pg_lzcompress.c
create mode 100644 src/port/pg_lzcompress.c
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index ce44bbd..48b5d38 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -142,7 +142,8 @@ heap_tuple_untoast_attr(struct varlena * attr)
attr = (struct varlena *) palloc(PGLZ_RAW_SIZE(tmp) + VARHDRSZ);
SET_VARSIZE(attr, PGLZ_RAW_SIZE(tmp) + VARHDRSZ);
- pglz_decompress(tmp, VARDATA(attr));
+ if (!pglz_decompress(tmp, VARDATA(attr)))
+ elog(ERROR, "compressed data is corrupted");
pfree(tmp);
}
}
@@ -167,7 +168,8 @@ heap_tuple_untoast_attr(struct varlena * attr)
attr = (struct varlena *) palloc(PGLZ_RAW_SIZE(tmp) + VARHDRSZ);
SET_VARSIZE(attr, PGLZ_RAW_SIZE(tmp) + VARHDRSZ);
- pglz_decompress(tmp, VARDATA(attr));
+ if (!pglz_decompress(tmp, VARDATA(attr)))
+ elog(ERROR, "compressed data is corrupted");
}
else if (VARATT_IS_SHORT(attr))
{
@@ -239,7 +241,8 @@ heap_tuple_untoast_attr_slice(struct varlena * attr,
preslice = (struct varlena *) palloc(size);
SET_VARSIZE(preslice, size);
- pglz_decompress(tmp, VARDATA(preslice));
+ if (!pglz_decompress(tmp, VARDATA(preslice)))
+ elog(ERROR, "compressed data is corrupted");
if (tmp != (PGLZ_Header *) attr)
pfree(tmp);
diff --git a/src/backend/utils/adt/Makefile b/src/backend/utils/adt/Makefile
index 3ea9bf4..20e5ff1 100644
--- a/src/backend/utils/adt/Makefile
+++ b/src/backend/utils/adt/Makefile
@@ -25,8 +25,8 @@ OBJS = acl.o arrayfuncs.o array_selfuncs.o array_typanalyze.o \
jsonfuncs.o like.o lockfuncs.o mac.o misc.o nabstime.o name.o \
network.o network_gist.o network_selfuncs.o \
numeric.o numutils.o oid.o oracle_compat.o \
- orderedsetaggs.o pg_lzcompress.o pg_locale.o pg_lsn.o \
- pgstatfuncs.o pseudotypes.o quote.o rangetypes.o rangetypes_gist.o \
+ orderedsetaggs.o pg_locale.o pg_lsn.o pgstatfuncs.o \
+ pseudotypes.o quote.o rangetypes.o rangetypes_gist.o \
rangetypes_selfuncs.o rangetypes_spgist.o rangetypes_typanalyze.o \
regexp.o regproc.o ri_triggers.o rowtypes.o ruleutils.o \
selfuncs.o tid.o timestamp.o trigfuncs.o \
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
deleted file mode 100644
index fe08890..0000000
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ /dev/null
@@ -1,779 +0,0 @@
-/* ----------
- * pg_lzcompress.c -
- *
- * This is an implementation of LZ compression for PostgreSQL.
- * It uses a simple history table and generates 2-3 byte tags
- * capable of backward copy information for 3-273 bytes with
- * a max offset of 4095.
- *
- * Entry routines:
- *
- * bool
- * pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
- * const PGLZ_Strategy *strategy);
- *
- * source is the input data to be compressed.
- *
- * slen is the length of the input data.
- *
- * dest is the output area for the compressed result.
- * It must be at least as big as PGLZ_MAX_OUTPUT(slen).
- *
- * strategy is a pointer to some information controlling
- * the compression algorithm. If NULL, the compiled
- * in default strategy is used.
- *
- * The return value is TRUE if compression succeeded,
- * FALSE if not; in the latter case the contents of dest
- * are undefined.
- *
- * void
- * pglz_decompress(const PGLZ_Header *source, char *dest)
- *
- * source is the compressed input.
- *
- * dest is the area where the uncompressed data will be
- * written to. It is the callers responsibility to
- * provide enough space. The required amount can be
- * obtained with the macro PGLZ_RAW_SIZE(source).
- *
- * The data is written to buff exactly as it was handed
- * to pglz_compress(). No terminating zero byte is added.
- *
- * The decompression algorithm and internal data format:
- *
- * PGLZ_Header is defined as
- *
- * typedef struct PGLZ_Header {
- * int32 vl_len_;
- * int32 rawsize;
- * }
- *
- * The header is followed by the compressed data itself.
- *
- * The data representation is easiest explained by describing
- * the process of decompression.
- *
- * If VARSIZE(x) == rawsize + sizeof(PGLZ_Header), then the data
- * is stored uncompressed as plain bytes. Thus, the decompressor
- * simply copies rawsize bytes from the location after the
- * header to the destination.
- *
- * Otherwise the first byte after the header tells what to do
- * the next 8 times. We call this the control byte.
- *
- * An unset bit in the control byte means, that one uncompressed
- * byte follows, which is copied from input to output.
- *
- * A set bit in the control byte means, that a tag of 2-3 bytes
- * follows. A tag contains information to copy some bytes, that
- * are already in the output buffer, to the current location in
- * the output. Let's call the three tag bytes T1, T2 and T3. The
- * position of the data to copy is coded as an offset from the
- * actual output position.
- *
- * The offset is in the upper nibble of T1 and in T2.
- * The length is in the lower nibble of T1.
- *
- * So the 16 bits of a 2 byte tag are coded as
- *
- * 7---T1--0 7---T2--0
- * OOOO LLLL OOOO OOOO
- *
- * This limits the offset to 1-4095 (12 bits) and the length
- * to 3-18 (4 bits) because 3 is always added to it. To emit
- * a tag of 2 bytes with a length of 2 only saves one control
- * bit. But we lose one byte in the possible length of a tag.
- *
- * In the actual implementation, the 2 byte tag's length is
- * limited to 3-17, because the value 0xF in the length nibble
- * has special meaning. It means, that the next following
- * byte (T3) has to be added to the length value of 18. That
- * makes total limits of 1-4095 for offset and 3-273 for length.
- *
- * Now that we have successfully decoded a tag. We simply copy
- * the output that occurred <offset> bytes back to the current
- * output location in the specified <length>. Thus, a
- * sequence of 200 spaces (think about bpchar fields) could be
- * coded in 4 bytes. One literal space and a three byte tag to
- * copy 199 bytes with a -1 offset. Whow - that's a compression
- * rate of 98%! Well, the implementation needs to save the
- * original data size too, so we need another 4 bytes for it
- * and end up with a total compression rate of 96%, what's still
- * worth a Whow.
- *
- * The compression algorithm
- *
- * The following uses numbers used in the default strategy.
- *
- * The compressor works best for attributes of a size between
- * 1K and 1M. For smaller items there's not that much chance of
- * redundancy in the character sequence (except for large areas
- * of identical bytes like trailing spaces) and for bigger ones
- * our 4K maximum look-back distance is too small.
- *
- * The compressor creates a table for lists of positions.
- * For each input position (except the last 3), a hash key is
- * built from the 4 next input bytes and the position remembered
- * in the appropriate list. Thus, the table points to linked
- * lists of likely to be at least in the first 4 characters
- * matching strings. This is done on the fly while the input
- * is compressed into the output area. Table entries are only
- * kept for the last 4096 input positions, since we cannot use
- * back-pointers larger than that anyway. The size of the hash
- * table is chosen based on the size of the input - a larger table
- * has a larger startup cost, as it needs to be initialized to
- * zero, but reduces the number of hash collisions on long inputs.
- *
- * For each byte in the input, its hash key (built from this
- * byte and the next 3) is used to find the appropriate list
- * in the table. The lists remember the positions of all bytes
- * that had the same hash key in the past in increasing backward
- * offset order. Now for all entries in the used lists, the
- * match length is computed by comparing the characters from the
- * entries position with the characters from the actual input
- * position.
- *
- * The compressor starts with a so called "good_match" of 128.
- * It is a "prefer speed against compression ratio" optimizer.
- * So if the first entry looked at already has 128 or more
- * matching characters, the lookup stops and that position is
- * used for the next tag in the output.
- *
- * For each subsequent entry in the history list, the "good_match"
- * is lowered by 10%. So the compressor will be more happy with
- * short matches the farer it has to go back in the history.
- * Another "speed against ratio" preference characteristic of
- * the algorithm.
- *
- * Thus there are 3 stop conditions for the lookup of matches:
- *
- * - a match >= good_match is found
- * - there are no more history entries to look at
- * - the next history entry is already too far back
- * to be coded into a tag.
- *
- * Finally the match algorithm checks that at least a match
- * of 3 or more bytes has been found, because thats the smallest
- * amount of copy information to code into a tag. If so, a tag
- * is omitted and all the input bytes covered by that are just
- * scanned for the history add's, otherwise a literal character
- * is omitted and only his history entry added.
- *
- * Acknowledgements:
- *
- * Many thanks to Adisak Pochanayon, who's article about SLZ
- * inspired me to write the PostgreSQL compression this way.
- *
- * Jan Wieck
- *
- * Copyright (c) 1999-2014, PostgreSQL Global Development Group
- *
- * src/backend/utils/adt/pg_lzcompress.c
- * ----------
- */
-#include "postgres.h"
-
-#include <limits.h>
-
-#include "utils/pg_lzcompress.h"
-
-
-/* ----------
- * Local definitions
- * ----------
- */
-#define PGLZ_MAX_HISTORY_LISTS 8192 /* must be power of 2 */
-#define PGLZ_HISTORY_SIZE 4096
-#define PGLZ_MAX_MATCH 273
-
-
-/* ----------
- * PGLZ_HistEntry -
- *
- * Linked list for the backward history lookup
- *
- * All the entries sharing a hash key are linked in a doubly linked list.
- * This makes it easy to remove an entry when it's time to recycle it
- * (because it's more than 4K positions old).
- * ----------
- */
-typedef struct PGLZ_HistEntry
-{
- struct PGLZ_HistEntry *next; /* links for my hash key's list */
- struct PGLZ_HistEntry *prev;
- int hindex; /* my current hash key */
- const char *pos; /* my input position */
-} PGLZ_HistEntry;
-
-
-/* ----------
- * The provided standard strategies
- * ----------
- */
-static const PGLZ_Strategy strategy_default_data = {
- 32, /* Data chunks less than 32 bytes are not
- * compressed */
- INT_MAX, /* No upper limit on what we'll try to
- * compress */
- 25, /* Require 25% compression rate, or not worth
- * it */
- 1024, /* Give up if no compression in the first 1KB */
- 128, /* Stop history lookup if a match of 128 bytes
- * is found */
- 10 /* Lower good match size by 10% at every loop
- * iteration */
-};
-const PGLZ_Strategy *const PGLZ_strategy_default = &strategy_default_data;
-
-
-static const PGLZ_Strategy strategy_always_data = {
- 0, /* Chunks of any size are compressed */
- INT_MAX,
- 0, /* It's enough to save one single byte */
- INT_MAX, /* Never give up early */
- 128, /* Stop history lookup if a match of 128 bytes
- * is found */
- 6 /* Look harder for a good match */
-};
-const PGLZ_Strategy *const PGLZ_strategy_always = &strategy_always_data;
-
-
-/* ----------
- * Statically allocated work arrays for history
- * ----------
- */
-static int16 hist_start[PGLZ_MAX_HISTORY_LISTS];
-static PGLZ_HistEntry hist_entries[PGLZ_HISTORY_SIZE + 1];
-
-/*
- * Element 0 in hist_entries is unused, and means 'invalid'. Likewise,
- * INVALID_ENTRY_PTR in next/prev pointers mean 'invalid'.
- */
-#define INVALID_ENTRY 0
-#define INVALID_ENTRY_PTR (&hist_entries[INVALID_ENTRY])
-
-/* ----------
- * pglz_hist_idx -
- *
- * Computes the history table slot for the lookup by the next 4
- * characters in the input.
- *
- * NB: because we use the next 4 characters, we are not guaranteed to
- * find 3-character matches; they very possibly will be in the wrong
- * hash list. This seems an acceptable tradeoff for spreading out the
- * hash keys more.
- * ----------
- */
-#define pglz_hist_idx(_s,_e, _mask) ( \
- ((((_e) - (_s)) < 4) ? (int) (_s)[0] : \
- (((_s)[0] << 6) ^ ((_s)[1] << 4) ^ \
- ((_s)[2] << 2) ^ (_s)[3])) & (_mask) \
- )
-
-
-/* ----------
- * pglz_hist_add -
- *
- * Adds a new entry to the history table.
- *
- * If _recycle is true, then we are recycling a previously used entry,
- * and must first delink it from its old hashcode's linked list.
- *
- * NOTE: beware of multiple evaluations of macro's arguments, and note that
- * _hn and _recycle are modified in the macro.
- * ----------
- */
-#define pglz_hist_add(_hs,_he,_hn,_recycle,_s,_e, _mask) \
-do { \
- int __hindex = pglz_hist_idx((_s),(_e), (_mask)); \
- int16 *__myhsp = &(_hs)[__hindex]; \
- PGLZ_HistEntry *__myhe = &(_he)[_hn]; \
- if (_recycle) { \
- if (__myhe->prev == NULL) \
- (_hs)[__myhe->hindex] = __myhe->next - (_he); \
- else \
- __myhe->prev->next = __myhe->next; \
- if (__myhe->next != NULL) \
- __myhe->next->prev = __myhe->prev; \
- } \
- __myhe->next = &(_he)[*__myhsp]; \
- __myhe->prev = NULL; \
- __myhe->hindex = __hindex; \
- __myhe->pos = (_s); \
- /* If there was an existing entry in this hash slot, link */ \
- /* this new entry to it. However, the 0th entry in the */ \
- /* entries table is unused, so we can freely scribble on it. */ \
- /* So don't bother checking if the slot was used - we'll */ \
- /* scribble on the unused entry if it was not, but that's */ \
- /* harmless. Avoiding the branch in this critical path */ \
- /* speeds this up a little bit. */ \
- /* if (*__myhsp != INVALID_ENTRY) */ \
- (_he)[(*__myhsp)].prev = __myhe; \
- *__myhsp = _hn; \
- if (++(_hn) >= PGLZ_HISTORY_SIZE + 1) { \
- (_hn) = 1; \
- (_recycle) = true; \
- } \
-} while (0)
-
-
-/* ----------
- * pglz_out_ctrl -
- *
- * Outputs the last and allocates a new control byte if needed.
- * ----------
- */
-#define pglz_out_ctrl(__ctrlp,__ctrlb,__ctrl,__buf) \
-do { \
- if ((__ctrl & 0xff) == 0) \
- { \
- *(__ctrlp) = __ctrlb; \
- __ctrlp = (__buf)++; \
- __ctrlb = 0; \
- __ctrl = 1; \
- } \
-} while (0)
-
-
-/* ----------
- * pglz_out_literal -
- *
- * Outputs a literal byte to the destination buffer including the
- * appropriate control bit.
- * ----------
- */
-#define pglz_out_literal(_ctrlp,_ctrlb,_ctrl,_buf,_byte) \
-do { \
- pglz_out_ctrl(_ctrlp,_ctrlb,_ctrl,_buf); \
- *(_buf)++ = (unsigned char)(_byte); \
- _ctrl <<= 1; \
-} while (0)
-
-
-/* ----------
- * pglz_out_tag -
- *
- * Outputs a backward reference tag of 2-4 bytes (depending on
- * offset and length) to the destination buffer including the
- * appropriate control bit.
- * ----------
- */
-#define pglz_out_tag(_ctrlp,_ctrlb,_ctrl,_buf,_len,_off) \
-do { \
- pglz_out_ctrl(_ctrlp,_ctrlb,_ctrl,_buf); \
- _ctrlb |= _ctrl; \
- _ctrl <<= 1; \
- if (_len > 17) \
- { \
- (_buf)[0] = (unsigned char)((((_off) & 0xf00) >> 4) | 0x0f); \
- (_buf)[1] = (unsigned char)(((_off) & 0xff)); \
- (_buf)[2] = (unsigned char)((_len) - 18); \
- (_buf) += 3; \
- } else { \
- (_buf)[0] = (unsigned char)((((_off) & 0xf00) >> 4) | ((_len) - 3)); \
- (_buf)[1] = (unsigned char)((_off) & 0xff); \
- (_buf) += 2; \
- } \
-} while (0)
-
-
-/* ----------
- * pglz_find_match -
- *
- * Lookup the history table if the actual input stream matches
- * another sequence of characters, starting somewhere earlier
- * in the input buffer.
- * ----------
- */
-static inline int
-pglz_find_match(int16 *hstart, const char *input, const char *end,
- int *lenp, int *offp, int good_match, int good_drop, int mask)
-{
- PGLZ_HistEntry *hent;
- int16 hentno;
- int32 len = 0;
- int32 off = 0;
-
- /*
- * Traverse the linked history list until a good enough match is found.
- */
- hentno = hstart[pglz_hist_idx(input, end, mask)];
- hent = &hist_entries[hentno];
- while (hent != INVALID_ENTRY_PTR)
- {
- const char *ip = input;
- const char *hp = hent->pos;
- int32 thisoff;
- int32 thislen;
-
- /*
- * Stop if the offset does not fit into our tag anymore.
- */
- thisoff = ip - hp;
- if (thisoff >= 0x0fff)
- break;
-
- /*
- * Determine length of match. A better match must be larger than the
- * best so far. And if we already have a match of 16 or more bytes,
- * it's worth the call overhead to use memcmp() to check if this match
- * is equal for the same size. After that we must fallback to
- * character by character comparison to know the exact position where
- * the diff occurred.
- */
- thislen = 0;
- if (len >= 16)
- {
- if (memcmp(ip, hp, len) == 0)
- {
- thislen = len;
- ip += len;
- hp += len;
- while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH)
- {
- thislen++;
- ip++;
- hp++;
- }
- }
- }
- else
- {
- while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH)
- {
- thislen++;
- ip++;
- hp++;
- }
- }
-
- /*
- * Remember this match as the best (if it is)
- */
- if (thislen > len)
- {
- len = thislen;
- off = thisoff;
- }
-
- /*
- * Advance to the next history entry
- */
- hent = hent->next;
-
- /*
- * Be happy with lesser good matches the more entries we visited. But
- * no point in doing calculation if we're at end of list.
- */
- if (hent != INVALID_ENTRY_PTR)
- {
- if (len >= good_match)
- break;
- good_match -= (good_match * good_drop) / 100;
- }
- }
-
- /*
- * Return match information only if it results at least in one byte
- * reduction.
- */
- if (len > 2)
- {
- *lenp = len;
- *offp = off;
- return 1;
- }
-
- return 0;
-}
-
-
-/* ----------
- * pglz_compress -
- *
- * Compresses source into dest using strategy.
- * ----------
- */
-bool
-pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
- const PGLZ_Strategy *strategy)
-{
- unsigned char *bp = ((unsigned char *) dest) + sizeof(PGLZ_Header);
- unsigned char *bstart = bp;
- int hist_next = 1;
- bool hist_recycle = false;
- const char *dp = source;
- const char *dend = source + slen;
- unsigned char ctrl_dummy = 0;
- unsigned char *ctrlp = &ctrl_dummy;
- unsigned char ctrlb = 0;
- unsigned char ctrl = 0;
- bool found_match = false;
- int32 match_len;
- int32 match_off;
- int32 good_match;
- int32 good_drop;
- int32 result_size;
- int32 result_max;
- int32 need_rate;
- int hashsz;
- int mask;
-
- /*
- * Our fallback strategy is the default.
- */
- if (strategy == NULL)
- strategy = PGLZ_strategy_default;
-
- /*
- * If the strategy forbids compression (at all or if source chunk size out
- * of range), fail.
- */
- if (strategy->match_size_good <= 0 ||
- slen < strategy->min_input_size ||
- slen > strategy->max_input_size)
- return false;
-
- /*
- * Save the original source size in the header.
- */
- dest->rawsize = slen;
-
- /*
- * Limit the match parameters to the supported range.
- */
- good_match = strategy->match_size_good;
- if (good_match > PGLZ_MAX_MATCH)
- good_match = PGLZ_MAX_MATCH;
- else if (good_match < 17)
- good_match = 17;
-
- good_drop = strategy->match_size_drop;
- if (good_drop < 0)
- good_drop = 0;
- else if (good_drop > 100)
- good_drop = 100;
-
- need_rate = strategy->min_comp_rate;
- if (need_rate < 0)
- need_rate = 0;
- else if (need_rate > 99)
- need_rate = 99;
-
- /*
- * Compute the maximum result size allowed by the strategy, namely the
- * input size minus the minimum wanted compression rate. This had better
- * be <= slen, else we might overrun the provided output buffer.
- */
- if (slen > (INT_MAX / 100))
- {
- /* Approximate to avoid overflow */
- result_max = (slen / 100) * (100 - need_rate);
- }
- else
- result_max = (slen * (100 - need_rate)) / 100;
-
- /*
- * Experiments suggest that these hash sizes work pretty well. A large
- * hash table minimizes collision, but has a higher startup cost. For a
- * small input, the startup cost dominates. The table size must be a power
- * of two.
- */
- if (slen < 128)
- hashsz = 512;
- else if (slen < 256)
- hashsz = 1024;
- else if (slen < 512)
- hashsz = 2048;
- else if (slen < 1024)
- hashsz = 4096;
- else
- hashsz = 8192;
- mask = hashsz - 1;
-
- /*
- * Initialize the history lists to empty. We do not need to zero the
- * hist_entries[] array; its entries are initialized as they are used.
- */
- memset(hist_start, 0, hashsz * sizeof(int16));
-
- /*
- * Compress the source directly into the output buffer.
- */
- while (dp < dend)
- {
- /*
- * If we already exceeded the maximum result size, fail.
- *
- * We check once per loop; since the loop body could emit as many as 4
- * bytes (a control byte and 3-byte tag), PGLZ_MAX_OUTPUT() had better
- * allow 4 slop bytes.
- */
- if (bp - bstart >= result_max)
- return false;
-
- /*
- * If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
- * reasonably quickly when looking at incompressible input (such as
- * pre-compressed data).
- */
- if (!found_match && bp - bstart >= strategy->first_success_by)
- return false;
-
- /*
- * Try to find a match in the history
- */
- if (pglz_find_match(hist_start, dp, dend, &match_len,
- &match_off, good_match, good_drop, mask))
- {
- /*
- * Create the tag and add history entries for all matched
- * characters.
- */
- pglz_out_tag(ctrlp, ctrlb, ctrl, bp, match_len, match_off);
- while (match_len--)
- {
- pglz_hist_add(hist_start, hist_entries,
- hist_next, hist_recycle,
- dp, dend, mask);
- dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
- }
- found_match = true;
- }
- else
- {
- /*
- * No match found. Copy one literal byte.
- */
- pglz_out_literal(ctrlp, ctrlb, ctrl, bp, *dp);
- pglz_hist_add(hist_start, hist_entries,
- hist_next, hist_recycle,
- dp, dend, mask);
- dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
- }
- }
-
- /*
- * Write out the last control byte and check that we haven't overrun the
- * output size allowed by the strategy.
- */
- *ctrlp = ctrlb;
- result_size = bp - bstart;
- if (result_size >= result_max)
- return false;
-
- /*
- * Success - need only fill in the actual length of the compressed datum.
- */
- SET_VARSIZE_COMPRESSED(dest, result_size + sizeof(PGLZ_Header));
-
- return true;
-}
-
-
-/* ----------
- * pglz_decompress -
- *
- * Decompresses source into dest.
- * ----------
- */
-void
-pglz_decompress(const PGLZ_Header *source, char *dest)
-{
- const unsigned char *sp;
- const unsigned char *srcend;
- unsigned char *dp;
- unsigned char *destend;
-
- sp = ((const unsigned char *) source) + sizeof(PGLZ_Header);
- srcend = ((const unsigned char *) source) + VARSIZE(source);
- dp = (unsigned char *) dest;
- destend = dp + source->rawsize;
-
- while (sp < srcend && dp < destend)
- {
- /*
- * Read one control byte and process the next 8 items (or as many as
- * remain in the compressed input).
- */
- unsigned char ctrl = *sp++;
- int ctrlc;
-
- for (ctrlc = 0; ctrlc < 8 && sp < srcend; ctrlc++)
- {
- if (ctrl & 1)
- {
- /*
- * Otherwise it contains the match length minus 3 and the
- * upper 4 bits of the offset. The next following byte
- * contains the lower 8 bits of the offset. If the length is
- * coded as 18, another extension tag byte tells how much
- * longer the match really was (0-255).
- */
- int32 len;
- int32 off;
-
- len = (sp[0] & 0x0f) + 3;
- off = ((sp[0] & 0xf0) << 4) | sp[1];
- sp += 2;
- if (len == 18)
- len += *sp++;
-
- /*
- * Check for output buffer overrun, to ensure we don't clobber
- * memory in case of corrupt input. Note: we must advance dp
- * here to ensure the error is detected below the loop. We
- * don't simply put the elog inside the loop since that will
- * probably interfere with optimization.
- */
- if (dp + len > destend)
- {
- dp += len;
- break;
- }
-
- /*
- * Now we copy the bytes specified by the tag from OUTPUT to
- * OUTPUT. It is dangerous and platform dependent to use
- * memcpy() here, because the copied areas could overlap
- * extremely!
- */
- while (len--)
- {
- *dp = dp[-off];
- dp++;
- }
- }
- else
- {
- /*
- * An unset control bit means LITERAL BYTE. So we just copy
- * one from INPUT to OUTPUT.
- */
- if (dp >= destend) /* check for buffer overrun */
- break; /* do not clobber memory */
-
- *dp++ = *sp++;
- }
-
- /*
- * Advance the control bit
- */
- ctrl >>= 1;
- }
- }
-
- /*
- * Check we decompressed the right amount.
- */
- if (dp != destend || sp != srcend)
- elog(ERROR, "compressed data is corrupt");
-
- /*
- * That's it.
- */
-}
diff --git a/src/include/utils/pg_lzcompress.h b/src/include/utils/pg_lzcompress.h
index 4af24a3..08c6b0e 100644
--- a/src/include/utils/pg_lzcompress.h
+++ b/src/include/utils/pg_lzcompress.h
@@ -107,6 +107,6 @@ extern const PGLZ_Strategy *const PGLZ_strategy_always;
*/
extern bool pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
const PGLZ_Strategy *strategy);
-extern void pglz_decompress(const PGLZ_Header *source, char *dest);
+extern bool pglz_decompress(const PGLZ_Header *source, char *dest);
#endif /* _PG_LZCOMPRESS_H_ */
diff --git a/src/port/Makefile b/src/port/Makefile
index 1be4ff5..f2450da 100644
--- a/src/port/Makefile
+++ b/src/port/Makefile
@@ -31,8 +31,8 @@ override CPPFLAGS := -I$(top_builddir)/src/port -DFRONTEND $(CPPFLAGS)
LIBS += $(PTHREAD_LIBS)
OBJS = $(LIBOBJS) chklocale.o dirmod.o erand48.o fls.o inet_net_ntop.o \
- noblock.o path.o pgcheckdir.o pg_crc.o pgmkdirp.o pgsleep.o \
- pgstrcasecmp.o pqsignal.o \
+ noblock.o path.o pgcheckdir.o pg_crc.o pg_lzcompress.o \
+ pgmkdirp.o pgsleep.o pgstrcasecmp.o pqsignal.o \
qsort.o qsort_arg.o quotes.o sprompt.o tar.o thread.o
# foo_srv.o and foo.o are both built from foo.c, but only foo.o has -DFRONTEND
diff --git a/src/port/pg_lzcompress.c b/src/port/pg_lzcompress.c
new file mode 100644
index 0000000..bd0f6e8
--- /dev/null
+++ b/src/port/pg_lzcompress.c
@@ -0,0 +1,781 @@
+/* ----------
+ * pg_lzcompress.c -
+ *
+ * This is an implementation of LZ compression for PostgreSQL.
+ * It uses a simple history table and generates 2-3 byte tags
+ * capable of backward copy information for 3-273 bytes with
+ * a max offset of 4095.
+ *
+ * Entry routines:
+ *
+ * bool
+ * pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
+ * const PGLZ_Strategy *strategy);
+ *
+ * source is the input data to be compressed.
+ *
+ * slen is the length of the input data.
+ *
+ * dest is the output area for the compressed result.
+ * It must be at least as big as PGLZ_MAX_OUTPUT(slen).
+ *
+ * strategy is a pointer to some information controlling
+ * the compression algorithm. If NULL, the compiled
+ * in default strategy is used.
+ *
+ * The return value is TRUE if compression succeeded,
+ * FALSE if not; in the latter case the contents of dest
+ * are undefined.
+ *
+ * bool
+ * pglz_decompress(const PGLZ_Header *source, char *dest)
+ *
+ * source is the compressed input.
+ *
+ * dest is the area where the uncompressed data will be
+ * written to. It is the callers responsibility to
+ * provide enough space. The required amount can be
+ * obtained with the macro PGLZ_RAW_SIZE(source).
+ *
+ * The data is written to buff exactly as it was handed
+ * to pglz_compress(). No terminating zero byte is added.
+ *
+ * The decompression algorithm and internal data format:
+ *
+ * PGLZ_Header is defined as
+ *
+ * typedef struct PGLZ_Header {
+ * int32 vl_len_;
+ * int32 rawsize;
+ * }
+ *
+ * The header is followed by the compressed data itself.
+ *
+ * The data representation is easiest explained by describing
+ * the process of decompression.
+ *
+ * If VARSIZE(x) == rawsize + sizeof(PGLZ_Header), then the data
+ * is stored uncompressed as plain bytes. Thus, the decompressor
+ * simply copies rawsize bytes from the location after the
+ * header to the destination.
+ *
+ * Otherwise the first byte after the header tells what to do
+ * the next 8 times. We call this the control byte.
+ *
+ * An unset bit in the control byte means, that one uncompressed
+ * byte follows, which is copied from input to output.
+ *
+ * A set bit in the control byte means, that a tag of 2-3 bytes
+ * follows. A tag contains information to copy some bytes, that
+ * are already in the output buffer, to the current location in
+ * the output. Let's call the three tag bytes T1, T2 and T3. The
+ * position of the data to copy is coded as an offset from the
+ * actual output position.
+ *
+ * The offset is in the upper nibble of T1 and in T2.
+ * The length is in the lower nibble of T1.
+ *
+ * So the 16 bits of a 2 byte tag are coded as
+ *
+ * 7---T1--0 7---T2--0
+ * OOOO LLLL OOOO OOOO
+ *
+ * This limits the offset to 1-4095 (12 bits) and the length
+ * to 3-18 (4 bits) because 3 is always added to it. To emit
+ * a tag of 2 bytes with a length of 2 only saves one control
+ * bit. But we lose one byte in the possible length of a tag.
+ *
+ * In the actual implementation, the 2 byte tag's length is
+ * limited to 3-17, because the value 0xF in the length nibble
+ * has special meaning. It means, that the next following
+ * byte (T3) has to be added to the length value of 18. That
+ * makes total limits of 1-4095 for offset and 3-273 for length.
+ *
+ * Now that we have successfully decoded a tag. We simply copy
+ * the output that occurred <offset> bytes back to the current
+ * output location in the specified <length>. Thus, a
+ * sequence of 200 spaces (think about bpchar fields) could be
+ * coded in 4 bytes. One literal space and a three byte tag to
+ * copy 199 bytes with a -1 offset. Whow - that's a compression
+ * rate of 98%! Well, the implementation needs to save the
+ * original data size too, so we need another 4 bytes for it
+ * and end up with a total compression rate of 96%, what's still
+ * worth a Whow.
+ *
+ * The compression algorithm
+ *
+ * The following uses numbers used in the default strategy.
+ *
+ * The compressor works best for attributes of a size between
+ * 1K and 1M. For smaller items there's not that much chance of
+ * redundancy in the character sequence (except for large areas
+ * of identical bytes like trailing spaces) and for bigger ones
+ * our 4K maximum look-back distance is too small.
+ *
+ * The compressor creates a table for lists of positions.
+ * For each input position (except the last 3), a hash key is
+ * built from the 4 next input bytes and the position remembered
+ * in the appropriate list. Thus, the table points to linked
+ * lists of likely to be at least in the first 4 characters
+ * matching strings. This is done on the fly while the input
+ * is compressed into the output area. Table entries are only
+ * kept for the last 4096 input positions, since we cannot use
+ * back-pointers larger than that anyway. The size of the hash
+ * table is chosen based on the size of the input - a larger table
+ * has a larger startup cost, as it needs to be initialized to
+ * zero, but reduces the number of hash collisions on long inputs.
+ *
+ * For each byte in the input, its hash key (built from this
+ * byte and the next 3) is used to find the appropriate list
+ * in the table. The lists remember the positions of all bytes
+ * that had the same hash key in the past in increasing backward
+ * offset order. Now for all entries in the used lists, the
+ * match length is computed by comparing the characters from the
+ * entries position with the characters from the actual input
+ * position.
+ *
+ * The compressor starts with a so called "good_match" of 128.
+ * It is a "prefer speed against compression ratio" optimizer.
+ * So if the first entry looked at already has 128 or more
+ * matching characters, the lookup stops and that position is
+ * used for the next tag in the output.
+ *
+ * For each subsequent entry in the history list, the "good_match"
+ * is lowered by 10%. So the compressor will be more happy with
+ * short matches the farer it has to go back in the history.
+ * Another "speed against ratio" preference characteristic of
+ * the algorithm.
+ *
+ * Thus there are 3 stop conditions for the lookup of matches:
+ *
+ * - a match >= good_match is found
+ * - there are no more history entries to look at
+ * - the next history entry is already too far back
+ * to be coded into a tag.
+ *
+ * Finally the match algorithm checks that at least a match
+ * of 3 or more bytes has been found, because thats the smallest
+ * amount of copy information to code into a tag. If so, a tag
+ * is omitted and all the input bytes covered by that are just
+ * scanned for the history add's, otherwise a literal character
+ * is omitted and only his history entry added.
+ *
+ * Acknowledgements:
+ *
+ * Many thanks to Adisak Pochanayon, who's article about SLZ
+ * inspired me to write the PostgreSQL compression this way.
+ *
+ * Jan Wieck
+ *
+ * Copyright (c) 1999-2014, PostgreSQL Global Development Group
+ *
+ * src/port/pg_lzcompress.c
+ * ----------
+ */
+#include "postgres.h"
+
+#include <limits.h>
+
+#include "utils/pg_lzcompress.h"
+
+
+/* ----------
+ * Local definitions
+ * ----------
+ */
+#define PGLZ_MAX_HISTORY_LISTS 8192 /* must be power of 2 */
+#define PGLZ_HISTORY_SIZE 4096
+#define PGLZ_MAX_MATCH 273
+
+
+/* ----------
+ * PGLZ_HistEntry -
+ *
+ * Linked list for the backward history lookup
+ *
+ * All the entries sharing a hash key are linked in a doubly linked list.
+ * This makes it easy to remove an entry when it's time to recycle it
+ * (because it's more than 4K positions old).
+ * ----------
+ */
+typedef struct PGLZ_HistEntry
+{
+ struct PGLZ_HistEntry *next; /* links for my hash key's list */
+ struct PGLZ_HistEntry *prev;
+ int hindex; /* my current hash key */
+ const char *pos; /* my input position */
+} PGLZ_HistEntry;
+
+
+/* ----------
+ * The provided standard strategies
+ * ----------
+ */
+static const PGLZ_Strategy strategy_default_data = {
+ 32, /* Data chunks less than 32 bytes are not
+ * compressed */
+ INT_MAX, /* No upper limit on what we'll try to
+ * compress */
+ 25, /* Require 25% compression rate, or not worth
+ * it */
+ 1024, /* Give up if no compression in the first 1KB */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
+ 10 /* Lower good match size by 10% at every loop
+ * iteration */
+};
+const PGLZ_Strategy *const PGLZ_strategy_default = &strategy_default_data;
+
+
+static const PGLZ_Strategy strategy_always_data = {
+ 0, /* Chunks of any size are compressed */
+ INT_MAX,
+ 0, /* It's enough to save one single byte */
+ INT_MAX, /* Never give up early */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
+ 6 /* Look harder for a good match */
+};
+const PGLZ_Strategy *const PGLZ_strategy_always = &strategy_always_data;
+
+
+/* ----------
+ * Statically allocated work arrays for history
+ * ----------
+ */
+static int16 hist_start[PGLZ_MAX_HISTORY_LISTS];
+static PGLZ_HistEntry hist_entries[PGLZ_HISTORY_SIZE + 1];
+
+/*
+ * Element 0 in hist_entries is unused, and means 'invalid'. Likewise,
+ * INVALID_ENTRY_PTR in next/prev pointers mean 'invalid'.
+ */
+#define INVALID_ENTRY 0
+#define INVALID_ENTRY_PTR (&hist_entries[INVALID_ENTRY])
+
+/* ----------
+ * pglz_hist_idx -
+ *
+ * Computes the history table slot for the lookup by the next 4
+ * characters in the input.
+ *
+ * NB: because we use the next 4 characters, we are not guaranteed to
+ * find 3-character matches; they very possibly will be in the wrong
+ * hash list. This seems an acceptable tradeoff for spreading out the
+ * hash keys more.
+ * ----------
+ */
+#define pglz_hist_idx(_s,_e, _mask) ( \
+ ((((_e) - (_s)) < 4) ? (int) (_s)[0] : \
+ (((_s)[0] << 6) ^ ((_s)[1] << 4) ^ \
+ ((_s)[2] << 2) ^ (_s)[3])) & (_mask) \
+ )
+
+
+/* ----------
+ * pglz_hist_add -
+ *
+ * Adds a new entry to the history table.
+ *
+ * If _recycle is true, then we are recycling a previously used entry,
+ * and must first delink it from its old hashcode's linked list.
+ *
+ * NOTE: beware of multiple evaluations of macro's arguments, and note that
+ * _hn and _recycle are modified in the macro.
+ * ----------
+ */
+#define pglz_hist_add(_hs,_he,_hn,_recycle,_s,_e, _mask) \
+do { \
+ int __hindex = pglz_hist_idx((_s),(_e), (_mask)); \
+ int16 *__myhsp = &(_hs)[__hindex]; \
+ PGLZ_HistEntry *__myhe = &(_he)[_hn]; \
+ if (_recycle) { \
+ if (__myhe->prev == NULL) \
+ (_hs)[__myhe->hindex] = __myhe->next - (_he); \
+ else \
+ __myhe->prev->next = __myhe->next; \
+ if (__myhe->next != NULL) \
+ __myhe->next->prev = __myhe->prev; \
+ } \
+ __myhe->next = &(_he)[*__myhsp]; \
+ __myhe->prev = NULL; \
+ __myhe->hindex = __hindex; \
+ __myhe->pos = (_s); \
+ /* If there was an existing entry in this hash slot, link */ \
+ /* this new entry to it. However, the 0th entry in the */ \
+ /* entries table is unused, so we can freely scribble on it. */ \
+ /* So don't bother checking if the slot was used - we'll */ \
+ /* scribble on the unused entry if it was not, but that's */ \
+ /* harmless. Avoiding the branch in this critical path */ \
+ /* speeds this up a little bit. */ \
+ /* if (*__myhsp != INVALID_ENTRY) */ \
+ (_he)[(*__myhsp)].prev = __myhe; \
+ *__myhsp = _hn; \
+ if (++(_hn) >= PGLZ_HISTORY_SIZE + 1) { \
+ (_hn) = 1; \
+ (_recycle) = true; \
+ } \
+} while (0)
+
+
+/* ----------
+ * pglz_out_ctrl -
+ *
+ * Outputs the last and allocates a new control byte if needed.
+ * ----------
+ */
+#define pglz_out_ctrl(__ctrlp,__ctrlb,__ctrl,__buf) \
+do { \
+ if ((__ctrl & 0xff) == 0) \
+ { \
+ *(__ctrlp) = __ctrlb; \
+ __ctrlp = (__buf)++; \
+ __ctrlb = 0; \
+ __ctrl = 1; \
+ } \
+} while (0)
+
+
+/* ----------
+ * pglz_out_literal -
+ *
+ * Outputs a literal byte to the destination buffer including the
+ * appropriate control bit.
+ * ----------
+ */
+#define pglz_out_literal(_ctrlp,_ctrlb,_ctrl,_buf,_byte) \
+do { \
+ pglz_out_ctrl(_ctrlp,_ctrlb,_ctrl,_buf); \
+ *(_buf)++ = (unsigned char)(_byte); \
+ _ctrl <<= 1; \
+} while (0)
+
+
+/* ----------
+ * pglz_out_tag -
+ *
+ * Outputs a backward reference tag of 2-4 bytes (depending on
+ * offset and length) to the destination buffer including the
+ * appropriate control bit.
+ * ----------
+ */
+#define pglz_out_tag(_ctrlp,_ctrlb,_ctrl,_buf,_len,_off) \
+do { \
+ pglz_out_ctrl(_ctrlp,_ctrlb,_ctrl,_buf); \
+ _ctrlb |= _ctrl; \
+ _ctrl <<= 1; \
+ if (_len > 17) \
+ { \
+ (_buf)[0] = (unsigned char)((((_off) & 0xf00) >> 4) | 0x0f); \
+ (_buf)[1] = (unsigned char)(((_off) & 0xff)); \
+ (_buf)[2] = (unsigned char)((_len) - 18); \
+ (_buf) += 3; \
+ } else { \
+ (_buf)[0] = (unsigned char)((((_off) & 0xf00) >> 4) | ((_len) - 3)); \
+ (_buf)[1] = (unsigned char)((_off) & 0xff); \
+ (_buf) += 2; \
+ } \
+} while (0)
+
+
+/* ----------
+ * pglz_find_match -
+ *
+ * Lookup the history table if the actual input stream matches
+ * another sequence of characters, starting somewhere earlier
+ * in the input buffer.
+ * ----------
+ */
+static inline int
+pglz_find_match(int16 *hstart, const char *input, const char *end,
+ int *lenp, int *offp, int good_match, int good_drop, int mask)
+{
+ PGLZ_HistEntry *hent;
+ int16 hentno;
+ int32 len = 0;
+ int32 off = 0;
+
+ /*
+ * Traverse the linked history list until a good enough match is found.
+ */
+ hentno = hstart[pglz_hist_idx(input, end, mask)];
+ hent = &hist_entries[hentno];
+ while (hent != INVALID_ENTRY_PTR)
+ {
+ const char *ip = input;
+ const char *hp = hent->pos;
+ int32 thisoff;
+ int32 thislen;
+
+ /*
+ * Stop if the offset does not fit into our tag anymore.
+ */
+ thisoff = ip - hp;
+ if (thisoff >= 0x0fff)
+ break;
+
+ /*
+ * Determine length of match. A better match must be larger than the
+ * best so far. And if we already have a match of 16 or more bytes,
+ * it's worth the call overhead to use memcmp() to check if this match
+ * is equal for the same size. After that we must fallback to
+ * character by character comparison to know the exact position where
+ * the diff occurred.
+ */
+ thislen = 0;
+ if (len >= 16)
+ {
+ if (memcmp(ip, hp, len) == 0)
+ {
+ thislen = len;
+ ip += len;
+ hp += len;
+ while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH)
+ {
+ thislen++;
+ ip++;
+ hp++;
+ }
+ }
+ }
+ else
+ {
+ while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH)
+ {
+ thislen++;
+ ip++;
+ hp++;
+ }
+ }
+
+ /*
+ * Remember this match as the best (if it is)
+ */
+ if (thislen > len)
+ {
+ len = thislen;
+ off = thisoff;
+ }
+
+ /*
+ * Advance to the next history entry
+ */
+ hent = hent->next;
+
+ /*
+ * Be happy with lesser good matches the more entries we visited. But
+ * no point in doing calculation if we're at end of list.
+ */
+ if (hent != INVALID_ENTRY_PTR)
+ {
+ if (len >= good_match)
+ break;
+ good_match -= (good_match * good_drop) / 100;
+ }
+ }
+
+ /*
+ * Return match information only if it results at least in one byte
+ * reduction.
+ */
+ if (len > 2)
+ {
+ *lenp = len;
+ *offp = off;
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/* ----------
+ * pglz_compress -
+ *
+ * Compresses source into dest using strategy.
+ * ----------
+ */
+bool
+pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
+ const PGLZ_Strategy *strategy)
+{
+ unsigned char *bp = ((unsigned char *) dest) + sizeof(PGLZ_Header);
+ unsigned char *bstart = bp;
+ int hist_next = 1;
+ bool hist_recycle = false;
+ const char *dp = source;
+ const char *dend = source + slen;
+ unsigned char ctrl_dummy = 0;
+ unsigned char *ctrlp = &ctrl_dummy;
+ unsigned char ctrlb = 0;
+ unsigned char ctrl = 0;
+ bool found_match = false;
+ int32 match_len;
+ int32 match_off;
+ int32 good_match;
+ int32 good_drop;
+ int32 result_size;
+ int32 result_max;
+ int32 need_rate;
+ int hashsz;
+ int mask;
+
+ /*
+ * Our fallback strategy is the default.
+ */
+ if (strategy == NULL)
+ strategy = PGLZ_strategy_default;
+
+ /*
+ * If the strategy forbids compression (at all or if source chunk size out
+ * of range), fail.
+ */
+ if (strategy->match_size_good <= 0 ||
+ slen < strategy->min_input_size ||
+ slen > strategy->max_input_size)
+ return false;
+
+ /*
+ * Save the original source size in the header.
+ */
+ dest->rawsize = slen;
+
+ /*
+ * Limit the match parameters to the supported range.
+ */
+ good_match = strategy->match_size_good;
+ if (good_match > PGLZ_MAX_MATCH)
+ good_match = PGLZ_MAX_MATCH;
+ else if (good_match < 17)
+ good_match = 17;
+
+ good_drop = strategy->match_size_drop;
+ if (good_drop < 0)
+ good_drop = 0;
+ else if (good_drop > 100)
+ good_drop = 100;
+
+ need_rate = strategy->min_comp_rate;
+ if (need_rate < 0)
+ need_rate = 0;
+ else if (need_rate > 99)
+ need_rate = 99;
+
+ /*
+ * Compute the maximum result size allowed by the strategy, namely the
+ * input size minus the minimum wanted compression rate. This had better
+ * be <= slen, else we might overrun the provided output buffer.
+ */
+ if (slen > (INT_MAX / 100))
+ {
+ /* Approximate to avoid overflow */
+ result_max = (slen / 100) * (100 - need_rate);
+ }
+ else
+ result_max = (slen * (100 - need_rate)) / 100;
+
+ /*
+ * Experiments suggest that these hash sizes work pretty well. A large
+ * hash table minimizes collision, but has a higher startup cost. For a
+ * small input, the startup cost dominates. The table size must be a power
+ * of two.
+ */
+ if (slen < 128)
+ hashsz = 512;
+ else if (slen < 256)
+ hashsz = 1024;
+ else if (slen < 512)
+ hashsz = 2048;
+ else if (slen < 1024)
+ hashsz = 4096;
+ else
+ hashsz = 8192;
+ mask = hashsz - 1;
+
+ /*
+ * Initialize the history lists to empty. We do not need to zero the
+ * hist_entries[] array; its entries are initialized as they are used.
+ */
+ memset(hist_start, 0, hashsz * sizeof(int16));
+
+ /*
+ * Compress the source directly into the output buffer.
+ */
+ while (dp < dend)
+ {
+ /*
+ * If we already exceeded the maximum result size, fail.
+ *
+ * We check once per loop; since the loop body could emit as many as 4
+ * bytes (a control byte and 3-byte tag), PGLZ_MAX_OUTPUT() had better
+ * allow 4 slop bytes.
+ */
+ if (bp - bstart >= result_max)
+ return false;
+
+ /*
+ * If we've emitted more than first_success_by bytes without finding
+ * anything compressible at all, fail. This lets us fall out
+ * reasonably quickly when looking at incompressible input (such as
+ * pre-compressed data).
+ */
+ if (!found_match && bp - bstart >= strategy->first_success_by)
+ return false;
+
+ /*
+ * Try to find a match in the history
+ */
+ if (pglz_find_match(hist_start, dp, dend, &match_len,
+ &match_off, good_match, good_drop, mask))
+ {
+ /*
+ * Create the tag and add history entries for all matched
+ * characters.
+ */
+ pglz_out_tag(ctrlp, ctrlb, ctrl, bp, match_len, match_off);
+ while (match_len--)
+ {
+ pglz_hist_add(hist_start, hist_entries,
+ hist_next, hist_recycle,
+ dp, dend, mask);
+ dp++; /* Do not do this ++ in the line above! */
+ /* The macro would do it four times - Jan. */
+ }
+ found_match = true;
+ }
+ else
+ {
+ /*
+ * No match found. Copy one literal byte.
+ */
+ pglz_out_literal(ctrlp, ctrlb, ctrl, bp, *dp);
+ pglz_hist_add(hist_start, hist_entries,
+ hist_next, hist_recycle,
+ dp, dend, mask);
+ dp++; /* Do not do this ++ in the line above! */
+ /* The macro would do it four times - Jan. */
+ }
+ }
+
+ /*
+ * Write out the last control byte and check that we haven't overrun the
+ * output size allowed by the strategy.
+ */
+ *ctrlp = ctrlb;
+ result_size = bp - bstart;
+ if (result_size >= result_max)
+ return false;
+
+ /*
+ * Success - need only fill in the actual length of the compressed datum.
+ */
+ SET_VARSIZE_COMPRESSED(dest, result_size + sizeof(PGLZ_Header));
+
+ return true;
+}
+
+
+/* ----------
+ * pglz_decompress -
+ *
+ * Decompresses source into dest. Returns true on success,
+ * false if the compressed data is corrupted.
+ * ----------
+ */
+bool
+pglz_decompress(const PGLZ_Header *source, char *dest)
+{
+ const unsigned char *sp;
+ const unsigned char *srcend;
+ unsigned char *dp;
+ unsigned char *destend;
+
+ sp = ((const unsigned char *) source) + sizeof(PGLZ_Header);
+ srcend = ((const unsigned char *) source) + VARSIZE(source);
+ dp = (unsigned char *) dest;
+ destend = dp + source->rawsize;
+
+ while (sp < srcend && dp < destend)
+ {
+ /*
+ * Read one control byte and process the next 8 items (or as many as
+ * remain in the compressed input).
+ */
+ unsigned char ctrl = *sp++;
+ int ctrlc;
+
+ for (ctrlc = 0; ctrlc < 8 && sp < srcend; ctrlc++)
+ {
+ if (ctrl & 1)
+ {
+ /*
+ * Otherwise it contains the match length minus 3 and the
+ * upper 4 bits of the offset. The next following byte
+ * contains the lower 8 bits of the offset. If the length is
+ * coded as 18, another extension tag byte tells how much
+ * longer the match really was (0-255).
+ */
+ int32 len;
+ int32 off;
+
+ len = (sp[0] & 0x0f) + 3;
+ off = ((sp[0] & 0xf0) << 4) | sp[1];
+ sp += 2;
+ if (len == 18)
+ len += *sp++;
+
+ /*
+ * Check for output buffer overrun, to ensure we don't clobber
+ * memory in case of corrupt input. Note: we must advance dp
+ * here to ensure the error is detected below the loop. We
+ * don't simply put the elog inside the loop since that will
+ * probably interfere with optimization.
+ */
+ if (dp + len > destend)
+ {
+ dp += len;
+ break;
+ }
+
+ /*
+ * Now we copy the bytes specified by the tag from OUTPUT to
+ * OUTPUT. It is dangerous and platform dependent to use
+ * memcpy() here, because the copied areas could overlap
+ * extremely!
+ */
+ while (len--)
+ {
+ *dp = dp[-off];
+ dp++;
+ }
+ }
+ else
+ {
+ /*
+ * An unset control bit means LITERAL BYTE. So we just copy
+ * one from INPUT to OUTPUT.
+ */
+ if (dp >= destend) /* check for buffer overrun */
+ break; /* do not clobber memory */
+
+ *dp++ = *sp++;
+ }
+
+ /*
+ * Advance the control bit
+ */
+ ctrl >>= 1;
+ }
+ }
+
+ /*
+ * Check we decompressed the right amount.
+ */
+ if (dp != destend || sp != srcend)
+ return false;
+
+ /*
+ * That's it.
+ */
+ return true;
+}
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index 004942c..387169d 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -68,8 +68,8 @@ sub mkvcbuild
chklocale.c crypt.c fls.c fseeko.c getrusage.c inet_aton.c random.c
srandom.c getaddrinfo.c gettimeofday.c inet_net_ntop.c kill.c open.c
erand48.c snprintf.c strlcat.c strlcpy.c dirmod.c noblock.c path.c
- pgcheckdir.c pg_crc.c pgmkdirp.c pgsleep.c pgstrcasecmp.c pqsignal.c
- mkdtemp.c qsort.c qsort_arg.c quotes.c system.c
+ pgcheckdir.c pg_crc.c pg_lzcompress.c pgmkdirp.c pgsleep.c pgstrcasecmp.c
+ pqsignal.c mkdtemp.c qsort.c qsort_arg.c quotes.c system.c
sprompt.c tar.c thread.c getopt.c getopt_long.c dirent.c
win32env.c win32error.c win32setlocale.c);
--
2.1.3
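
As a quick reference for reviewers, the 2-3 byte tag format described in
the header comment of the moved file decodes as below (a sketch that
mirrors the tag-decoding code in pglz_decompress; sp points at T1):

    int32 len = (sp[0] & 0x0f) + 3;             /* lengths 3..17 */
    int32 off = ((sp[0] & 0xf0) << 4) | sp[1];  /* offsets 1..4095 */
    sp += 2;
    if (len == 18)          /* length nibble was 0xF, so T3 follows */
        len += *sp++;       /* extended lengths up to 273 */
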
From c04444048868ab520a86f7e7576c15509c5d5741 Mon Sep 17 00:00:00 2001
From: Michael Paquier <[email protected]>
Date: Tue, 25 Nov 2014 14:24:26 +0900
Subject: [PATCH 2/2] Support compression for full-page writes in WAL
full_page_writes gains a new setting value, 'compress', allowing a
user to reduce the amount of data written to WAL for full-page
writes.
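
For example, compression of full-page writes would be enabled with the
following setting (a sketch based on the documentation changes below):

    # postgresql.conf
    full_page_writes = compress    # valid values: on, compress, off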
---
doc/src/sgml/config.sgml | 41 ++++---
src/backend/access/rmgrdesc/xlogdesc.c | 8 +-
src/backend/access/transam/xlog.c | 56 ++++++----
src/backend/access/transam/xloginsert.c | 150 +++++++++++++++++++++-----
src/backend/access/transam/xlogreader.c | 27 ++++-
src/backend/utils/misc/guc.c | 44 +++++---
src/backend/utils/misc/postgresql.conf.sample | 3 +-
src/bin/pg_controldata/pg_controldata.c | 2 +-
src/bin/pg_resetxlog/pg_resetxlog.c | 4 +-
src/include/access/xlog.h | 22 +++-
src/include/access/xlogreader.h | 1 +
src/include/access/xlogrecord.h | 10 +-
src/include/catalog/pg_control.h | 2 +-
13 files changed, 281 insertions(+), 89 deletions(-)
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index ab8c263..ee490bf 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -2181,28 +2181,32 @@ include_dir 'conf.d'
</varlistentry>
<varlistentry id="guc-full-page-writes" xreflabel="full_page_writes">
- <term><varname>full_page_writes</varname> (<type>boolean</type>)
+ <term><varname>full_page_writes</varname> (<type>enum</type>)
<indexterm>
<primary><varname>full_page_writes</> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When this parameter is on, the <productname>PostgreSQL</> server
- writes the entire content of each disk page to WAL during the
- first modification of that page after a checkpoint.
- This is needed because
- a page write that is in process during an operating system crash might
- be only partially completed, leading to an on-disk page
- that contains a mix of old and new data. The row-level change data
- normally stored in WAL will not be enough to completely restore
- such a page during post-crash recovery. Storing the full page image
- guarantees that the page can be correctly restored, but at the price
- of increasing the amount of data that must be written to WAL.
- (Because WAL replay always starts from a checkpoint, it is sufficient
- to do this during the first change of each page after a checkpoint.
- Therefore, one way to reduce the cost of full-page writes is to
- increase the checkpoint interval parameters.)
+ When this parameter is <literal>on</> or <literal>compress</>,
+ the <productname>PostgreSQL</> server writes the entire content
+ of each disk page to WAL during the first modification of that
+ page after a checkpoint. This is needed because a page write that
+ is in process during an operating system crash might be only partially
+ completed, leading to an on-disk page that contains a mix of old and
+ new data. The row-level change data normally stored in WAL will not
+ be enough to completely restore such a page during post-crash
+ recovery. Storing the full page image guarantees that the page can
+ be correctly restored, but at the price of increasing the amount of
+ data that must be written to WAL. (Because WAL replay always starts
+ from a checkpoint, it is sufficient to do this during the first change
+ of each page after a checkpoint. Therefore, one way to reduce the cost
+ of full-page writes is to increase the checkpoint interval parameters.)
+ </para>
+
+ <para>
+ Valid values are <literal>on</>, <literal>compress</>, and
+ <literal>off</>. The default is <literal>on</>.
</para>
<para>
@@ -2220,6 +2224,11 @@ include_dir 'conf.d'
</para>
<para>
+ Setting this parameter to <literal>compress</> compresses each
+ full page image written to WAL to reduce the amount of WAL data.
+ </para>
+
+ <para>
This parameter can only be set in the <filename>postgresql.conf</>
file or on the server command line.
The default is <literal>on</>.
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index eba046d..918cc5a 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -49,7 +49,7 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
(uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
checkpoint->ThisTimeLineID,
checkpoint->PrevTimeLineID,
- checkpoint->fullPageWrites ? "true" : "false",
+ FullPageWritesStr(checkpoint->fullPageWrites),
checkpoint->nextXidEpoch, checkpoint->nextXid,
checkpoint->nextOid,
checkpoint->nextMulti,
@@ -114,10 +114,10 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
}
else if (info == XLOG_FPW_CHANGE)
{
- bool fpw;
+ int fpw;
- memcpy(&fpw, rec, sizeof(bool));
- appendStringInfo(buf, "%s", fpw ? "true" : "false");
+ memcpy(&fpw, rec, sizeof(int));
+ appendStringInfo(buf, "fpw: %s", FullPageWritesStr(fpw));
}
else if (info == XLOG_END_OF_RECOVERY)
{
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 8e712b7..20c3ed9 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -85,7 +85,7 @@ int XLogArchiveTimeout = 0;
bool XLogArchiveMode = false;
char *XLogArchiveCommand = NULL;
bool EnableHotStandby = false;
-bool fullPageWrites = true;
+int fullPageWrites = FULL_PAGE_WRITES_ON;
bool wal_log_hints = false;
bool log_checkpoints = false;
int sync_method = DEFAULT_SYNC_METHOD;
@@ -179,7 +179,7 @@ static TimeLineID receiveTLI = 0;
* that the recovery starting checkpoint record indicates, and then updated
* each time XLOG_FPW_CHANGE record is replayed.
*/
-static bool lastFullPageWrites;
+static int lastFullPageWrites;
/*
* Local copy of SharedRecoveryInProgress variable. True actually means "not
@@ -316,6 +316,13 @@ static XLogRecPtr RedoRecPtr;
static bool doPageWrites;
/*
+ * doPageCompression is this backend's local copy of
+ * (fullPageWrites == FULL_PAGE_WRITES_COMPRESS). It is used to check if
+ * a full page write can be compressed.
+ */
+static int doPageCompression;
+
+/*
* RedoStartLSN points to the checkpoint's REDO location which is specified
* in a backup label file, backup history file or control file. In standby
* mode, XLOG streaming usually starts from the position where an invalid
@@ -464,7 +471,7 @@ typedef struct XLogCtlInsert
*/
XLogRecPtr RedoRecPtr; /* current redo point for insertions */
bool forcePageWrites; /* forcing full-page writes for PITR? */
- bool fullPageWrites;
+ int fullPageWrites;
/*
* exclusiveBackup is true if a backup started with pg_start_backup() is
@@ -915,10 +922,11 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
WALInsertLockAcquire();
/*
- * Check to see if my copy of RedoRecPtr or doPageWrites is out of date.
- * If so, may have to go back and have the caller recompute everything.
- * This can only happen just after a checkpoint, so it's better to be
- * slow in this case and fast otherwise.
+ * Check to see if my copy of RedoRecPtr, doPageWrites or
+ * doPageCompression is out of date. If so, may have to go back and
+ * have the caller recompute everything. This can only happen just
+ * after a checkpoint, so it's better to be slow in this case and
+ * fast otherwise.
*
* If we aren't doing full-page writes then RedoRecPtr doesn't actually
* affect the contents of the XLOG record, so we'll update our local copy
@@ -932,6 +940,7 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
RedoRecPtr = Insert->RedoRecPtr;
}
doPageWrites = (Insert->fullPageWrites || Insert->forcePageWrites);
+ doPageCompression = (Insert->fullPageWrites == FULL_PAGE_WRITES_COMPRESS);
if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites)
{
@@ -5935,6 +5944,7 @@ StartupXLOG(void)
RedoRecPtr = XLogCtl->RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
doPageWrites = lastFullPageWrites;
+ doPageCompression = (lastFullPageWrites == FULL_PAGE_WRITES_COMPRESS);
if (RecPtr < checkPoint.redo)
ereport(PANIC,
@@ -7232,17 +7242,23 @@ GetRedoRecPtr(void)
/*
* Return information needed to decide whether a modified block needs a
- * full-page image to be included in the WAL record.
+ * full-page image to be included in the WAL record, compressed or not.
*
* The returned values are cached copies from backend-private memory, and
* possibly out-of-date. XLogInsertRecord will re-check them against
* up-to-date values, while holding the WAL insert lock.
*/
void
-GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
-{
- *RedoRecPtr_p = RedoRecPtr;
- *doPageWrites_p = doPageWrites;
+GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p,
+ bool *doPageWrites_p,
+ bool *doPageCompression_p)
+{
+ if (RedoRecPtr_p)
+ *RedoRecPtr_p = RedoRecPtr;
+ if (doPageWrites_p)
+ *doPageWrites_p = doPageWrites;
+ if (doPageCompression_p)
+ *doPageCompression_p = doPageCompression;
}
/*
@@ -8458,10 +8474,10 @@ UpdateFullPageWrites(void)
* setting it to false, first write the WAL record and then set the global
* flag.
*/
- if (fullPageWrites)
+ if (fullPageWrites != FULL_PAGE_WRITES_OFF)
{
WALInsertLockAcquireExclusive();
- Insert->fullPageWrites = true;
+ Insert->fullPageWrites = fullPageWrites;
WALInsertLockRelease();
}
@@ -8472,7 +8488,7 @@ UpdateFullPageWrites(void)
if (XLogStandbyInfoActive() && !RecoveryInProgress())
{
XLogBeginInsert();
- XLogRegisterData((char *) (&fullPageWrites), sizeof(bool));
+ XLogRegisterData((char *) (&fullPageWrites), sizeof(int));
XLogInsert(RM_XLOG_ID, XLOG_FPW_CHANGE);
}
@@ -8480,7 +8496,7 @@ UpdateFullPageWrites(void)
if (!fullPageWrites)
{
WALInsertLockAcquireExclusive();
- Insert->fullPageWrites = false;
+ Insert->fullPageWrites = fullPageWrites;
WALInsertLockRelease();
}
END_CRIT_SECTION();
@@ -8824,16 +8840,16 @@ xlog_redo(XLogReaderState *record)
}
else if (info == XLOG_FPW_CHANGE)
{
- bool fpw;
+ int fpw;
- memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
+ memcpy(&fpw, XLogRecGetData(record), sizeof(int));
/*
* Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
* do_pg_start_backup() and do_pg_stop_backup() can check whether
* full_page_writes has been disabled during online backup.
*/
- if (!fpw)
+ if (fpw == FULL_PAGE_WRITES_OFF)
{
SpinLockAcquire(&XLogCtl->info_lck);
if (XLogCtl->lastFpwDisableRecPtr < ReadRecPtr)
@@ -9246,7 +9262,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
recptr = XLogCtl->lastFpwDisableRecPtr;
SpinLockRelease(&XLogCtl->info_lck);
- if (!checkpointfpw || startpoint <= recptr)
+ if (checkpointfpw == FULL_PAGE_WRITES_OFF || startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 34e44e4..5591d39 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -27,6 +27,7 @@
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
+#include "utils/pg_lzcompress.h"
#include "utils/memutils.h"
#include "pg_trace.h"
@@ -50,6 +51,8 @@ typedef struct
XLogRecData bkp_rdatas[2]; /* temporary rdatas used to hold references to
* backup block data in XLogRecordAssemble() */
+ char *compressed_page; /* buffer to hold a compressed version
+ * of the page image */
} registered_buffer;
static registered_buffer *registered_buffers;
@@ -95,7 +98,10 @@ static MemoryContext xloginsert_cxt;
static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
XLogRecPtr RedoRecPtr, bool doPageWrites,
- XLogRecPtr *fpw_lsn);
+ bool doPageCompression, XLogRecPtr *fpw_lsn);
+
+static bool XLogCompressBackupBlock(char *page, uint32 orig_len,
+ char *dest, uint16 *len);
/*
* Begin constructing a WAL record. This must be called before the
@@ -150,6 +156,7 @@ XLogEnsureRecordSpace(int max_block_id, int ndatas)
if (nbuffers > max_registered_buffers)
{
+ int i;
registered_buffers = (registered_buffer *)
repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
@@ -159,6 +166,9 @@ XLogEnsureRecordSpace(int max_block_id, int ndatas)
*/
MemSet(&registered_buffers[max_registered_buffers], 0,
(nbuffers - max_registered_buffers) * sizeof(registered_buffer));
+ for (i = max_registered_buffers; i < nbuffers; i++)
+ registered_buffers[i].compressed_page = (char *)
+ MemoryContextAllocZero(xloginsert_cxt, BLCKSZ);
max_registered_buffers = nbuffers;
}
@@ -409,18 +419,20 @@ XLogInsert(RmgrId rmid, uint8 info)
{
XLogRecPtr RedoRecPtr;
bool doPageWrites;
+ bool doPageCompression;
XLogRecPtr fpw_lsn;
XLogRecData *rdt;
/*
- * Get values needed to decide whether to do full-page writes. Since
- * we don't yet have an insertion lock, these could change under us,
- * but XLogInsertRecData will recheck them once it has a lock.
+ * Get values needed to decide whether to do full-page writes and, if so,
+ * under what conditions. Since we don't yet have an insertion lock, these
+ * could change under us, but XLogInsertRecord will recheck them once it
+ * has a lock.
*/
- GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);
+ GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites, &doPageCompression);
rdt = XLogRecordAssemble(rmid, info, RedoRecPtr, doPageWrites,
- &fpw_lsn);
+ doPageCompression, &fpw_lsn);
EndPos = XLogInsertRecord(rdt, fpw_lsn);
} while (EndPos == InvalidXLogRecPtr);
@@ -445,7 +457,7 @@ XLogInsert(RmgrId rmid, uint8 info)
static XLogRecData *
XLogRecordAssemble(RmgrId rmid, uint8 info,
XLogRecPtr RedoRecPtr, bool doPageWrites,
- XLogRecPtr *fpw_lsn)
+ bool doPageCompression, XLogRecPtr *fpw_lsn)
{
XLogRecData *rdt;
uint32 total_len = 0;
@@ -472,7 +484,11 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
/*
* Make an rdata chain containing all the data portions of all block
* references. This includes the data for full-page images. Also append
- * the headers for the block references in the scratch buffer.
+ * the headers for the block references in the scratch buffer. If
+ * compression of full-page writes is activated, each block image is
+ * first compressed into its dedicated buffer, and the compressed data
+ * is linked into the record chain in place of the raw page.
*/
*fpw_lsn = InvalidXLogRecPtr;
for (block_id = 0; block_id < max_registered_block_id; block_id++)
@@ -529,6 +545,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
if (needs_backup)
{
Page page = regbuf->page;
+ bool compression_done = false;
/*
* The page needs to be backed up, so set up *bimg
@@ -563,29 +580,76 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
/* Fill in the remaining fields in the XLogRecordBlockData struct */
bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
- total_len += BLCKSZ - bimg.hole_length;
-
/*
- * Construct XLogRecData entries for the page content.
+ * Construct XLogRecData entries for the page content. If page
+ * compression is active, instead of creating a new entry store
+ * the data in the dedicated buffer to prepare for compression.
+ * If the page has a hole, the hole is removed before compressing.
*/
- rdt_datas_last->next = &regbuf->bkp_rdatas[0];
- rdt_datas_last = rdt_datas_last->next;
- if (bimg.hole_length == 0)
+ if (doPageCompression)
{
- rdt_datas_last->data = page;
- rdt_datas_last->len = BLCKSZ;
+ int page_len = BLCKSZ - bimg.hole_length;
+ char uncompressed_page[BLCKSZ];
+ uint16 compression_len;
+
+ /* shape block image for compression and skip hole if any */
+ if (bimg.hole_length == 0)
+ memcpy(uncompressed_page, page, BLCKSZ);
+ else
+ {
+ /* Copy page content without hole */
+ memcpy(uncompressed_page, page, bimg.hole_offset);
+ memcpy(uncompressed_page + bimg.hole_offset,
+ page + bimg.hole_offset + bimg.hole_length,
+ BLCKSZ - (bimg.hole_offset + bimg.hole_length));
+ }
+
+ /* Perform compression of block */
+ if (XLogCompressBackupBlock(uncompressed_page,
+ page_len,
+ regbuf->compressed_page,
+ &compression_len))
+ {
+ /* compression is done, add record */
+ compression_done = true;
+ bimg.compress_len = compression_len;
+
+ rdt_datas_last->next = &regbuf->bkp_rdatas[0];
+ rdt_datas_last = rdt_datas_last->next;
+ rdt_datas_last->data = regbuf->compressed_page;
+ rdt_datas_last->len = compression_len;
+ total_len += compression_len;
+ }
}
- else
+
+ /*
+ * If compression was not performed, store this block
+ * image normally.
+ */
+ if (!compression_done)
{
- /* must skip the hole */
- rdt_datas_last->data = page;
- rdt_datas_last->len = bimg.hole_offset;
+ total_len += BLCKSZ - bimg.hole_length;
- rdt_datas_last->next = &regbuf->bkp_rdatas[1];
+ rdt_datas_last->next = &regbuf->bkp_rdatas[0];
rdt_datas_last = rdt_datas_last->next;
+ if (bimg.hole_length == 0)
+ {
+ rdt_datas_last->data = page;
+ rdt_datas_last->len = BLCKSZ;
+ }
+ else
+ {
+ /* must skip the hole */
+ rdt_datas_last->data = page;
+ rdt_datas_last->len = bimg.hole_offset;
- rdt_datas_last->data = page + (bimg.hole_offset + bimg.hole_length);
- rdt_datas_last->len = BLCKSZ - (bimg.hole_offset + bimg.hole_length);
+ rdt_datas_last->next = &regbuf->bkp_rdatas[1];
+ rdt_datas_last = rdt_datas_last->next;
+
+ rdt_datas_last->data = page + (bimg.hole_offset + bimg.hole_length);
+ rdt_datas_last->len = BLCKSZ - (bimg.hole_offset + bimg.hole_length);
+ }
+ bimg.compress_len = 0;
}
}
@@ -681,6 +745,40 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
}
/*
+ * Create a compressed version of a backup block. If successful, return
+ * true and set 'len' to its length. If the block cannot be compressed,
+ * or compressing it does not save enough space, return false.
+ */
+static bool
+XLogCompressBackupBlock(char *page, uint32 orig_len, char *dest, uint16 *len)
+{
+ struct varlena *buf = (struct varlena *) dest;
+ bool ret;
+ ret = pglz_compress(page, orig_len,
+ (PGLZ_Header *) buf, PGLZ_strategy_default);
+
+ /* Incompressible data */
+ if (!ret)
+ return false;
+
+ /*
+ * We recheck the actual size even if pglz_compress() reports success,
+ * because it might be satisfied with having saved as little as one byte
+ * in the compressed data --- which could turn into a net loss once you
+ * consider header and alignment padding. Worst case, the compressed
+ * format might require three padding bytes (plus header, which is
+ * included in VARSIZE(buf)), whereas the uncompressed format would take
+ * only one header byte and no padding if the value is short enough. So
+ * we insist on a savings of more than 2 bytes to ensure we have a gain.
+ */
+ if (VARSIZE(buf) >= orig_len - 2)
+ return false;
+
+ *len = (uint16) VARSIZE(buf);
+ return true;
+}
+
+/*
* Determine whether the buffer referenced has to be backed up.
*
* Since we don't yet have the insert lock, fullPageWrites and forcePageWrites
@@ -694,7 +792,7 @@ XLogCheckBufferNeedsBackup(Buffer buffer)
bool doPageWrites;
Page page;
- GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);
+ GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites, NULL);
page = BufferGetPage(buffer);
@@ -875,9 +973,13 @@ InitXLogInsert(void)
if (registered_buffers == NULL)
{
+ int i;
registered_buffers = (registered_buffer *)
MemoryContextAllocZero(xloginsert_cxt,
sizeof(registered_buffer) * (XLR_NORMAL_MAX_BLOCK_ID + 1));
+ for (i = 0; i < XLR_NORMAL_MAX_BLOCK_ID + 1; i++)
+ registered_buffers[i].compressed_page = (char *)
+ MemoryContextAllocZero(xloginsert_cxt, BLCKSZ);
max_registered_buffers = XLR_NORMAL_MAX_BLOCK_ID + 1;
}
if (rdatas == NULL)
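(Aside: the hole-elision step above, shown in isolation as a sketch. It
assumes hole_offset/hole_length describe the page's unused middle, as in
XLogRecordBlockImageHeader; flatten_block_image() itself is hypothetical
and not part of the patch.)

static int
flatten_block_image(const char *page, uint16 hole_offset,
					uint16 hole_length, char *dest)
{
	if (hole_length == 0)
		memcpy(dest, page, BLCKSZ);
	else
	{
		/* copy the data before and after the hole, back to back */
		memcpy(dest, page, hole_offset);
		memcpy(dest + hole_offset,
			   page + hole_offset + hole_length,
			   BLCKSZ - (hole_offset + hole_length));
	}
	return BLCKSZ - hole_length;	/* number of bytes to compress */
}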
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 67d6223..8274477 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -20,6 +20,7 @@
#include "access/xlog_internal.h"
#include "access/xlogreader.h"
#include "catalog/pg_control.h"
+#include "utils/pg_lzcompress.h"
static bool allocate_recordbuf(XLogReaderState *state, uint32 reclength);
@@ -1034,7 +1035,11 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
{
COPY_HEADER_FIELD(&blk->hole_offset, sizeof(uint16));
COPY_HEADER_FIELD(&blk->hole_length, sizeof(uint16));
- datatotal += BLCKSZ - blk->hole_length;
+ COPY_HEADER_FIELD(&blk->compress_len, sizeof(uint16));
+ if (blk->compress_len != 0)
+ datatotal += blk->compress_len;
+ else
+ datatotal += BLCKSZ - blk->hole_length;
}
if (!(fork_flags & BKPBLOCK_SAME_REL))
{
@@ -1195,6 +1200,8 @@ bool
RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
{
DecodedBkpBlock *bkpb;
+ char *uncompressed_page = NULL;
+ char *block_image;
if (!record->blocks[block_id].in_use)
return false;
@@ -1202,20 +1209,32 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
return false;
bkpb = &record->blocks[block_id];
+ block_image = bkpb->bkp_image;
+ /* if the block is compressed, decompress it before processing */
+ if (bkpb->compress_len != 0)
+ {
+ uncompressed_page = (char *) palloc(BLCKSZ - bkpb->hole_length);
+ if (!pglz_decompress((PGLZ_Header *) bkpb->bkp_image, uncompressed_page))
+ {
+ /* compressed image is corrupted; report failure to the caller */
+ pfree(uncompressed_page);
+ return false;
+ }
+ block_image = uncompressed_page;
+ }
+
+ /* generate page, taking into account hole if necessary */
if (bkpb->hole_length == 0)
{
- memcpy(page, bkpb->bkp_image, BLCKSZ);
+ memcpy(page, block_image, BLCKSZ);
}
else
{
- memcpy(page, bkpb->bkp_image, bkpb->hole_offset);
+ memcpy(page, block_image, bkpb->hole_offset);
/* must zero-fill the hole */
MemSet(page + bkpb->hole_offset, 0, bkpb->hole_length);
memcpy(page + (bkpb->hole_offset + bkpb->hole_length),
- bkpb->bkp_image + bkpb->hole_offset,
+ block_image + bkpb->hole_offset,
BLCKSZ - (bkpb->hole_offset + bkpb->hole_length));
}
+ if (uncompressed_page)
+ pfree(uncompressed_page);
return true;
}
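(Aside: redo routines do not need to know whether an image was compressed;
they keep calling RestoreBlockImage() as before. A hypothetical call site,
for illustration only:)

	char	page[BLCKSZ];

	if (!RestoreBlockImage(record, block_id, page))
		elog(ERROR, "failed to restore block image");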
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index f04757c..1abfafa 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -420,6 +420,23 @@ static const struct config_enum_entry row_security_options[] = {
};
/*
+ * Although only "on", "off", and "compress" are documented, we
+ * accept all the likely variants of "on" and "off".
+ */
+static const struct config_enum_entry full_page_writes_options[] = {
+ {"compress", FULL_PAGE_WRITES_COMPRESS, false},
+ {"on", FULL_PAGE_WRITES_ON, false},
+ {"off", FULL_PAGE_WRITES_OFF, false},
+ {"true", FULL_PAGE_WRITES_ON, true},
+ {"false", FULL_PAGE_WRITES_OFF, true},
+ {"yes", FULL_PAGE_WRITES_ON, true},
+ {"no", FULL_PAGE_WRITES_OFF, true},
+ {"1", FULL_PAGE_WRITES_ON, true},
+ {"0", FULL_PAGE_WRITES_OFF, true},
+ {NULL, 0, false}
+};
+
+/*
* Options for enum values stored in other modules
*/
extern const struct config_enum_entry wal_level_options[];
@@ -894,19 +911,6 @@ static struct config_bool ConfigureNamesBool[] =
false,
NULL, NULL, NULL
},
- {
- {"full_page_writes", PGC_SIGHUP, WAL_SETTINGS,
- gettext_noop("Writes full pages to WAL when first modified after a checkpoint."),
- gettext_noop("A page write in process during an operating system crash might be "
- "only partially written to disk. During recovery, the row changes "
- "stored in WAL are not enough to recover. This option writes "
- "pages when first modified after a checkpoint to WAL so full recovery "
- "is possible.")
- },
- &fullPageWrites,
- true,
- NULL, NULL, NULL
- },
{
{"wal_log_hints", PGC_POSTMASTER, WAL_SETTINGS,
@@ -3436,6 +3440,20 @@ static struct config_enum ConfigureNamesEnum[] =
},
{
+ {"full_page_writes", PGC_SIGHUP, WAL_SETTINGS,
+ gettext_noop("Writes full pages to WAL when first modified after a checkpoint."),
+ gettext_noop("A page write in process during an operating system crash might be "
+ "only partially written to disk. During recovery, the row changes "
+ "stored in WAL are not enough to recover. This option writes "
+ "pages when first modified after a checkpoint to WAL so full recovery "
+ "is possible.")
+ },
+ &fullPageWrites,
+ FULL_PAGE_WRITES_ON, full_page_writes_options,
+ NULL, NULL, NULL
+ },
+
+ {
{"trace_recovery_messages", PGC_SIGHUP, DEVELOPER_OPTIONS,
gettext_noop("Enables logging of recovery-related debugging information."),
gettext_noop("Each level includes all the levels that follow it. The later"
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 4a89cb7..8a1fb9e 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -185,7 +185,8 @@
# fsync
# fsync_writethrough
# open_sync
-#full_page_writes = on # recover from partial page writes
+#full_page_writes = on # recover from partial page writes;
+ # off, compress, or on
#wal_log_hints = off # also do full page writes of non-critical updates
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
diff --git a/src/bin/pg_controldata/pg_controldata.c b/src/bin/pg_controldata/pg_controldata.c
index b2e0793..e250ee0 100644
--- a/src/bin/pg_controldata/pg_controldata.c
+++ b/src/bin/pg_controldata/pg_controldata.c
@@ -250,7 +250,7 @@ main(int argc, char *argv[])
printf(_("Latest checkpoint's PrevTimeLineID: %u\n"),
ControlFile.checkPointCopy.PrevTimeLineID);
printf(_("Latest checkpoint's full_page_writes: %s\n"),
- ControlFile.checkPointCopy.fullPageWrites ? _("on") : _("off"));
+ FullPageWritesStr(ControlFile.checkPointCopy.fullPageWrites));
printf(_("Latest checkpoint's NextXID: %u/%u\n"),
ControlFile.checkPointCopy.nextXidEpoch,
ControlFile.checkPointCopy.nextXid);
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index 666e8db..178c43c 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -517,7 +517,7 @@ GuessControlValues(void)
ControlFile.checkPointCopy.redo = SizeOfXLogLongPHD;
ControlFile.checkPointCopy.ThisTimeLineID = 1;
ControlFile.checkPointCopy.PrevTimeLineID = 1;
- ControlFile.checkPointCopy.fullPageWrites = false;
+ ControlFile.checkPointCopy.fullPageWrites = FULL_PAGE_WRITES_OFF;
ControlFile.checkPointCopy.nextXidEpoch = 0;
ControlFile.checkPointCopy.nextXid = FirstNormalTransactionId;
ControlFile.checkPointCopy.nextOid = FirstBootstrapObjectId;
@@ -601,7 +601,7 @@ PrintControlValues(bool guessed)
printf(_("Latest checkpoint's TimeLineID: %u\n"),
ControlFile.checkPointCopy.ThisTimeLineID);
printf(_("Latest checkpoint's full_page_writes: %s\n"),
- ControlFile.checkPointCopy.fullPageWrites ? _("on") : _("off"));
+ FullPageWritesStr(ControlFile.checkPointCopy.fullPageWrites));
printf(_("Latest checkpoint's NextXID: %u/%u\n"),
ControlFile.checkPointCopy.nextXidEpoch,
ControlFile.checkPointCopy.nextXid);
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index d06fbc0..c34f49c 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -96,7 +96,6 @@ extern int XLogArchiveTimeout;
extern bool XLogArchiveMode;
extern char *XLogArchiveCommand;
extern bool EnableHotStandby;
-extern bool fullPageWrites;
extern bool wal_log_hints;
extern bool log_checkpoints;
@@ -113,6 +112,23 @@ extern int wal_level;
#define XLogArchivingActive() (XLogArchiveMode && wal_level >= WAL_LEVEL_ARCHIVE)
#define XLogArchiveCommandSet() (XLogArchiveCommand[0] != '\0')
+/* full-page writes */
+typedef enum FullPageWritesLevel
+{
+ FULL_PAGE_WRITES_OFF = 0,
+ FULL_PAGE_WRITES_COMPRESS,
+ FULL_PAGE_WRITES_ON
+} FullPageWritesLevel;
+extern int fullPageWrites;
+
+/*
+ * Convert full-page write parameter into a readable string.
+ */
+#define FullPageWritesStr(fpw) \
+ (fpw == FULL_PAGE_WRITES_ON ? _("on") : \
+ (fpw == FULL_PAGE_WRITES_COMPRESS ? _("compress") : \
+ (fpw == FULL_PAGE_WRITES_OFF ? _("off") : _("unrecognized"))))
+
/*
* Is WAL-logging necessary for archival or log-shipping, or can we skip
* WAL-logging if we fsync() the data before committing instead?
@@ -235,7 +251,9 @@ extern bool CreateRestartPoint(int flags);
extern void XLogPutNextOid(Oid nextOid);
extern XLogRecPtr XLogRestorePoint(const char *rpName);
extern void UpdateFullPageWrites(void);
-extern void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p);
+extern void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p,
+ bool *doPageWrites_p,
+ bool *doPageCompression_p);
extern XLogRecPtr GetRedoRecPtr(void);
extern XLogRecPtr GetInsertRecPtr(void);
extern XLogRecPtr GetFlushRecPtr(void);
diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h
index eb6cc89..84c107e 100644
--- a/src/include/access/xlogreader.h
+++ b/src/include/access/xlogreader.h
@@ -55,6 +55,7 @@ typedef struct
char *bkp_image;
uint16 hole_offset;
uint16 hole_length;
+ uint16 compress_len;
/* Buffer holding the rmgr-specific data associated with this block */
bool has_data;
diff --git a/src/include/access/xlogrecord.h b/src/include/access/xlogrecord.h
index 11ddfac..0a111c1 100644
--- a/src/include/access/xlogrecord.h
+++ b/src/include/access/xlogrecord.h
@@ -29,6 +29,9 @@
* ...
* main data
*
+ * If compression of full-page writes is activated, each full-page image
+ * is stored in compressed form in the record chain.
+ *
* There can be zero or more XLogRecordBlockHeaders, and 0 or more bytes of
* rmgr-specific data not associated with a block. XLogRecord structs
* always start on MAXALIGN boundaries in the WAL files, but the rest of
@@ -103,11 +106,16 @@ typedef struct XLogRecordBlockHeader
* such a "hole" from the stored data (and it's not counted in the
* XLOG record's CRC, either). Hence, the amount of block data actually
* present is BLCKSZ - hole_length bytes.
+ *
+ * compress_len indicates the length of this block image when compressed.
+ * A length of 0 means that this block is not compressed. If the block
+ * image has a hole, it is compressed with the hole removed.
*/
typedef struct XLogRecordBlockImageHeader
{
uint16 hole_offset; /* number of bytes before "hole" */
- uint16 hole_length; /* number of bytes in "hole" */
+ uint16 hole_length; /* number of bytes in "hole" */
+ uint16 compress_len; /* size of compressed block */
} XLogRecordBlockImageHeader;
#define SizeOfXLogRecordBlockImageHeader sizeof(XLogRecordBlockImageHeader)
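(Aside: given the header above, a reader derives the length of the image
payload as below; this mirrors the DecodeXLogRecord() change earlier in
this patch, though the helper itself is hypothetical.)

static uint32
block_image_length(const XLogRecordBlockImageHeader *hdr)
{
	/* compress_len == 0 means the image is stored raw, hole elided */
	if (hdr->compress_len != 0)
		return hdr->compress_len;
	return BLCKSZ - hdr->hole_length;
}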
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 15f81e4..97d4a6d 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -35,7 +35,7 @@ typedef struct CheckPoint
TimeLineID ThisTimeLineID; /* current TLI */
TimeLineID PrevTimeLineID; /* previous TLI, if this record begins a new
* timeline (equals ThisTimeLineID otherwise) */
- bool fullPageWrites; /* current full_page_writes */
+ int fullPageWrites; /* current full_page_writes */
uint32 nextXidEpoch; /* higher-order bits of nextXid */
TransactionId nextXid; /* next free XID */
Oid nextOid; /* next free OID */
--
2.1.3