On Wed, 2020-02-19 at 20:16 +0100, Tomas Vondra wrote:
> 1) explain.c currently does this:
>
> I wonder if we could show something for plain explain (without
> analyze).
> At least the initial estimate of partitions, etc. I know not showing
> those details until after execution is what e.g. sort does, but I
> find
> it a bit annoying.
Looks like you meant to include some example explain output, but I
think I understand what you mean. I'll look into it.
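For reference, the ANALYZE case with the current patch prints something
like this (numbers invented):

   ->  HashAggregate
         Group Key: a
         Memory Usage: 4096kB Batches: 4 Disk: 8192kB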
> 2) The ExecBuildAggTrans comment should probably explain "spilled".
Done.
> 3) I wonder if we need to invent new opcodes? Wouldn't it be simpler
> to
> just add a new flag to the agg_* structs instead? I haven't tried
> hacking
> this, so maybe it's a silly idea.
There was a reason I didn't do it this way, but I don't remember it
offhand. I'll look into this as well.
> 4) lookup_hash_entries says
>
> /* check to see if we need to spill the tuple for this grouping
> set */
>
> But that seems bogus, because AFAIK we can't spill tuples for
> grouping
> sets. So maybe this should say just "grouping"?
Yes, we can spill tuples for grouping sets. Unfortunately, my tests
(which previously covered this case) no longer seem to exercise that
path well. I am going to improve my tests, too.
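Roughly the kind of case the tests should force (table name and
settings are just for illustration):

   set work_mem = '64kB';
   select a, b, count(*) from spill_test
     group by grouping sets ((a), (b), (a, b));

With work_mem that low, the per-set hash tables should hit the limit,
so some tuples get spilled for one set while still matching an
in-memory group for another.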
> 5) Assert(nbuckets > 0);
I was not able to reproduce this issue, but I did set a floor of 256
buckets (HASHAGG_MIN_BUCKETS).
> which fails with segfault at execution time:
Fixed. I was resetting the hash table context without setting the
pointers to NULL.
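(Specifically, in agg_refill_hash_table the reset now does:

   ReScanExprContext(aggstate->hashcontext);
   for (setno = 0; setno < aggstate->num_hashes; setno++)
       aggstate->perhash[setno].hashtable = NULL;

so nothing can dereference the stale hashtable pointers after the
context is reset.)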
Thanks! I attached a new, rebased version. The fixes are quick ones for
now; I will revisit them after I improve my test cases (which may turn
up more issues).
Regards,
Jeff Davis
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index c1128f89ec7..85f559387f9 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -1751,6 +1751,23 @@ include_dir 'conf.d'
</listitem>
</varlistentry>
+ <varlistentry id="guc-hashagg-mem-overflow" xreflabel="hashagg_mem_overflow">
+ <term><varname>hashagg_mem_overflow</varname> (<type>boolean</type>)
+ <indexterm>
+ <primary><varname>hashagg_mem_overflow</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+ <para>
+ If hash aggregation exceeds <varname>work_mem</varname> at query
+ execution time, and <varname>hashagg_mem_overflow</varname> is set
+ to <literal>on</literal>, continue consuming more memory rather than
+ performing disk-based hash aggregation. The default
+ is <literal>off</literal>.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="guc-max-stack-depth" xreflabel="max_stack_depth">
<term><varname>max_stack_depth</varname> (<type>integer</type>)
<indexterm>
@@ -4476,6 +4493,24 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
</listitem>
</varlistentry>
+ <varlistentry id="guc-enable-hashagg-spill" xreflabel="enable_hashagg_spill">
+ <term><varname>enable_hashagg_spill</varname> (<type>boolean</type>)
+ <indexterm>
+ <primary><varname>enable_hashagg_spill</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+ <para>
+ Enables or disables the query planner's use of hashed aggregation plan
+ types when the memory usage is expected to
+ exceed <varname>work_mem</varname>. This only affects the planner
+ choice; actual behavior at execution time is dictated by
+ <xref linkend="guc-hashagg-mem-overflow"/>. The default
+ is <literal>on</literal>.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="guc-enable-hashjoin" xreflabel="enable_hashjoin">
<term><varname>enable_hashjoin</varname> (<type>boolean</type>)
<indexterm>
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index d901dc4a50e..2923f4ba46d 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -104,6 +104,7 @@ static void show_tablesample(TableSampleClause *tsc, PlanState *planstate,
List *ancestors, ExplainState *es);
static void show_sort_info(SortState *sortstate, ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
+static void show_hashagg_info(AggState *aggstate, ExplainState *es);
static void show_tidbitmap_info(BitmapHeapScanState *planstate,
ExplainState *es);
static void show_instrumentation_count(const char *qlabel, int which,
@@ -1882,6 +1883,8 @@ ExplainNode(PlanState *planstate, List *ancestors,
case T_Agg:
show_agg_keys(castNode(AggState, planstate), ancestors, es);
show_upper_qual(plan->qual, "Filter", planstate, ancestors, es);
+ if (es->analyze)
+ show_hashagg_info((AggState *) planstate, es);
if (plan->qual)
show_instrumentation_count("Rows Removed by Filter", 1,
planstate, es);
@@ -2769,6 +2772,55 @@ show_hash_info(HashState *hashstate, ExplainState *es)
}
}
+/*
+ * If EXPLAIN ANALYZE, show information on hash aggregate memory usage and
+ * batches.
+ */
+static void
+show_hashagg_info(AggState *aggstate, ExplainState *es)
+{
+ Agg *agg = (Agg *)aggstate->ss.ps.plan;
+ long memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
+
+ Assert(IsA(aggstate, AggState));
+
+ if (agg->aggstrategy != AGG_HASHED &&
+ agg->aggstrategy != AGG_MIXED)
+ return;
+
+ if (es->format == EXPLAIN_FORMAT_TEXT)
+ {
+ appendStringInfoSpaces(es->str, es->indent * 2);
+ appendStringInfo(
+ es->str,
+ "Memory Usage: %ldkB",
+ memPeakKb);
+
+ if (aggstate->hash_batches_used > 0)
+ {
+ appendStringInfo(
+ es->str,
+ " Batches: %d Disk: %ldkB",
+ aggstate->hash_batches_used, aggstate->hash_disk_used);
+ }
+
+ appendStringInfo(
+ es->str,
+ "\n");
+ }
+ else
+ {
+ ExplainPropertyInteger("Peak Memory Usage", "kB", memPeakKb, es);
+ if (aggstate->hash_batches_used > 0)
+ {
+ ExplainPropertyInteger("HashAgg Batches", NULL,
+ aggstate->hash_batches_used, es);
+ ExplainPropertyInteger("Disk Usage", "kB",
+ aggstate->hash_disk_used, es);
+ }
+ }
+}
+
/*
* If it's EXPLAIN ANALYZE, show exact/lossy pages for a BitmapHeapScan node
*/
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index 121eff97a0c..5236d6f3935 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -79,7 +79,8 @@ static void ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest,
static void ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
ExprEvalStep *scratch,
FunctionCallInfo fcinfo, AggStatePerTrans pertrans,
- int transno, int setno, int setoff, bool ishash);
+ int transno, int setno, int setoff, bool ishash,
+ bool spilled);
/*
@@ -2924,10 +2925,13 @@ ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest,
* check for filters, evaluate aggregate input, check that that input is not
* NULL for a strict transition function, and then finally invoke the
* transition for each of the concurrently computed grouping sets.
+ *
+ * If "spilled" is true, the generated code will take into account the
+ * possibility that a Hash Aggregation has spilled to disk.
*/
ExprState *
ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase,
- bool doSort, bool doHash)
+ bool doSort, bool doHash, bool spilled)
{
ExprState *state = makeNode(ExprState);
PlanState *parent = &aggstate->ss.ps;
@@ -3158,7 +3162,8 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase,
for (int setno = 0; setno < processGroupingSets; setno++)
{
ExecBuildAggTransCall(state, aggstate, &scratch, trans_fcinfo,
- pertrans, transno, setno, setoff, false);
+ pertrans, transno, setno, setoff, false,
+ spilled);
setoff++;
}
}
@@ -3177,7 +3182,8 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase,
for (int setno = 0; setno < numHashes; setno++)
{
ExecBuildAggTransCall(state, aggstate, &scratch, trans_fcinfo,
- pertrans, transno, setno, setoff, true);
+ pertrans, transno, setno, setoff, true,
+ spilled);
setoff++;
}
}
@@ -3227,7 +3233,8 @@ static void
ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
ExprEvalStep *scratch,
FunctionCallInfo fcinfo, AggStatePerTrans pertrans,
- int transno, int setno, int setoff, bool ishash)
+ int transno, int setno, int setoff, bool ishash,
+ bool spilled)
{
int adjust_init_jumpnull = -1;
int adjust_strict_jumpnull = -1;
@@ -3249,7 +3256,8 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
fcinfo->flinfo->fn_strict &&
pertrans->initValueIsNull)
{
- scratch->opcode = EEOP_AGG_INIT_TRANS;
+ scratch->opcode = spilled ?
+ EEOP_AGG_INIT_TRANS_SPILLED : EEOP_AGG_INIT_TRANS;
scratch->d.agg_init_trans.pertrans = pertrans;
scratch->d.agg_init_trans.setno = setno;
scratch->d.agg_init_trans.setoff = setoff;
@@ -3265,7 +3273,8 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
if (pertrans->numSortCols == 0 &&
fcinfo->flinfo->fn_strict)
{
- scratch->opcode = EEOP_AGG_STRICT_TRANS_CHECK;
+ scratch->opcode = spilled ?
+ EEOP_AGG_STRICT_TRANS_CHECK_SPILLED : EEOP_AGG_STRICT_TRANS_CHECK;
scratch->d.agg_strict_trans_check.setno = setno;
scratch->d.agg_strict_trans_check.setoff = setoff;
scratch->d.agg_strict_trans_check.transno = transno;
@@ -3282,9 +3291,11 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
/* invoke appropriate transition implementation */
if (pertrans->numSortCols == 0 && pertrans->transtypeByVal)
- scratch->opcode = EEOP_AGG_PLAIN_TRANS_BYVAL;
+ scratch->opcode = spilled ?
+ EEOP_AGG_PLAIN_TRANS_BYVAL_SPILLED : EEOP_AGG_PLAIN_TRANS_BYVAL;
else if (pertrans->numSortCols == 0)
- scratch->opcode = EEOP_AGG_PLAIN_TRANS;
+ scratch->opcode = spilled ?
+ EEOP_AGG_PLAIN_TRANS_SPILLED : EEOP_AGG_PLAIN_TRANS;
else if (pertrans->numInputs == 1)
scratch->opcode = EEOP_AGG_ORDERED_TRANS_DATUM;
else
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 35eb8b99f69..e21e0c440ea 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -426,9 +426,13 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
&&CASE_EEOP_AGG_STRICT_INPUT_CHECK_ARGS,
&&CASE_EEOP_AGG_STRICT_INPUT_CHECK_NULLS,
&&CASE_EEOP_AGG_INIT_TRANS,
+ &&CASE_EEOP_AGG_INIT_TRANS_SPILLED,
&&CASE_EEOP_AGG_STRICT_TRANS_CHECK,
+ &&CASE_EEOP_AGG_STRICT_TRANS_CHECK_SPILLED,
&&CASE_EEOP_AGG_PLAIN_TRANS_BYVAL,
+ &&CASE_EEOP_AGG_PLAIN_TRANS_BYVAL_SPILLED,
&&CASE_EEOP_AGG_PLAIN_TRANS,
+ &&CASE_EEOP_AGG_PLAIN_TRANS_SPILLED,
&&CASE_EEOP_AGG_ORDERED_TRANS_DATUM,
&&CASE_EEOP_AGG_ORDERED_TRANS_TUPLE,
&&CASE_EEOP_LAST
@@ -1619,6 +1623,35 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
EEO_NEXT();
}
+ EEO_CASE(EEOP_AGG_INIT_TRANS_SPILLED)
+ {
+ AggState *aggstate = castNode(AggState, state->parent);
+ AggStatePerGroup pergroup;
+ AggStatePerGroup pergroup_allaggs;
+
+ pergroup_allaggs = aggstate->all_pergroups[op->d.agg_init_trans.setoff];
+
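+ /* no in-memory state for this set (tuple spilled, or set not in batch) */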
+ if (pergroup_allaggs == NULL)
+ EEO_NEXT();
+
+ pergroup = &pergroup_allaggs[op->d.agg_init_trans.transno];
+
+ /* If transValue has not yet been initialized, do so now. */
+ if (pergroup->noTransValue)
+ {
+ AggStatePerTrans pertrans = op->d.agg_init_trans.pertrans;
+
+ aggstate->curaggcontext = op->d.agg_init_trans.aggcontext;
+ aggstate->current_set = op->d.agg_init_trans.setno;
+
+ ExecAggInitGroup(aggstate, pertrans, pergroup);
+
+ /* copied trans value from input, done this round */
+ EEO_JUMP(op->d.agg_init_trans.jumpnull);
+ }
+
+ EEO_NEXT();
+ }
/* check that a strict aggregate's input isn't NULL */
EEO_CASE(EEOP_AGG_STRICT_TRANS_CHECK)
@@ -1635,6 +1668,24 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
EEO_NEXT();
}
+ EEO_CASE(EEOP_AGG_STRICT_TRANS_CHECK_SPILLED)
+ {
+ AggState *aggstate = castNode(AggState, state->parent);
+ AggStatePerGroup pergroup;
+ AggStatePerGroup pergroup_allaggs;
+
+ pergroup_allaggs = aggstate->all_pergroups[op->d.agg_strict_trans_check.setoff];
+
+ if (pergroup_allaggs == NULL)
+ EEO_NEXT();
+
+ pergroup = &pergroup_allaggs[op->d.agg_strict_trans_check.transno];
+
+ if (unlikely(pergroup->transValueIsNull))
+ EEO_JUMP(op->d.agg_strict_trans_check.jumpnull);
+
+ EEO_NEXT();
+ }
/*
* Evaluate aggregate transition / combine function that has a
@@ -1683,6 +1734,51 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
EEO_NEXT();
}
+ EEO_CASE(EEOP_AGG_PLAIN_TRANS_BYVAL_SPILLED)
+ {
+ AggState *aggstate = castNode(AggState, state->parent);
+ AggStatePerTrans pertrans;
+ AggStatePerGroup pergroup;
+ AggStatePerGroup pergroup_allaggs;
+ FunctionCallInfo fcinfo;
+ MemoryContext oldContext;
+ Datum newVal;
+
+ pertrans = op->d.agg_trans.pertrans;
+
+ pergroup_allaggs = aggstate->all_pergroups[op->d.agg_trans.setoff];
+
+ if (pergroup_allaggs == NULL)
+ EEO_NEXT();
+
+ pergroup = &pergroup_allaggs[op->d.agg_trans.transno];
+
+ Assert(pertrans->transtypeByVal);
+
+ fcinfo = pertrans->transfn_fcinfo;
+
+ /* cf. select_current_set() */
+ aggstate->curaggcontext = op->d.agg_trans.aggcontext;
+ aggstate->current_set = op->d.agg_trans.setno;
+
+ /* set up aggstate->curpertrans for AggGetAggref() */
+ aggstate->curpertrans = pertrans;
+
+ /* invoke transition function in per-tuple context */
+ oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory);
+
+ fcinfo->args[0].value = pergroup->transValue;
+ fcinfo->args[0].isnull = pergroup->transValueIsNull;
+ fcinfo->isnull = false; /* just in case transfn doesn't set it */
+
+ newVal = FunctionCallInvoke(fcinfo);
+
+ pergroup->transValue = newVal;
+ pergroup->transValueIsNull = fcinfo->isnull;
+
+ MemoryContextSwitchTo(oldContext);
+
+ EEO_NEXT();
+ }
/*
* Evaluate aggregate transition / combine function that has a
@@ -1726,6 +1822,66 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
newVal = FunctionCallInvoke(fcinfo);
+ /*
+ * For pass-by-ref datatype, must copy the new value into
+ * aggcontext and free the prior transValue. But if transfn
+ * returned a pointer to its first input, we don't need to do
+ * anything. Also, if transfn returned a pointer to a R/W
+ * expanded object that is already a child of the aggcontext,
+ * assume we can adopt that value without copying it.
+ */
+ if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue))
+ newVal = ExecAggTransReparent(aggstate, pertrans,
+ newVal, fcinfo->isnull,
+ pergroup->transValue,
+ pergroup->transValueIsNull);
+
+ pergroup->transValue = newVal;
+ pergroup->transValueIsNull = fcinfo->isnull;
+
+ MemoryContextSwitchTo(oldContext);
+
+ EEO_NEXT();
+ }
+ EEO_CASE(EEOP_AGG_PLAIN_TRANS_SPILLED)
+ {
+ AggState *aggstate = castNode(AggState, state->parent);
+ AggStatePerTrans pertrans;
+ AggStatePerGroup pergroup;
+ AggStatePerGroup pergroup_allaggs;
+ FunctionCallInfo fcinfo;
+ MemoryContext oldContext;
+ Datum newVal;
+
+ pertrans = op->d.agg_trans.pertrans;
+
+ pergroup_allaggs = aggstate->all_pergroups[op->d.agg_trans.setoff];
+
+ if (pergroup_allaggs == NULL)
+ EEO_NEXT();
+
+ pergroup = &pergroup_allaggs[op->d.agg_trans.transno];
+
+ Assert(!pertrans->transtypeByVal);
+
+ fcinfo = pertrans->transfn_fcinfo;
+
+ /* cf. select_current_set() */
+ aggstate->curaggcontext = op->d.agg_trans.aggcontext;
+ aggstate->current_set = op->d.agg_trans.setno;
+
+ /* set up aggstate->curpertrans for AggGetAggref() */
+ aggstate->curpertrans = pertrans;
+
+ /* invoke transition function in per-tuple context */
+ oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory);
+
+ fcinfo->args[0].value = pergroup->transValue;
+ fcinfo->args[0].isnull = pergroup->transValueIsNull;
+ fcinfo->isnull = false; /* just in case transfn doesn't set it */
+
+ newVal = FunctionCallInvoke(fcinfo);
+
/*
* For pass-by-ref datatype, must copy the new value into
* aggcontext and free the prior transValue. But if transfn
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 2e9a21bf400..517a2649f7e 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -194,6 +194,29 @@
* transition values. hashcontext is the single context created to support
* all hash tables.
*
+ * Spilling To Disk
+ *
+ * When performing hash aggregation, if the hash table memory exceeds the
+ * limit (see hash_agg_check_limits()), we enter "spill mode". In spill
+ * mode, we advance the transition states only for groups already in the
+ * hash table. For tuples that would need to create a new hash table
+ * entries (and initialize new transition states), we instead spill them to
+ * disk to be processed later. The tuples are spilled in a partitioned
+ * manner, so that subsequent batches are smaller and less likely to exceed
+ * work_mem (if a batch does exceed work_mem, it must be spilled
+ * recursively).
+ *
+ * Spilled data is written to logical tapes. These provide better control
+ * over memory usage, disk space, and the number of files than if we were
+ * to use a BufFile for each spill.
+ *
+ * Note that it's possible for transition states to start small but then
+ * grow very large; for instance in the case of ARRAY_AGG. In such cases,
+ * it's still possible to significantly exceed work_mem. We try to avoid
+ * this situation by estimating what will fit in the available memory, and
+ * imposing a limit on the number of groups separately from the amount of
+ * memory consumed.
+ *
* Transition / Combine function invocation:
*
* For performance reasons transition functions, including combine
@@ -233,12 +256,100 @@
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/datum.h"
+#include "utils/dynahash.h"
#include "utils/expandeddatum.h"
+#include "utils/logtape.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/syscache.h"
#include "utils/tuplesort.h"
+/*
+ * Control how many partitions are created when spilling HashAgg to
+ * disk.
+ *
+ * HASHAGG_PARTITION_FACTOR is multiplied by the estimated number of
+ * partitions needed such that each partition will fit in memory. The factor
+ * is set higher than one because there's not a high cost to having a few too
+ * many partitions, and it makes it less likely that a partition will need to
+ * be spilled recursively. Another benefit of having more, smaller partitions
+ * is that small hash tables may perform better than large ones due to memory
+ * caching effects.
+ *
+ * We also specify a min and max number of partitions per spill. Too few might
+ * mean a lot of wasted I/O from repeated spilling of the same tuples. Too
+ * many will result in lots of memory wasted buffering the spill files (which
+ * could instead be spent on a larger hash table).
+ *
+ * For reading from tapes, the buffer size must be a multiple of
+ * BLCKSZ. Larger values help when reading from multiple tapes concurrently,
+ * but that doesn't happen in HashAgg, so we simply use BLCKSZ. Writing to a
+ * tape always uses a buffer of size BLCKSZ.
+ */
+#define HASHAGG_PARTITION_FACTOR 1.50
+#define HASHAGG_MIN_PARTITIONS 4
+#define HASHAGG_MAX_PARTITIONS 256
+#define HASHAGG_MIN_BUCKETS 256
+#define HASHAGG_READ_BUFFER_SIZE BLCKSZ
+#define HASHAGG_WRITE_BUFFER_SIZE BLCKSZ
+
+/*
+ * Track all tapes needed for a HashAgg that spills. We don't know the maximum
+ * number of tapes needed at the start of the algorithm (because it can
+ * recurse), so one tape set is allocated and extended as needed for new
+ * tapes. Once a particular tape has been fully read, it is rewound for
+ * writing and put on the free list.
+ *
+ * Tapes' buffers can take up substantial memory when many tapes are open at
+ * once. We only need one tape open at a time in read mode (using a buffer
+ * that's a multiple of BLCKSZ); but we need up to HASHAGG_MAX_PARTITIONS
+ * tapes open in write mode (each requiring a buffer of size BLCKSZ).
+ */
+typedef struct HashTapeInfo
+{
+ LogicalTapeSet *tapeset;
+ int ntapes;
+ int *freetapes;
+ int nfreetapes;
+} HashTapeInfo;
+
+/*
+ * Represents partitioned spill data for a single hashtable. Contains the
+ * necessary information to route tuples to the correct partition, and to
+ * transform the spilled data into new batches.
+ *
+ * The high bits are used for partition selection (when recursing, we ignore
+ * the bits that have already been used for partition selection at an earlier
+ * level).
+ */
+typedef struct HashAggSpill
+{
+ HashTapeInfo *tapeinfo; /* borrowed reference to tape info */
+ int npartitions; /* number of partitions */
+ int *partitions; /* spill partition tape numbers */
+ int64 *ntuples; /* number of tuples in each partition */
+ uint32 mask; /* mask to find partition from hash value */
+ int shift; /* after masking, shift by this amount */
+} HashAggSpill;
+
+/*
+ * Represents work to be done for one pass of hash aggregation (with only one
+ * grouping set).
+ *
+ * Also tracks the bits of the hash already used for partition selection by
+ * earlier iterations, so that this batch can use new bits. If all bits have
+ * already been used, no partitioning will be done (any spilled data will go
+ * to a single output tape).
+ */
+typedef struct HashAggBatch
+{
+ int setno; /* grouping set */
+ int used_bits; /* number of bits of hash already used */
+ HashTapeInfo *tapeinfo; /* borrowed reference to tape info */
+ int input_tapenum; /* input partition tape */
+ int64 input_tuples; /* number of tuples in this batch */
+} HashAggBatch;
+
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
static void initialize_phase(AggState *aggstate, int newphase);
static TupleTableSlot *fetch_input_tuple(AggState *aggstate);
@@ -275,11 +386,38 @@ static Bitmapset *find_unaggregated_cols(AggState *aggstate);
static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos);
static void build_hash_tables(AggState *aggstate);
static void build_hash_table(AggState *aggstate, int setno, long nbuckets);
+static void hashagg_recompile_expressions(AggState *aggstate);
+static long hash_choose_num_buckets(AggState *aggstate,
+ long estimated_nbuckets,
+ Size memory);
+static int hash_choose_num_partitions(uint64 input_groups,
+ double hashentrysize,
+ int used_bits,
+ int *log2_npartitions);
static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash);
static void lookup_hash_entries(AggState *aggstate);
static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
static void agg_fill_hash_table(AggState *aggstate);
+static bool agg_refill_hash_table(AggState *aggstate);
static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
+static TupleTableSlot *agg_retrieve_hash_table_in_memory(AggState *aggstate);
+static void hash_agg_check_limits(AggState *aggstate);
+static void hashagg_finish_initial_spills(AggState *aggstate);
+static void hashagg_reset_spill_state(AggState *aggstate);
+static HashAggBatch *hashagg_batch_new(HashTapeInfo *tapeinfo,
+ int input_tapenum, int setno,
+ int64 input_tuples, int used_bits);
+static MinimalTuple hashagg_batch_read(HashAggBatch *batch, uint32 *hashp);
+static void hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo,
+ int used_bits, uint64 input_tuples,
+ double hashentrysize);
+static Size hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot,
+ uint32 hash);
+static void hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill,
+ int setno);
+static void hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *dest,
+ int ndest);
+static void hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum);
static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
static void build_pertrans_for_aggref(AggStatePerTrans pertrans,
AggState *aggstate, EState *estate,
@@ -1261,7 +1399,7 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
}
/*
- * (Re-)initialize the hash table(s) to empty.
+ * Initialize the hash table(s).
*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
@@ -1272,9 +1410,9 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
* We have a separate hashtable and associated perhash data structure for each
* grouping set for which we're doing hashing.
*
- * The contents of the hash tables always live in the hashcontext's per-tuple
- * memory context (there is only one of these for all tables together, since
- * they are all reset at the same time).
+ * The hash tables and their contents always live in the hashcontext's
+ * per-tuple memory context (there is only one of these for all tables
+ * together, since they are all reset at the same time).
*/
static void
build_hash_tables(AggState *aggstate)
@@ -1284,11 +1422,24 @@ build_hash_tables(AggState *aggstate)
for (setno = 0; setno < aggstate->num_hashes; ++setno)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
+ long nbuckets;
+ Size memory;
Assert(perhash->aggnode->numGroups > 0);
- build_hash_table(aggstate, setno, perhash->aggnode->numGroups);
+ memory = aggstate->hash_mem_limit / aggstate->num_hashes;
+
+ /* choose reasonable number of buckets per hashtable */
+ nbuckets = hash_choose_num_buckets(
+ aggstate, perhash->aggnode->numGroups, memory);
+
+ build_hash_table(aggstate, setno, nbuckets);
}
+
+ aggstate->hash_alloc_current = MemoryContextMemAllocated(
+ aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ aggstate->hash_alloc_last = aggstate->hash_alloc_current;
+ aggstate->hash_ngroups_current = 0;
}
/*
@@ -1298,7 +1449,7 @@ static void
build_hash_table(AggState *aggstate, int setno, long nbuckets)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- MemoryContext metacxt = aggstate->ss.ps.state->es_query_cxt;
+ MemoryContext metacxt;
MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
Size additionalsize;
@@ -1306,6 +1457,12 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
+ /*
+ * We don't try to preserve any part of the hash table. Set the metacxt to
+ * hashcxt, which will be reset for each batch.
+ */
+ metacxt = hashcxt;
+
/*
* Used to make sure initial hash table allocation does not exceed
* work_mem. Note that the estimate does not include space for
@@ -1481,14 +1638,250 @@ hash_agg_entry_size(int numAggs, Size tupleWidth, Size transitionSpace)
transitionSpace;
}
+/*
+ * Recompile the expressions for advancing aggregates while hashing. This is
+ * necessary for certain kinds of state changes that affect the resulting
+ * expression. For instance, changing aggstate->hash_ever_spilled or
+ * aggstate->ss.ps.outerops requires recompilation.
+ *
+ * A compiled expression where hash_ever_spilled is true will work even when
+ * hash_spill_mode is false, because it merely introduces additional branches
+ * that are unnecessary when hash_spill_mode is false. That allows us to only
+ * recompile when hash_ever_spilled changes, rather than every time
+ * hash_spill_mode changes.
+ */
+static void
+hashagg_recompile_expressions(AggState *aggstate)
+{
+ AggStatePerPhase phase;
+
+ Assert(aggstate->aggstrategy == AGG_HASHED ||
+ aggstate->aggstrategy == AGG_MIXED);
+
+ if (aggstate->aggstrategy == AGG_HASHED)
+ phase = &aggstate->phases[0];
+ else /* AGG_MIXED */
+ phase = &aggstate->phases[1];
+
+ phase->evaltrans = ExecBuildAggTrans(
+ aggstate, phase,
+ aggstate->aggstrategy == AGG_MIXED, /* dosort */
+ true, /* dohash */
+ aggstate->hash_ever_spilled);
+}
+
+/*
+ * Set limits that trigger spilling to avoid exceeding work_mem. Consider the
+ * number of partitions we expect to create (if we do spill).
+ *
+ * There are two limits: a memory limit, and also an ngroups limit. The
+ * ngroups limit becomes important when we expect transition values to grow
+ * substantially larger than the initial value.
+ */
+void
+hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
+ Size *mem_limit, long *ngroups_limit, int *num_partitions)
+{
+ int npartitions;
+ Size partition_mem;
+
+ /* no attempt to obey work_mem */
+ if (hashagg_mem_overflow)
+ {
+ *mem_limit = SIZE_MAX;
+ *ngroups_limit = LONG_MAX;
+ return;
+ }
+
+ /* if not expected to spill, use all of work_mem */
+ if (input_groups * hashentrysize < work_mem * 1024L)
+ {
+ *mem_limit = work_mem * 1024L;
+ *ngroups_limit = *mem_limit / hashentrysize;
+ return;
+ }
+
+ /*
+ * Calculate expected memory requirements for spilling, which is the size
+ * of the buffers needed for all the tapes that need to be open at
+ * once. Then, subtract that from the memory available for holding hash
+ * tables.
+ */
+ npartitions = hash_choose_num_partitions(input_groups,
+ hashentrysize,
+ used_bits,
+ NULL);
+ if (num_partitions != NULL)
+ *num_partitions = npartitions;
+
+ partition_mem =
+ HASHAGG_READ_BUFFER_SIZE +
+ HASHAGG_WRITE_BUFFER_SIZE * npartitions;
+
+ /*
+ * Don't set the limit below 3/4 of work_mem. In that case, we are at the
+ * minimum number of partitions, so we aren't going to dramatically exceed
+ * work mem anyway.
+ */
+ if (work_mem * 1024L > 4 * partition_mem)
+ *mem_limit = work_mem * 1024L - partition_mem;
+ else
+ *mem_limit = work_mem * 1024L * 0.75;
+
+ if (*mem_limit > hashentrysize)
+ *ngroups_limit = *mem_limit / hashentrysize;
+ else
+ *ngroups_limit = 1;
+}
+
+/*
+ * hash_agg_check_limits
+ *
+ * After adding a new group to the hash table, check whether we need to enter
+ * spill mode. Allocations may happen without adding new groups (for instance,
+ * if the transition state size grows), so this check is imperfect.
+ *
+ * Memory usage is tracked by how much is allocated to the underlying memory
+ * context, not individual chunks. This is more accurate because it accounts
+ * for all memory in the context, and also accounts for fragmentation and
+ * other forms of overhead and waste that can be difficult to estimate. It's
+ * also cheaper because we don't have to track each chunk.
+ *
+ * When memory is first allocated to a memory context, it is not actually
+ * used. So when the next allocation happens, we consider the
+ * previously-allocated amount to be the memory currently used.
+ */
+static void
+hash_agg_check_limits(AggState *aggstate)
+{
+ Size allocation;
+
+ /*
+ * Even if already in spill mode, it's possible for memory usage to grow,
+ * and we should still track it for the purposes of EXPLAIN ANALYZE.
+ */
+ allocation = MemoryContextMemAllocated(
+ aggstate->hashcontext->ecxt_per_tuple_memory, true);
+
+ /* has allocation grown since the last observation? */
+ if (allocation > aggstate->hash_alloc_current)
+ {
+ aggstate->hash_alloc_last = aggstate->hash_alloc_current;
+ aggstate->hash_alloc_current = allocation;
+ }
+
+ if (aggstate->hash_alloc_last > aggstate->hash_mem_peak)
+ aggstate->hash_mem_peak = aggstate->hash_alloc_last;
+
+ /*
+ * Don't spill unless there's at least one group in the hash table so we
+ * can be sure to make progress even in edge cases.
+ */
+ if (aggstate->hash_ngroups_current > 0 &&
+ (aggstate->hash_alloc_last > aggstate->hash_mem_limit ||
+ aggstate->hash_ngroups_current > aggstate->hash_ngroups_limit))
+ {
+ aggstate->hash_spill_mode = true;
+
+ if (!aggstate->hash_ever_spilled)
+ {
+ aggstate->hash_ever_spilled = true;
+ aggstate->hash_spills = palloc0(
+ sizeof(HashAggSpill) * aggstate->num_hashes);
+ aggstate->hash_tapeinfo = palloc0(sizeof(HashTapeInfo));
+ hashagg_recompile_expressions(aggstate);
+ }
+ }
+}
+
+/*
+ * Choose a reasonable number of buckets for the initial hash table size.
+ */
+static long
+hash_choose_num_buckets(AggState *aggstate, long ngroups, Size memory)
+{
+ long max_nbuckets;
+ long nbuckets = ngroups;
+
+ max_nbuckets = memory / aggstate->hashentrysize;
+
+ /*
+ * Leave room for slop to avoid a case where the initial hash table size
+ * exceeds the memory limit (though that may still happen in edge cases).
+ */
+ max_nbuckets *= 0.75;
+
+ if (nbuckets > max_nbuckets)
+ nbuckets = max_nbuckets;
+ if (nbuckets < HASHAGG_MIN_BUCKETS)
+ nbuckets = HASHAGG_MIN_BUCKETS;
+ return nbuckets;
+}
+
+/*
+ * Determine the number of partitions to create when spilling, which will
+ * always be a power of two. If log2_npartitions is non-NULL, set
+ * *log2_npartitions to the log2() of the number of partitions.
+ */
+static int
+hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
+ int used_bits, int *log2_npartitions)
+{
+ Size mem_wanted;
+ int partition_limit;
+ int npartitions;
+ int partition_bits;
+
+ /*
+ * Avoid creating so many partitions that the memory requirements of the
+ * open partition files are greater than 1/4 of work_mem.
+ */
+ partition_limit =
+ (work_mem * 1024L * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
+ HASHAGG_WRITE_BUFFER_SIZE;
+
+ /* pessimistically estimate that each input tuple creates a new group */
+ mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
+
+ /* make enough partitions so that each one is likely to fit in memory */
+ npartitions = 1 + (mem_wanted / (work_mem * 1024L));
+
+ if (npartitions > partition_limit)
+ npartitions = partition_limit;
+
+ if (npartitions < HASHAGG_MIN_PARTITIONS)
+ npartitions = HASHAGG_MIN_PARTITIONS;
+ if (npartitions > HASHAGG_MAX_PARTITIONS)
+ npartitions = HASHAGG_MAX_PARTITIONS;
+
+ /* ceil(log2(npartitions)) */
+ partition_bits = my_log2(npartitions);
+
+ /* make sure that we don't exhaust the hash bits */
+ if (partition_bits + used_bits >= 32)
+ partition_bits = 32 - used_bits;
+
+ if (log2_npartitions != NULL)
+ *log2_npartitions = partition_bits;
+
+ /* number of partitions will be a power of two */
+ npartitions = 1L << partition_bits;
+
+ return npartitions;
+}
+
/*
* Find or create a hashtable entry for the tuple group containing the current
* tuple (already set in tmpcontext's outertuple slot), in the current grouping
* set (which the caller must have selected - note that initialize_aggregate
* depends on this).
*
- * When called, CurrentMemoryContext should be the per-query context. The
- * already-calculated hash value for the tuple must be specified.
+ * When called, CurrentMemoryContext should be the per-query context, and
+ * the tuple's already-computed hash value must be passed in.
+ *
+ * If the hash table is at the memory limit, then only find existing hashtable
+ * entries; don't create new ones. If a tuple's group is not already present
+ * in the hash table for the current grouping set, return NULL and the caller
+ * will spill it to disk.
*/
static AggStatePerGroup
lookup_hash_entry(AggState *aggstate, uint32 hash)
@@ -1496,16 +1889,27 @@ lookup_hash_entry(AggState *aggstate, uint32 hash)
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
TupleHashEntryData *entry;
- bool isnew;
+ bool isnew = false;
+ bool *p_isnew;
+
+ /* if hash table already spilled, don't create new entries */
+ p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
/* find or create the hashtable entry using the filtered tuple */
- entry = LookupTupleHashEntryHash(perhash->hashtable, hashslot, &isnew,
+ entry = LookupTupleHashEntryHash(perhash->hashtable, hashslot, p_isnew,
hash);
+ if (entry == NULL)
+ return NULL;
+
if (isnew)
{
- AggStatePerGroup pergroup;
- int transno;
+ AggStatePerGroup pergroup;
+ int transno;
+
+ aggstate->hash_ngroups_current++;
+ if (!hashagg_mem_overflow)
+ hash_agg_check_limits(aggstate);
pergroup = (AggStatePerGroup)
MemoryContextAlloc(perhash->hashtable->tablecxt,
@@ -1533,23 +1937,51 @@ lookup_hash_entry(AggState *aggstate, uint32 hash)
* returning an array of pergroup pointers suitable for advance_aggregates.
*
* Be aware that lookup_hash_entry can reset the tmpcontext.
+ *
+ * Some entries may be left NULL if we have reached the limit and have begun
+ * to spill. The same tuple will belong to different groups for each set, so
+ * may match a group already in memory for one set and match a group not in
+ * memory for another set. If we have begun to spill and a tuple doesn't match
+ * a group in memory for a particular set, it will be spilled.
+ *
+ * NB: It's possible to spill the same tuple for several different grouping
+ * sets. This may seem wasteful, but it's actually a trade-off: if we spill
+ * the tuple multiple times for multiple grouping sets, it can be partitioned
+ * for each grouping set, making the refilling of the hash table very
+ * efficient.
*/
static void
lookup_hash_entries(AggState *aggstate)
{
- int numHashes = aggstate->num_hashes;
AggStatePerGroup *pergroup = aggstate->hash_pergroup;
int setno;
- for (setno = 0; setno < numHashes; setno++)
+ for (setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
+ AggStatePerHash perhash = &aggstate->perhash[setno];
uint32 hash;
select_current_set(aggstate, setno, true);
prepare_hash_slot(aggstate);
hash = TupleHashTableHash(perhash->hashtable, perhash->hashslot);
pergroup[setno] = lookup_hash_entry(aggstate, hash);
+
+ /* check to see if we need to spill the tuple for this grouping set */
+ if (pergroup[setno] == NULL)
+ {
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
+ TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
+
+ if (spill->partitions == NULL)
+ hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
+ perhash->aggnode->numGroups,
+ aggstate->hashentrysize);
+
+ hashagg_spill_tuple(spill, slot, hash);
+
+ aggstate->hash_disk_used = LogicalTapeSetBlocks(
+ aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
+ }
}
}
@@ -1872,6 +2304,12 @@ agg_retrieve_direct(AggState *aggstate)
if (TupIsNull(outerslot))
{
/* no more outer-plan tuples available */
+
+ /* if we built hash tables, finalize any spills */
+ if (aggstate->aggstrategy == AGG_MIXED &&
+ aggstate->current_phase == 1)
+ hashagg_finish_initial_spills(aggstate);
+
if (hasGroupingSets)
{
aggstate->input_done = true;
@@ -1974,6 +2412,9 @@ agg_fill_hash_table(AggState *aggstate)
ResetExprContext(aggstate->tmpcontext);
}
+ /* finalize spills, if any */
+ hashagg_finish_initial_spills(aggstate);
+
aggstate->table_filled = true;
/* Initialize to walk the first hash table */
select_current_set(aggstate, 0, true);
@@ -1981,11 +2422,196 @@ agg_fill_hash_table(AggState *aggstate)
&aggstate->perhash[0].hashiter);
}
+/*
+ * If any data was spilled during hash aggregation, reset the hash table and
+ * reprocess one batch of spilled data. After reprocessing a batch, the hash
+ * table will again contain data, ready to be consumed by
+ * agg_retrieve_hash_table_in_memory().
+ *
+ * Should only be called after all in-memory hash table entries have been
+ * consumed.
+ *
+ * Return false when input is exhausted and there's no more work to be done;
+ * otherwise return true.
+ */
+static bool
+agg_refill_hash_table(AggState *aggstate)
+{
+ HashAggBatch *batch;
+ HashAggSpill spill;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ long nbuckets;
+ int setno;
+
+ if (aggstate->hash_batches == NIL)
+ return false;
+
+ spill.npartitions = 0;
+ spill.partitions = NULL;
+ /*
+ * Each spill file contains spilled data for only a single grouping
+ * set. We want to ignore all others, which is done by setting the other
+ * pergroups to NULL.
+ */
+ memset(aggstate->all_pergroups, 0,
+ sizeof(AggStatePerGroup) *
+ (aggstate->maxsets + aggstate->num_hashes));
+
+ batch = linitial(aggstate->hash_batches);
+ aggstate->hash_batches = list_delete_first(aggstate->hash_batches);
+
+ hash_agg_set_limits(aggstate->hashentrysize, batch->input_tuples,
+ batch->used_bits, &aggstate->hash_mem_limit,
+ &aggstate->hash_ngroups_limit, NULL);
+
+ /*
+ * Free memory and rebuild a single hash table for this batch's grouping
+ * set. Estimate the number of groups to be the number of input tuples in
+ * this batch.
+ */
+ ReScanExprContext(aggstate->hashcontext);
+ for (setno = 0; setno < aggstate->num_hashes; setno++)
+ aggstate->perhash[setno].hashtable = NULL;
+
+ nbuckets = hash_choose_num_buckets(
+ aggstate, batch->input_tuples, aggstate->hash_mem_limit);
+ build_hash_table(aggstate, batch->setno, nbuckets);
+ aggstate->hash_alloc_current = MemoryContextMemAllocated(
+ aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ aggstate->hash_alloc_last = aggstate->hash_alloc_current;
+ aggstate->hash_ngroups_current = 0;
+
+ Assert(aggstate->current_phase == 0);
+
+ if (aggstate->phase->aggstrategy == AGG_MIXED)
+ {
+ aggstate->current_phase = 1;
+ aggstate->phase = &aggstate->phases[aggstate->current_phase];
+ }
+
+ /*
+ * The first pass (agg_fill_hash_table) reads whatever kind of slot comes
+ * from the outer plan, and considers the slot fixed. But spilled tuples
+ * are always MinimalTuples, so if that's different from the outer plan we
+ * need to change it and recompile the aggregate expressions.
+ */
+ if (aggstate->ss.ps.outerops != &TTSOpsMinimalTuple)
+ {
+ aggstate->ss.ps.outerops = &TTSOpsMinimalTuple;
+ hashagg_recompile_expressions(aggstate);
+ }
+
+ LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum,
+ HASHAGG_READ_BUFFER_SIZE);
+ for (;;)
+ {
+ TupleTableSlot *slot = aggstate->hash_spill_slot;
+ MinimalTuple tuple;
+ uint32 hash;
+
+ CHECK_FOR_INTERRUPTS();
+
+ tuple = hashagg_batch_read(batch, &hash);
+ if (tuple == NULL)
+ break;
+
+ ExecStoreMinimalTuple(tuple, slot, true);
+ aggstate->tmpcontext->ecxt_outertuple = slot;
+
+ select_current_set(aggstate, batch->setno, true);
+ prepare_hash_slot(aggstate);
+ aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(aggstate, hash);
+
+ /* if there's no memory for a new group, spill */
+ if (aggstate->hash_pergroup[batch->setno] == NULL)
+ {
+ /*
+ * Estimate the number of groups for this batch as the total
+ * number of tuples in its input file. Although that's a worst
+ * case, it's not bad here for two reasons: (1) overestimating
+ * is better than underestimating; and (2) we've already
+ * scanned the relation once, so it's likely that we've
+ * already finalized many of the common values.
+ */
+ if (spill.partitions == NULL)
+ hashagg_spill_init(&spill, tapeinfo, batch->used_bits,
+ batch->input_tuples,
+ aggstate->hashentrysize);
+
+ hashagg_spill_tuple(&spill, slot, hash);
+
+ aggstate->hash_disk_used = LogicalTapeSetBlocks(
+ aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
+ }
+
+ /* Advance the aggregates (or combine functions) */
+ advance_aggregates(aggstate);
+
+ /*
+ * Reset per-input-tuple context after each tuple, but note that the
+ * hash lookups do this too
+ */
+ ResetExprContext(aggstate->tmpcontext);
+ }
+
+ hashagg_tapeinfo_release(tapeinfo, batch->input_tapenum);
+
+ aggstate->current_phase = 0;
+ aggstate->phase = &aggstate->phases[aggstate->current_phase];
+
+ /* update hashentrysize estimate based on contents */
+ if (aggstate->hash_ngroups_current > 0)
+ {
+ aggstate->hashentrysize = (double)aggstate->hash_alloc_last /
+ (double)aggstate->hash_ngroups_current;
+ }
+
+ hashagg_spill_finish(aggstate, &spill, batch->setno);
+ aggstate->hash_spill_mode = false;
+
+ pfree(batch);
+
+ /* Initialize to walk the first hash table */
+ select_current_set(aggstate, 0, true);
+ ResetTupleHashIterator(aggstate->perhash[0].hashtable,
+ &aggstate->perhash[0].hashiter);
+
+ return true;
+}
+
/*
* ExecAgg for hashed case: retrieving groups from hash table
+ *
+ * After exhausting in-memory tuples, also try refilling the hash table using
+ * previously-spilled tuples. Only returns NULL after all in-memory and
+ * spilled tuples are exhausted.
*/
static TupleTableSlot *
agg_retrieve_hash_table(AggState *aggstate)
+{
+ TupleTableSlot *result = NULL;
+
+ while (result == NULL)
+ {
+ result = agg_retrieve_hash_table_in_memory(aggstate);
+ if (result == NULL)
+ {
+ if (!agg_refill_hash_table(aggstate))
+ {
+ aggstate->agg_done = true;
+ break;
+ }
+ }
+ }
+
+ return result;
+}
+
+/*
+ * Retrieve the groups from the in-memory hash tables without considering any
+ * spilled tuples.
+ */
+static TupleTableSlot *
+agg_retrieve_hash_table_in_memory(AggState *aggstate)
{
ExprContext *econtext;
AggStatePerAgg peragg;
@@ -2014,7 +2640,7 @@ agg_retrieve_hash_table(AggState *aggstate)
* We loop retrieving groups until we find one satisfying
* aggstate->ss.ps.qual
*/
- while (!aggstate->agg_done)
+ for (;;)
{
TupleTableSlot *hashslot = perhash->hashslot;
int i;
@@ -2039,14 +2665,15 @@ agg_retrieve_hash_table(AggState *aggstate)
perhash = &aggstate->perhash[aggstate->current_set];
+ if (perhash->hashtable == NULL)
+ return NULL;
+
ResetTupleHashIterator(perhash->hashtable, &perhash->hashiter);
continue;
}
else
{
- /* No more hashtables, so done */
- aggstate->agg_done = true;
return NULL;
}
}
@@ -2103,6 +2730,296 @@ agg_retrieve_hash_table(AggState *aggstate)
return NULL;
}
+/*
+ * Assign unused tapes to spill partitions, extending the tape set if
+ * necessary.
+ */
+static void
+hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions,
+ int npartitions)
+{
+ int partidx = 0;
+
+ /* use free tapes if available */
+ while (partidx < npartitions && tapeinfo->nfreetapes > 0)
+ partitions[partidx++] = tapeinfo->freetapes[--tapeinfo->nfreetapes];
+
+ if (tapeinfo->tapeset == NULL)
+ tapeinfo->tapeset = LogicalTapeSetCreate(npartitions, NULL, NULL, -1);
+ else if (partidx < npartitions)
+ {
+ tapeinfo->tapeset = LogicalTapeSetExtend(
+ tapeinfo->tapeset, npartitions - partidx);
+ }
+
+ while (partidx < npartitions)
+ partitions[partidx++] = tapeinfo->ntapes++;
+}
+
+/*
+ * After a tape has already been written to and then read, this function
+ * rewinds it for writing and adds it to the free list.
+ */
+static void
+hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum)
+{
+ LogicalTapeRewindForWrite(tapeinfo->tapeset, tapenum);
+ if (tapeinfo->freetapes == NULL)
+ tapeinfo->freetapes = palloc(sizeof(int));
+ else
+ tapeinfo->freetapes = repalloc(
+ tapeinfo->freetapes, sizeof(int) * (tapeinfo->nfreetapes + 1));
+ tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
+}
+
+/*
+ * hashagg_spill_init
+ *
+ * Called after we determined that spilling is necessary. Chooses the number
+ * of partitions to create, and initializes them.
+ */
+static void
+hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
+ uint64 input_groups, double hashentrysize)
+{
+ int npartitions;
+ int partition_bits;
+
+ npartitions = hash_choose_num_partitions(
+ input_groups, hashentrysize, used_bits, &partition_bits);
+
+ spill->partitions = palloc0(sizeof(int) * npartitions);
+ spill->ntuples = palloc0(sizeof(int64) * npartitions);
+
+ hashagg_tapeinfo_assign(tapeinfo, spill->partitions, npartitions);
+
+ spill->tapeinfo = tapeinfo;
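+ /* use the next-highest unused hash bits to select the partition */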
+ spill->shift = 32 - used_bits - partition_bits;
+ spill->mask = (npartitions - 1) << spill->shift;
+ spill->npartitions = npartitions;
+}
+
+/*
+ * hashagg_spill_tuple
+ *
+ * No room for new groups in the hash table. Save for later in the appropriate
+ * partition.
+ */
+static Size
+hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot, uint32 hash)
+{
+ LogicalTapeSet *tapeset = spill->tapeinfo->tapeset;
+ int partition;
+ MinimalTuple tuple;
+ int tapenum;
+ int total_written = 0;
+ bool shouldFree;
+
+ Assert(spill->partitions != NULL);
+
+ /* may contain unnecessary attributes, consider projecting? */
+ tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
+
+ partition = (hash & spill->mask) >> spill->shift;
+ spill->ntuples[partition]++;
+
+ tapenum = spill->partitions[partition];
+
+ LogicalTapeWrite(tapeset, tapenum, (void *) &hash, sizeof(uint32));
+ total_written += sizeof(uint32);
+
+ LogicalTapeWrite(tapeset, tapenum, (void *) tuple, tuple->t_len);
+ total_written += tuple->t_len;
+
+ if (shouldFree)
+ pfree(tuple);
+
+ return total_written;
+}
+
+/*
+ * hashagg_batch_new
+ *
+ * Construct a HashAggBatch item, which represents one iteration of HashAgg to
+ * be done. Should be called in the aggregate's memory context.
+ */
+static HashAggBatch *
+hashagg_batch_new(HashTapeInfo *tapeinfo, int tapenum, int setno,
+ int64 input_tuples, int used_bits)
+{
+ HashAggBatch *batch = palloc0(sizeof(HashAggBatch));
+
+ batch->setno = setno;
+ batch->used_bits = used_bits;
+ batch->tapeinfo = tapeinfo;
+ batch->input_tapenum = tapenum;
+ batch->input_tuples = input_tuples;
+
+ return batch;
+}
+
+/*
+ * hashagg_batch_read
+ * read the next tuple from a batch's tape. Return NULL if no more.
+ */
+static MinimalTuple
+hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
+{
+ LogicalTapeSet *tapeset = batch->tapeinfo->tapeset;
+ int tapenum = batch->input_tapenum;
+ MinimalTuple tuple;
+ uint32 t_len;
+ size_t nread;
+ uint32 hash;
+
+ nread = LogicalTapeRead(tapeset, tapenum, &hash, sizeof(uint32));
+ if (nread == 0)
+ return NULL;
+ if (nread != sizeof(uint32))
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("unexpected EOF for tape %d: requested %zu bytes, read %zu bytes",
+ tapenum, sizeof(uint32), nread)));
+ if (hashp != NULL)
+ *hashp = hash;
+
+ nread = LogicalTapeRead(tapeset, tapenum, &t_len, sizeof(t_len));
+ if (nread != sizeof(uint32))
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("unexpected EOF for tape %d: requested %zu bytes, read %zu bytes",
+ tapenum, sizeof(uint32), nread)));
+
+ tuple = (MinimalTuple) palloc(t_len);
+ tuple->t_len = t_len;
+
+ nread = LogicalTapeRead(tapeset, tapenum,
+ (void *)((char *)tuple + sizeof(uint32)),
+ t_len - sizeof(uint32));
+ if (nread != t_len - sizeof(uint32))
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("unexpected EOF for tape %d: requested %zu bytes, read %zu bytes",
+ tapenum, t_len - sizeof(uint32), nread)));
+
+ return tuple;
+}
+
+/*
+ * hashagg_finish_initial_spills
+ *
+ * After a HashAggBatch has been processed, it may have spilled tuples to
+ * disk. If so, turn the spilled partitions into new batches that must later
+ * be executed.
+ */
+static void
+hashagg_finish_initial_spills(AggState *aggstate)
+{
+ int setno;
+
+ if (aggstate->hash_spills == NULL)
+ return;
+
+ /* update hashentrysize estimate based on contents */
+ Assert(aggstate->hash_ngroups_current > 0);
+ aggstate->hashentrysize = (double)aggstate->hash_alloc_last /
+ (double)aggstate->hash_ngroups_current;
+
+ for (setno = 0; setno < aggstate->num_hashes; setno++)
+ hashagg_spill_finish(aggstate, &aggstate->hash_spills[setno], setno);
+
+ aggstate->hash_spill_mode = false;
+
+ /*
+ * We're not processing tuples from outer plan any more; only processing
+ * batches of spilled tuples. The initial spill structures are no longer
+ * needed.
+ */
+ pfree(aggstate->hash_spills);
+ aggstate->hash_spills = NULL;
+}
+
+/*
+ * hashagg_spill_finish
+ *
+ * Transform spill partitions into new batches.
+ */
+static void
+hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
+{
+ int i;
+ int used_bits = 32 - spill->shift;
+
+ if (spill->npartitions == 0)
+ return; /* didn't spill */
+
+ for (i = 0; i < spill->npartitions; i++)
+ {
+ int tapenum = spill->partitions[i];
+ MemoryContext oldContext;
+ HashAggBatch *new_batch;
+
+ oldContext = MemoryContextSwitchTo(aggstate->ss.ps.state->es_query_cxt);
+ new_batch = hashagg_batch_new(aggstate->hash_tapeinfo,
+ tapenum, setno, spill->ntuples[i],
+ used_bits);
+ aggstate->hash_batches = lcons(new_batch, aggstate->hash_batches);
+ aggstate->hash_batches_used++;
+ MemoryContextSwitchTo(oldContext);
+ }
+
+ pfree(spill->ntuples);
+ pfree(spill->partitions);
+}
+
+/*
+ * Free resources related to a spilled HashAgg.
+ */
+static void
+hashagg_reset_spill_state(AggState *aggstate)
+{
+ ListCell *lc;
+
+ /* free spills from initial pass */
+ if (aggstate->hash_spills != NULL)
+ {
+ int setno;
+
+ for (setno = 0; setno < aggstate->num_hashes; setno++)
+ {
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
+ if (spill->ntuples != NULL)
+ pfree(spill->ntuples);
+ if (spill->partitions != NULL)
+ pfree(spill->partitions);
+ }
+ pfree(aggstate->hash_spills);
+ aggstate->hash_spills = NULL;
+ }
+
+ /* free batches */
+ foreach(lc, aggstate->hash_batches)
+ {
+ HashAggBatch *batch = (HashAggBatch*) lfirst(lc);
+ pfree(batch);
+ }
+ list_free(aggstate->hash_batches);
+ aggstate->hash_batches = NIL;
+
+ /* close tape set */
+ if (aggstate->hash_tapeinfo != NULL)
+ {
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ if (tapeinfo->tapeset != NULL)
+ LogicalTapeSetClose(tapeinfo->tapeset);
+ if (tapeinfo->freetapes != NULL)
+ pfree(tapeinfo->freetapes);
+ pfree(tapeinfo);
+ aggstate->hash_tapeinfo = NULL;
+ }
+}
+
+
/* -----------------
* ExecInitAgg
*
@@ -2287,6 +3204,10 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggstate->ss.ps.outeropsfixed = false;
}
+ if (use_hashing)
+ aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc,
+ &TTSOpsMinimalTuple);
+
/*
* Initialize result type, slot and projection.
*/
@@ -2512,9 +3433,22 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
if (use_hashing)
{
+ Plan *outerplan = outerPlan(node);
+ long totalGroups = 0;
+ int i;
+
/* this is an array of pointers, not structures */
aggstate->hash_pergroup = pergroups;
+ aggstate->hashentrysize = hash_agg_entry_size(
+ aggstate->numtrans, outerplan->plan_width, node->transitionSpace);
+
+ for (i = 0; i < aggstate->num_hashes; i++)
+ totalGroups += aggstate->perhash[i].aggnode->numGroups;
+
+ hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
+ &aggstate->hash_mem_limit,
+ &aggstate->hash_ngroups_limit, NULL);
find_hash_columns(aggstate);
build_hash_tables(aggstate);
aggstate->table_filled = false;
@@ -2922,7 +3856,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
else
Assert(false);
- phase->evaltrans = ExecBuildAggTrans(aggstate, phase, dosort, dohash);
+ phase->evaltrans = ExecBuildAggTrans(aggstate, phase, dosort, dohash,
+ false);
}
@@ -3417,6 +4352,8 @@ ExecEndAgg(AggState *node)
if (node->sort_out)
tuplesort_end(node->sort_out);
+ hashagg_reset_spill_state(node);
+
for (transno = 0; transno < node->numtrans; transno++)
{
AggStatePerTrans pertrans = &node->pertrans[transno];
@@ -3472,12 +4409,13 @@ ExecReScanAgg(AggState *node)
return;
/*
- * If we do have the hash table, and the subplan does not have any
- * parameter changes, and none of our own parameter changes affect
- * input expressions of the aggregated functions, then we can just
- * rescan the existing hash table; no need to build it again.
+ * If we do have the hash table, and it never spilled, and the subplan
+ * does not have any parameter changes, and none of our own parameter
+ * changes affect input expressions of the aggregated functions, then
+ * we can just rescan the existing hash table; no need to build it
+ * again.
*/
- if (outerPlan->chgParam == NULL &&
+ if (outerPlan->chgParam == NULL && !node->hash_ever_spilled &&
!bms_overlap(node->ss.ps.chgParam, aggnode->aggParams))
{
ResetTupleHashIterator(node->perhash[0].hashtable,
@@ -3534,11 +4472,33 @@ ExecReScanAgg(AggState *node)
*/
if (node->aggstrategy == AGG_HASHED || node->aggstrategy == AGG_MIXED)
{
+ const TupleTableSlotOps *outerops = ExecGetResultSlotOps(
+ outerPlanState(&node->ss), &node->ss.ps.outeropsfixed);
+
+ hashagg_reset_spill_state(node);
+
+ node->hash_ever_spilled = false;
+ node->hash_spill_mode = false;
+ node->hash_alloc_last = 0;
+ node->hash_alloc_current = 0;
+ node->hash_ngroups_current = 0;
+
+ /* reset stats */
+ node->hash_mem_peak = 0;
+ node->hash_disk_used = 0;
+ node->hash_batches_used = 0;
+
ReScanExprContext(node->hashcontext);
/* Rebuild an empty hash table */
build_hash_tables(node);
node->table_filled = false;
/* iterator will be reset when the table is filled */
+
+ if (node->ss.ps.outerops != outerops)
+ {
+ node->ss.ps.outerops = outerops;
+ hashagg_recompile_expressions(node);
+ }
}
if (node->aggstrategy != AGG_HASHED)
diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c
index cea0d6fa5ce..7246fc2b33f 100644
--- a/src/backend/jit/llvm/llvmjit_expr.c
+++ b/src/backend/jit/llvm/llvmjit_expr.c
@@ -2047,6 +2047,7 @@ llvm_compile_expr(ExprState *state)
}
case EEOP_AGG_INIT_TRANS:
+ case EEOP_AGG_INIT_TRANS_SPILLED:
{
AggStatePerTrans pertrans;
@@ -2056,6 +2057,7 @@ llvm_compile_expr(ExprState *state)
LLVMValueRef v_allpergroupsp;
LLVMValueRef v_pergroupp;
+ LLVMValueRef v_pergroup_allaggs;
LLVMValueRef v_setoff,
v_transno;
@@ -2082,11 +2084,32 @@ llvm_compile_expr(ExprState *state)
"aggstate.all_pergroups");
v_setoff = l_int32_const(op->d.agg_init_trans.setoff);
v_transno = l_int32_const(op->d.agg_init_trans.transno);
- v_pergroupp =
- LLVMBuildGEP(b,
- l_load_gep1(b, v_allpergroupsp, v_setoff, ""),
- &v_transno, 1, "");
+ v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, "");
+ /*
+ * When no tuples at all have spilled, we avoid adding this
+ * extra branch. But after some tuples have spilled, this
+ * branch is necessary, so we recompile the expression
+ * using a new opcode.
+ */
+ if (opcode == EEOP_AGG_INIT_TRANS_SPILLED)
+ {
+ LLVMBasicBlockRef b_check_notransvalue = l_bb_before_v(
+ opblocks[opno + 1], "op.%d.check_notransvalue", opno);
+
+ LLVMBuildCondBr(
+ b,
+ LLVMBuildICmp(b, LLVMIntEQ,
+ LLVMBuildPtrToInt(
+ b, v_pergroup_allaggs, TypeSizeT, ""),
+ l_sizet_const(0), ""),
+ opblocks[opno + 1],
+ b_check_notransvalue);
+
+ LLVMPositionBuilderAtEnd(b, b_check_notransvalue);
+ }
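+
+ /*
+ * The _SPILLED variant therefore behaves as if the op were prefixed
+ * with (pseudocode):
+ *
+ *     if (aggstate->all_pergroups[setoff] == NULL)
+ *         goto next_op;
+ *
+ * skipping per-group initialization when the tuple's group has been
+ * spilled rather than kept in memory.
+ */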
+
+ v_pergroupp = LLVMBuildGEP(b, v_pergroup_allaggs, &v_transno, 1, "");
v_notransvalue =
l_load_struct_gep(b, v_pergroupp,
FIELDNO_AGGSTATEPERGROUPDATA_NOTRANSVALUE,
@@ -2143,6 +2166,7 @@ llvm_compile_expr(ExprState *state)
}
case EEOP_AGG_STRICT_TRANS_CHECK:
+ case EEOP_AGG_STRICT_TRANS_CHECK_SPILLED:
{
LLVMValueRef v_setoff,
v_transno;
@@ -2152,6 +2176,7 @@ llvm_compile_expr(ExprState *state)
LLVMValueRef v_transnull;
LLVMValueRef v_pergroupp;
+ LLVMValueRef v_pergroup_allaggs;
int jumpnull = op->d.agg_strict_trans_check.jumpnull;
@@ -2171,11 +2196,32 @@ llvm_compile_expr(ExprState *state)
l_int32_const(op->d.agg_strict_trans_check.setoff);
v_transno =
l_int32_const(op->d.agg_strict_trans_check.transno);
- v_pergroupp =
- LLVMBuildGEP(b,
- l_load_gep1(b, v_allpergroupsp, v_setoff, ""),
- &v_transno, 1, "");
+ v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, "");
+
+ /*
+ * When no tuples at all have spilled, we avoid adding this
+ * extra branch. But after some tuples have spilled, this
+ * branch is necessary, so we recompile the expression
+ * using a new opcode.
+ */
+ if (opcode == EEOP_AGG_STRICT_TRANS_CHECK_SPILLED)
+ {
+ LLVMBasicBlockRef b_check_transnull = l_bb_before_v(
+ opblocks[opno + 1], "op.%d.check_transnull", opno);
+
+ LLVMBuildCondBr(
+ b,
+ LLVMBuildICmp(b, LLVMIntEQ,
+ LLVMBuildPtrToInt(b, v_pergroup_allaggs,
+ TypeSizeT, ""),
+ l_sizet_const(0), ""),
+ opblocks[jumpnull],
+ b_check_transnull);
+
+ LLVMPositionBuilderAtEnd(b, b_check_transnull);
+ }
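+
+ /*
+ * Unlike INIT_TRANS above, a NULL pergroup here branches to jumpnull,
+ * the same target taken when a strict transition function sees a NULL
+ * transition value.
+ */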
+ v_pergroupp = LLVMBuildGEP(b, v_pergroup_allaggs, &v_transno, 1, "");
v_transnull =
l_load_struct_gep(b, v_pergroupp,
FIELDNO_AGGSTATEPERGROUPDATA_TRANSVALUEISNULL,
@@ -2191,7 +2237,9 @@ llvm_compile_expr(ExprState *state)
}
case EEOP_AGG_PLAIN_TRANS_BYVAL:
+ case EEOP_AGG_PLAIN_TRANS_BYVAL_SPILLED:
case EEOP_AGG_PLAIN_TRANS:
+ case EEOP_AGG_PLAIN_TRANS_SPILLED:
{
AggState *aggstate;
AggStatePerTrans pertrans;
@@ -2217,6 +2265,7 @@ llvm_compile_expr(ExprState *state)
LLVMValueRef v_pertransp;
LLVMValueRef v_pergroupp;
+ LLVMValueRef v_pergroup_allaggs;
LLVMValueRef v_retval;
@@ -2244,10 +2293,33 @@ llvm_compile_expr(ExprState *state)
"aggstate.all_pergroups");
v_setoff = l_int32_const(op->d.agg_trans.setoff);
v_transno = l_int32_const(op->d.agg_trans.transno);
- v_pergroupp =
- LLVMBuildGEP(b,
- l_load_gep1(b, v_allpergroupsp, v_setoff, ""),
- &v_transno, 1, "");
+ v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, "");
+
+ /*
+ * When no tuples at all have spilled, we avoid adding this
+ * extra branch. But after some tuples have spilled, this
+ * branch is necessary, so we recompile the expression
+ * using a new opcode.
+ */
+ if (opcode == EEOP_AGG_PLAIN_TRANS_BYVAL_SPILLED ||
+ opcode == EEOP_AGG_PLAIN_TRANS_SPILLED)
+ {
+ LLVMBasicBlockRef b_advance_transval = l_bb_before_v(
+ opblocks[opno + 1], "op.%d.advance_transval", opno);
+
+ LLVMBuildCondBr(
+ b,
+ LLVMBuildICmp(b, LLVMIntEQ,
+ LLVMBuildPtrToInt(b, v_pergroup_allaggs,
+ TypeSizeT, ""),
+ l_sizet_const(0), ""),
+ opblocks[opno + 1],
+ b_advance_transval);
+
+ LLVMPositionBuilderAtEnd(b, b_advance_transval);
+ }
+
+ v_pergroupp = LLVMBuildGEP(b, v_pergroup_allaggs, &v_transno, 1, "");
v_fcinfo = l_ptr_const(fcinfo,
l_ptr(StructFunctionCallInfoData));
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index b5a0033721f..8d58780bf6a 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -77,6 +77,7 @@
#include "access/htup_details.h"
#include "access/tsmapi.h"
#include "executor/executor.h"
+#include "executor/nodeAgg.h"
#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
@@ -128,6 +129,7 @@ bool enable_bitmapscan = true;
bool enable_tidscan = true;
bool enable_sort = true;
bool enable_hashagg = true;
+bool enable_hashagg_spill = true;
bool enable_nestloop = true;
bool enable_material = true;
bool enable_mergejoin = true;
@@ -2153,7 +2155,7 @@ cost_agg(Path *path, PlannerInfo *root,
int numGroupCols, double numGroups,
List *quals,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples)
+ double input_tuples, double input_width)
{
double output_tuples;
Cost startup_cost;
@@ -2219,21 +2221,88 @@ cost_agg(Path *path, PlannerInfo *root,
total_cost += aggcosts->finalCost.per_tuple * numGroups;
total_cost += cpu_tuple_cost * numGroups;
output_tuples = numGroups;
+
+ /*
+ * We don't need to compute the disk costs of hash aggregation here,
+ * because the planner does not choose hash aggregation for grouping
+ * sets that it doesn't expect to fit in memory.
+ */
}
else
{
+ double pages_written = 0.0;
+ double pages_read = 0.0;
+ double hashentrysize;
+ double nbatches;
+ Size mem_limit;
+ long ngroups_limit;
+ int num_partitions;
+
/* must be AGG_HASHED */
startup_cost = input_total_cost;
if (!enable_hashagg)
startup_cost += disable_cost;
startup_cost += aggcosts->transCost.startup;
startup_cost += aggcosts->transCost.per_tuple * input_tuples;
+ /* cost of computing hash value */
startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
startup_cost += aggcosts->finalCost.startup;
+
total_cost = startup_cost;
total_cost += aggcosts->finalCost.per_tuple * numGroups;
+ /* cost of retrieving from hash table */
total_cost += cpu_tuple_cost * numGroups;
output_tuples = numGroups;
+
+ /*
+ * Estimate number of batches based on the computed limits. If less
+ * than or equal to one, all groups are expected to fit in memory;
+ * otherwise we expect to spill.
+ */
+ hashentrysize = hash_agg_entry_size(
+ aggcosts->numAggs, input_width, aggcosts->transitionSpace);
+ hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
+ &ngroups_limit, &num_partitions);
+
+ nbatches = Max((numGroups * hashentrysize) / mem_limit,
+ numGroups / ngroups_limit);
+
+ /*
+ * Estimate number of pages read and written. For each level of
+ * recursion, a tuple must be written and then later read.
+ */
+ if (!hashagg_mem_overflow && nbatches > 1.0)
+ {
+ double depth;
+ double pages;
+
+ pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
+
+ /*
+ * The number of partitions can change at different levels of
+ * recursion; but for the purposes of this calculation assume it
+ * stays constant.
+ */
+ depth = ceil(log(nbatches) / log(num_partitions));
+ pages_written = pages_read = pages * depth;
+ }
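+
+ /*
+ * Illustrative example (assumed numbers): with numGroups = 1e6,
+ * hashentrysize = 64 bytes and mem_limit = 4MB, nbatches is at least
+ * 64e6 / 4e6 = 16; if num_partitions is 4, then depth =
+ * ceil(log(16) / log(4)) = 2, i.e. each input tuple is charged as
+ * written and read twice.
+ */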
+
+ /*
+ * Add the disk costs of hash aggregation that spills to disk.
+ *
+ * Groups that go into the hash table stay in memory until finalized,
+ * so spilling and reprocessing tuples doesn't incur additional
+ * invocations of transCost or finalCost. Furthermore, the computed
+ * hash value is stored with the spilled tuples, so we don't incur
+ * extra invocations of the hash function.
+ *
+ * Hash Agg begins returning tuples after the first batch is
+ * complete. Accrue writes (spilled tuples) to startup_cost and to
+ * total_cost; accrue reads only to total_cost.
+ */
+ startup_cost += pages_written * random_page_cost;
+ total_cost += pages_written * random_page_cost;
+ total_cost += pages_read * seq_page_cost;
}
/*
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index e048d200bb4..090919e39a0 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -1644,6 +1644,7 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
NIL,
NIL,
best_path->path.rows,
+ 0,
subplan);
}
else
@@ -2096,6 +2097,7 @@ create_agg_plan(PlannerInfo *root, AggPath *best_path)
NIL,
NIL,
best_path->numGroups,
+ best_path->transitionSpace,
subplan);
copy_generic_path_info(&plan->plan, (Path *) best_path);
@@ -2257,6 +2259,7 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
rollup->gsets,
NIL,
rollup->numGroups,
+ best_path->transitionSpace,
sort_plan);
/*
@@ -2295,6 +2298,7 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
rollup->gsets,
chain,
rollup->numGroups,
+ best_path->transitionSpace,
subplan);
/* Copy cost data from Path to Plan */
@@ -6192,8 +6196,8 @@ Agg *
make_agg(List *tlist, List *qual,
AggStrategy aggstrategy, AggSplit aggsplit,
int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
- List *groupingSets, List *chain,
- double dNumGroups, Plan *lefttree)
+ List *groupingSets, List *chain, double dNumGroups,
+ int32 transitionSpace, Plan *lefttree)
{
Agg *node = makeNode(Agg);
Plan *plan = &node->plan;
@@ -6209,6 +6213,7 @@ make_agg(List *tlist, List *qual,
node->grpOperators = grpOperators;
node->grpCollations = grpCollations;
node->numGroups = numGroups;
+ node->transitionSpace = transitionSpace;
node->aggParams = NULL; /* SS_finalize_plan() will fill this */
node->groupingSets = groupingSets;
node->chain = chain;
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index b44efd6314c..913ad9335e5 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -6528,7 +6528,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
* were unable to sort above, then we'd better generate a Path, so
* that we at least have one.
*/
- if (hashaggtablesize < work_mem * 1024L ||
+ if (enable_hashagg_spill ||
+ hashaggtablesize < work_mem * 1024L ||
grouped_rel->pathlist == NIL)
{
/*
@@ -6561,7 +6562,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
agg_final_costs,
dNumGroups);
- if (hashaggtablesize < work_mem * 1024L)
+ if (enable_hashagg_spill ||
+ hashaggtablesize < work_mem * 1024L)
add_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
@@ -6830,7 +6832,7 @@ create_partial_grouping_paths(PlannerInfo *root,
* Tentatively produce a partial HashAgg Path, depending on if it
* looks as if the hash table will fit in work_mem.
*/
- if (hashaggtablesize < work_mem * 1024L &&
+ if ((enable_hashagg_spill || hashaggtablesize < work_mem * 1024L) &&
cheapest_total_path != NULL)
{
add_path(partially_grouped_rel, (Path *)
@@ -6857,7 +6859,7 @@ create_partial_grouping_paths(PlannerInfo *root,
dNumPartialPartialGroups);
/* Do the same for partial paths. */
- if (hashaggtablesize < work_mem * 1024L &&
+ if ((enable_hashagg_spill || hashaggtablesize < work_mem * 1024L) &&
cheapest_partial_path != NULL)
{
add_partial_path(partially_grouped_rel, (Path *)
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 1a23e18970d..951aed80e7a 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -1072,7 +1072,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
numGroupCols, dNumGroups,
NIL,
input_path->startup_cost, input_path->total_cost,
- input_path->rows);
+ input_path->rows, input_path->pathtarget->width);
/*
* Now for the sorted case. Note that the input is *always* unsorted,
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index e6d08aede56..8ba8122ee2f 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1704,7 +1704,8 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
NIL,
subpath->startup_cost,
subpath->total_cost,
- rel->rows);
+ rel->rows,
+ subpath->pathtarget->width);
}
if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
@@ -2949,6 +2950,7 @@ create_agg_path(PlannerInfo *root,
pathnode->aggstrategy = aggstrategy;
pathnode->aggsplit = aggsplit;
pathnode->numGroups = numGroups;
+ pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
pathnode->groupClause = groupClause;
pathnode->qual = qual;
@@ -2957,7 +2959,7 @@ create_agg_path(PlannerInfo *root,
list_length(groupClause), numGroups,
qual,
subpath->startup_cost, subpath->total_cost,
- subpath->rows);
+ subpath->rows, subpath->pathtarget->width);
/* add tlist eval cost for each output row */
pathnode->path.startup_cost += target->cost.startup;
@@ -3036,6 +3038,7 @@ create_groupingsets_path(PlannerInfo *root,
pathnode->aggstrategy = aggstrategy;
pathnode->rollups = rollups;
pathnode->qual = having_qual;
+ pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
Assert(rollups != NIL);
Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
@@ -3067,7 +3070,8 @@ create_groupingsets_path(PlannerInfo *root,
having_qual,
subpath->startup_cost,
subpath->total_cost,
- subpath->rows);
+ subpath->rows,
+ subpath->pathtarget->width);
is_first = false;
if (!rollup->is_hashed)
is_first_sort = false;
@@ -3090,7 +3094,8 @@ create_groupingsets_path(PlannerInfo *root,
rollup->numGroups,
having_qual,
0.0, 0.0,
- subpath->rows);
+ subpath->rows,
+ subpath->pathtarget->width);
if (!rollup->is_hashed)
is_first_sort = false;
}
@@ -3115,7 +3120,8 @@ create_groupingsets_path(PlannerInfo *root,
having_qual,
sort_path.startup_cost,
sort_path.total_cost,
- sort_path.rows);
+ sort_path.rows,
+ subpath->pathtarget->width);
}
pathnode->path.total_cost += agg_path.total_cost;
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index eb196444198..1151b807418 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -120,6 +120,7 @@ bool enableFsync = true;
bool allowSystemTableMods = false;
int work_mem = 1024;
int maintenance_work_mem = 16384;
+bool hashagg_mem_overflow = false;
int max_parallel_maintenance_workers = 2;
/*
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 8228e1f3903..ed6737a8ac9 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -998,6 +998,26 @@ static struct config_bool ConfigureNamesBool[] =
true,
NULL, NULL, NULL
},
+ {
+ {"enable_hashagg_spill", PGC_USERSET, QUERY_TUNING_METHOD,
+ gettext_noop("Enables the planner's use of hashed aggregation plans that are expected to exceed work_mem."),
+ NULL,
+ GUC_EXPLAIN
+ },
+ &enable_hashagg_spill,
+ true,
+ NULL, NULL, NULL
+ },
+ {
+ {"hashagg_mem_overflow", PGC_USERSET, QUERY_TUNING_METHOD,
+ gettext_noop("Enables hashed aggregation to overflow work_mem at execution time."),
+ NULL,
+ GUC_EXPLAIN
+ },
+ &hashagg_mem_overflow,
+ false,
+ NULL, NULL, NULL
+ },
{
{"enable_material", PGC_USERSET, QUERY_TUNING_METHOD,
gettext_noop("Enables the planner's use of materialization."),
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 4f78b55fbaf..36104a73a75 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -201,6 +201,7 @@ static long ltsGetFreeBlock(LogicalTapeSet *lts);
static void ltsReleaseBlock(LogicalTapeSet *lts, long blocknum);
static void ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
SharedFileSet *fileset);
+static void ltsInitTape(LogicalTape *lt);
static void ltsInitReadBuffer(LogicalTapeSet *lts, LogicalTape *lt);
@@ -536,6 +537,30 @@ ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
lts->nHoleBlocks = lts->nBlocksAllocated - nphysicalblocks;
}
+/*
+ * Initialize per-tape struct. Note we allocate the I/O buffer and the first
+ * block for a tape only when it is first actually written to. This avoids
+ * wasting memory space when tuplesort.c overestimates the number of tapes
+ * needed.
+ */
+static void
+ltsInitTape(LogicalTape *lt)
+{
+ lt->writing = true;
+ lt->frozen = false;
+ lt->dirty = false;
+ lt->firstBlockNumber = -1L;
+ lt->curBlockNumber = -1L;
+ lt->nextBlockNumber = -1L;
+ lt->offsetBlockNumber = 0L;
+ lt->buffer = NULL;
+ lt->buffer_size = 0;
+ /* palloc() larger than MaxAllocSize would fail */
+ lt->max_size = MaxAllocSize;
+ lt->pos = 0;
+ lt->nbytes = 0;
+}
+
/*
* Lazily allocate and initialize the read buffer. This avoids waste when many
* tapes are open at once, but not all are active between rewinding and
@@ -579,7 +604,6 @@ LogicalTapeSetCreate(int ntapes, TapeShare *shared, SharedFileSet *fileset,
int worker)
{
LogicalTapeSet *lts;
- LogicalTape *lt;
int i;
/*
@@ -597,29 +621,8 @@ LogicalTapeSetCreate(int ntapes, TapeShare *shared, SharedFileSet *fileset,
lts->nFreeBlocks = 0;
lts->nTapes = ntapes;
- /*
- * Initialize per-tape structs. Note we allocate the I/O buffer and the
- * first block for a tape only when it is first actually written to. This
- * avoids wasting memory space when tuplesort.c overestimates the number
- * of tapes needed.
- */
for (i = 0; i < ntapes; i++)
- {
- lt = &lts->tapes[i];
- lt->writing = true;
- lt->frozen = false;
- lt->dirty = false;
- lt->firstBlockNumber = -1L;
- lt->curBlockNumber = -1L;
- lt->nextBlockNumber = -1L;
- lt->offsetBlockNumber = 0L;
- lt->buffer = NULL;
- lt->buffer_size = 0;
- /* palloc() larger than MaxAllocSize would fail */
- lt->max_size = MaxAllocSize;
- lt->pos = 0;
- lt->nbytes = 0;
- }
+ ltsInitTape(&lts->tapes[i]);
/*
* Create temp BufFile storage as required.
@@ -1004,6 +1007,29 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum, TapeShare *share)
}
}
+/*
+ * Add additional tapes to this tape set. Not intended to be used when any
+ * tapes are frozen. The tape set may be relocated by repalloc(), so callers
+ * must continue using the returned pointer.
+ */
+LogicalTapeSet *
+LogicalTapeSetExtend(LogicalTapeSet *lts, int nAdditional)
+{
+ int i;
+ int nTapesOrig = lts->nTapes;
+ Size newSize;
+
+ lts->nTapes += nAdditional;
+ newSize = offsetof(LogicalTapeSet, tapes) +
+ lts->nTapes * sizeof(LogicalTape);
+
+ lts = (LogicalTapeSet *) repalloc(lts, newSize);
+
+ for (i = nTapesOrig; i < lts->nTapes; i++)
+ ltsInitTape(&lts->tapes[i]);
+
+ return lts;
+}
+
/*
* Backspace the tape a given number of bytes. (We also support a more
* general seek interface, see below.)
diff --git a/src/include/executor/execExpr.h b/src/include/executor/execExpr.h
index 73a2ca8c6dd..d70bc048c46 100644
--- a/src/include/executor/execExpr.h
+++ b/src/include/executor/execExpr.h
@@ -226,9 +226,13 @@ typedef enum ExprEvalOp
EEOP_AGG_STRICT_INPUT_CHECK_ARGS,
EEOP_AGG_STRICT_INPUT_CHECK_NULLS,
EEOP_AGG_INIT_TRANS,
+ EEOP_AGG_INIT_TRANS_SPILLED,
EEOP_AGG_STRICT_TRANS_CHECK,
+ EEOP_AGG_STRICT_TRANS_CHECK_SPILLED,
EEOP_AGG_PLAIN_TRANS_BYVAL,
+ EEOP_AGG_PLAIN_TRANS_BYVAL_SPILLED,
EEOP_AGG_PLAIN_TRANS,
+ EEOP_AGG_PLAIN_TRANS_SPILLED,
EEOP_AGG_ORDERED_TRANS_DATUM,
EEOP_AGG_ORDERED_TRANS_TUPLE,
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 81fdfa4add3..d6eb2abb60b 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -255,7 +255,7 @@ extern ExprState *ExecInitQual(List *qual, PlanState *parent);
extern ExprState *ExecInitCheck(List *qual, PlanState *parent);
extern List *ExecInitExprList(List *nodes, PlanState *parent);
extern ExprState *ExecBuildAggTrans(AggState *aggstate, struct AggStatePerPhaseData *phase,
- bool doSort, bool doHash);
+ bool doSort, bool doHash, bool spilled);
extern ExprState *ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
int numCols,
diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h
index 264916f9a92..307987a45ab 100644
--- a/src/include/executor/nodeAgg.h
+++ b/src/include/executor/nodeAgg.h
@@ -311,5 +311,8 @@ extern void ExecReScanAgg(AggState *node);
extern Size hash_agg_entry_size(int numAggs, Size tupleWidth,
Size transitionSpace);
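+
+/*
+ * Determine the memory and group-count limits at which hash aggregation
+ * spills, and the number of spill partitions to use once it does.
+ */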
+extern void hash_agg_set_limits(double hashentrysize, uint64 input_groups,
+ int used_bits, Size *mem_limit,
+ long *ngroups_limit, int *num_partitions);
#endif /* NODEAGG_H */
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index f985453ec32..707a07a2de4 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -244,6 +244,7 @@ extern bool enableFsync;
extern PGDLLIMPORT bool allowSystemTableMods;
extern PGDLLIMPORT int work_mem;
extern PGDLLIMPORT int maintenance_work_mem;
+extern PGDLLIMPORT bool hashagg_mem_overflow;
extern PGDLLIMPORT int max_parallel_maintenance_workers;
extern int VacuumCostPageHit;
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index cd3ddf781f1..19b9cef42f6 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -2078,13 +2078,32 @@ typedef struct AggState
HeapTuple grp_firstTuple; /* copy of first tuple of current group */
/* these fields are used in AGG_HASHED and AGG_MIXED modes: */
bool table_filled; /* hash table filled yet? */
- int num_hashes;
+ int num_hashes; /* number of hash tables active at once */
+ double hashentrysize; /* estimate revised during execution */
+ struct HashTapeInfo *hash_tapeinfo; /* metadata for spill tapes */
+ struct HashAggSpill *hash_spills; /* HashAggSpill for each hash table,
+ exists only during first pass if spilled */
+ TupleTableSlot *hash_spill_slot; /* slot for reading from spill files */
+ bool hash_ever_spilled; /* ever spilled during this execution? */
+ bool hash_spill_mode; /* we hit a limit during the current batch
+ and we must not create new groups */
+ Size hash_alloc_last; /* previous total memory allocation */
+ Size hash_alloc_current; /* current total memory allocation */
+ Size hash_mem_limit; /* limit before spilling hash table */
+ Size hash_mem_peak; /* peak hash table memory usage */
+ long hash_ngroups_current; /* number of groups currently in
+ memory in all hash tables */
+ long hash_ngroups_limit; /* limit before spilling hash table */
+ long hash_disk_used; /* kB of disk space used */
+ int hash_batches_used; /* batches used during entire execution */
+ List *hash_batches; /* hash batches remaining to be processed */
+
AggStatePerHash perhash; /* array of per-hashtable data */
AggStatePerGroup *hash_pergroup; /* grouping set indexed array of
* per-group pointers */
/* support for evaluation of agg input expressions: */
-#define FIELDNO_AGGSTATE_ALL_PERGROUPS 34
+#define FIELDNO_AGGSTATE_ALL_PERGROUPS 49
AggStatePerGroup *all_pergroups; /* array of first ->pergroups, then
* ->hash_pergroup */
ProjectionInfo *combinedproj; /* projection machinery */
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 3d3be197e0e..be592d0fee4 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -1663,6 +1663,7 @@ typedef struct AggPath
AggStrategy aggstrategy; /* basic strategy, see nodes.h */
AggSplit aggsplit; /* agg-splitting mode, see nodes.h */
double numGroups; /* estimated number of groups in input */
+ int32 transitionSpace; /* estimated transition state size */
List *groupClause; /* a list of SortGroupClause's */
List *qual; /* quals (HAVING quals), if any */
} AggPath;
@@ -1700,6 +1701,7 @@ typedef struct GroupingSetsPath
AggStrategy aggstrategy; /* basic strategy */
List *rollups; /* list of RollupData */
List *qual; /* quals (HAVING quals), if any */
+ int32 transitionSpace; /* estimated transition state size */
} GroupingSetsPath;
/*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 32c0d87f80e..f4183e1efa5 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -813,6 +813,7 @@ typedef struct Agg
Oid *grpOperators; /* equality operators to compare with */
Oid *grpCollations;
long numGroups; /* estimated number of groups in input */
+ int32 transitionSpace; /* estimated transition state size */
Bitmapset *aggParams; /* IDs of Params used in Aggref inputs */
/* Note: planner provides numGroups & aggParams only in HASHED/MIXED case */
List *groupingSets; /* grouping sets to use */
diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h
index cb012ba1980..6572dc24699 100644
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ -54,6 +54,7 @@ extern PGDLLIMPORT bool enable_bitmapscan;
extern PGDLLIMPORT bool enable_tidscan;
extern PGDLLIMPORT bool enable_sort;
extern PGDLLIMPORT bool enable_hashagg;
+extern PGDLLIMPORT bool enable_hashagg_spill;
extern PGDLLIMPORT bool enable_nestloop;
extern PGDLLIMPORT bool enable_material;
extern PGDLLIMPORT bool enable_mergejoin;
@@ -114,7 +115,7 @@ extern void cost_agg(Path *path, PlannerInfo *root,
int numGroupCols, double numGroups,
List *quals,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples);
+ double input_tuples, double input_width);
extern void cost_windowagg(Path *path, PlannerInfo *root,
List *windowFuncs, int numPartCols, int numOrderCols,
Cost input_startup_cost, Cost input_total_cost,
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index eab486a6214..c7bda2b0917 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -54,8 +54,8 @@ extern Sort *make_sort_from_sortclauses(List *sortcls, Plan *lefttree);
extern Agg *make_agg(List *tlist, List *qual,
AggStrategy aggstrategy, AggSplit aggsplit,
int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
- List *groupingSets, List *chain,
- double dNumGroups, Plan *lefttree);
+ List *groupingSets, List *chain, double dNumGroups,
+ int32 transitionSpace, Plan *lefttree);
extern Limit *make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount);
/*
diff --git a/src/include/utils/logtape.h b/src/include/utils/logtape.h
index 695d2c00ee4..3ebe52239f8 100644
--- a/src/include/utils/logtape.h
+++ b/src/include/utils/logtape.h
@@ -67,6 +67,8 @@ extern void LogicalTapeRewindForRead(LogicalTapeSet *lts, int tapenum,
extern void LogicalTapeRewindForWrite(LogicalTapeSet *lts, int tapenum);
extern void LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum,
TapeShare *share);
+extern LogicalTapeSet *LogicalTapeSetExtend(LogicalTapeSet *lts,
+ int nAdditional);
extern size_t LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum,
size_t size);
extern void LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index f457b5b150f..7eeeaaa5e4a 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2357,3 +2357,124 @@ explain (costs off)
-> Seq Scan on onek
(8 rows)
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+set work_mem='64kB';
+-- Produce results with sorting.
+set enable_hashagg = false;
+set jit_above_cost = 0;
+explain (costs off)
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+ QUERY PLAN
+------------------------------------------------
+ GroupAggregate
+ Group Key: ((g % 100000))
+ -> Sort
+ Sort Key: ((g % 100000))
+ -> Function Scan on generate_series g
+(5 rows)
+
+create table agg_group_1 as
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+create table agg_group_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from generate_series(0, 1999) g
+ where g < r.a
+ group by g/2) as s;
+set jit_above_cost to default;
+create table agg_group_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+create table agg_group_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+-- Produce results with hash aggregation
+set enable_hashagg = true;
+set enable_sort = false;
+set jit_above_cost = 0;
+explain (costs off)
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+ QUERY PLAN
+------------------------------------------
+ HashAggregate
+ Group Key: (g % 100000)
+ -> Function Scan on generate_series g
+(3 rows)
+
+create table agg_hash_1 as
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+create table agg_hash_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from generate_series(0, 1999) g
+ where g < r.a
+ group by g/2) as s;
+set jit_above_cost to default;
+create table agg_hash_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+create table agg_hash_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+set enable_sort = true;
+set work_mem to default;
+-- Compare group aggregation results to hash aggregation results
+(select * from agg_hash_1 except select * from agg_group_1)
+ union all
+(select * from agg_group_1 except select * from agg_hash_1);
+ c1 | c2 | c3
+----+----+----
+(0 rows)
+
+(select * from agg_hash_2 except select * from agg_group_2)
+ union all
+(select * from agg_group_2 except select * from agg_hash_2);
+ a | c1 | c2 | c3
+---+----+----+----
+(0 rows)
+
+(select * from agg_hash_3 except select * from agg_group_3)
+ union all
+(select * from agg_group_3 except select * from agg_hash_3);
+ c1 | c2 | c3
+----+----+----
+(0 rows)
+
+(select * from agg_hash_4 except select * from agg_group_4)
+ union all
+(select * from agg_group_4 except select * from agg_hash_4);
+ c1 | c2 | c3
+----+----+----
+(0 rows)
+
+drop table agg_group_1;
+drop table agg_group_2;
+drop table agg_group_3;
+drop table agg_group_4;
+drop table agg_hash_1;
+drop table agg_hash_2;
+drop table agg_hash_3;
+drop table agg_hash_4;
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index c1f802c88a7..767f60a96c7 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -1633,4 +1633,127 @@ select v||'a', case when grouping(v||'a') = 1 then 1 else 0 end, count(*)
| 1 | 2
(4 rows)
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+SET work_mem='64kB';
+-- Produce results with sorting.
+set enable_hashagg = false;
+set jit_above_cost = 0;
+explain (costs off)
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+ QUERY PLAN
+---------------------------------------------------------------
+ GroupAggregate
+ Group Key: ((g.g % 1000)), ((g.g % 100)), ((g.g % 10))
+ Group Key: ((g.g % 1000)), ((g.g % 100))
+ Group Key: ((g.g % 1000))
+ Group Key: ()
+ Sort Key: ((g.g % 100)), ((g.g % 10))
+ Group Key: ((g.g % 100)), ((g.g % 10))
+ Group Key: ((g.g % 100))
+ Sort Key: ((g.g % 10)), ((g.g % 1000))
+ Group Key: ((g.g % 10)), ((g.g % 1000))
+ Group Key: ((g.g % 10))
+ -> Sort
+ Sort Key: ((g.g % 1000)), ((g.g % 100)), ((g.g % 10))
+ -> Function Scan on generate_series g
+(14 rows)
+
+create table gs_group_1 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+set jit_above_cost to default;
+create table gs_group_2 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g/20 as g1000, g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by cube (g1000,g100,g10);
+create table gs_group_3 as
+select g100, g10, array_agg(g) as a, count(*) as c, max(g::text) as m from
+ (select g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by grouping sets (g100,g10);
+-- Produce results with hash aggregation.
+set enable_hashagg = true;
+set enable_sort = false;
+set work_mem='64kB';
+set jit_above_cost = 0;
+explain (costs off)
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+ QUERY PLAN
+---------------------------------------------------------------
+ GroupAggregate
+ Group Key: ((g.g % 1000)), ((g.g % 100)), ((g.g % 10))
+ Group Key: ((g.g % 1000)), ((g.g % 100))
+ Group Key: ((g.g % 1000))
+ Group Key: ()
+ Sort Key: ((g.g % 100)), ((g.g % 10))
+ Group Key: ((g.g % 100)), ((g.g % 10))
+ Group Key: ((g.g % 100))
+ Sort Key: ((g.g % 10)), ((g.g % 1000))
+ Group Key: ((g.g % 10)), ((g.g % 1000))
+ Group Key: ((g.g % 10))
+ -> Sort
+ Sort Key: ((g.g % 1000)), ((g.g % 100)), ((g.g % 10))
+ -> Function Scan on generate_series g
+(14 rows)
+
+create table gs_hash_1 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+set jit_above_cost to default;
+create table gs_hash_2 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g/20 as g1000, g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by cube (g1000,g100,g10);
+create table gs_hash_3 as
+select g100, g10, array_agg(g) as a, count(*) as c, max(g::text) as m from
+ (select g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by grouping sets (g100,g10);
+set enable_sort = true;
+set work_mem to default;
+-- Compare results
+(select * from gs_hash_1 except select * from gs_group_1)
+ union all
+(select * from gs_group_1 except select * from gs_hash_1);
+ g1000 | g100 | g10 | sum | count | max
+-------+------+-----+-----+-------+-----
+(0 rows)
+
+(select * from gs_hash_2 except select * from gs_group_2)
+ union all
+(select * from gs_group_2 except select * from gs_hash_2);
+ g1000 | g100 | g10 | sum | count | max
+-------+------+-----+-----+-------+-----
+(0 rows)
+
+(select g100,g10,unnest(a),c,m from gs_hash_3 except
+ select g100,g10,unnest(a),c,m from gs_group_3)
+ union all
+(select g100,g10,unnest(a),c,m from gs_group_3 except
+ select g100,g10,unnest(a),c,m from gs_hash_3);
+ g100 | g10 | unnest | c | m
+------+-----+--------+---+---
+(0 rows)
+
+drop table gs_group_1;
+drop table gs_group_2;
+drop table gs_group_3;
+drop table gs_hash_1;
+drop table gs_hash_2;
+drop table gs_hash_3;
-- end
diff --git a/src/test/regress/expected/select_distinct.out b/src/test/regress/expected/select_distinct.out
index f3696c6d1de..11c6f50fbfa 100644
--- a/src/test/regress/expected/select_distinct.out
+++ b/src/test/regress/expected/select_distinct.out
@@ -148,6 +148,68 @@ SELECT count(*) FROM
4
(1 row)
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+SET work_mem='64kB';
+-- Produce results with sorting.
+SET enable_hashagg=FALSE;
+SET jit_above_cost=0;
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+ QUERY PLAN
+------------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: ((g % 1000))
+ -> Function Scan on generate_series g
+(4 rows)
+
+CREATE TABLE distinct_group_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+SET jit_above_cost TO DEFAULT;
+CREATE TABLE distinct_group_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+SET enable_hashagg=TRUE;
+-- Produce results with hash aggregation.
+SET enable_sort=FALSE;
+SET jit_above_cost=0;
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+ QUERY PLAN
+------------------------------------------
+ HashAggregate
+ Group Key: (g % 1000)
+ -> Function Scan on generate_series g
+(3 rows)
+
+CREATE TABLE distinct_hash_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+SET jit_above_cost TO DEFAULT;
+CREATE TABLE distinct_hash_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+SET enable_sort=TRUE;
+SET work_mem TO DEFAULT;
+-- Compare results
+(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
+ UNION ALL
+(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
+ ?column?
+----------
+(0 rows)
+
+(SELECT * FROM distinct_hash_2 EXCEPT SELECT * FROM distinct_group_2)
+ UNION ALL
+(SELECT * FROM distinct_group_2 EXCEPT SELECT * FROM distinct_hash_2);
+ text
+------
+(0 rows)
+
+DROP TABLE distinct_hash_1;
+DROP TABLE distinct_hash_2;
+DROP TABLE distinct_group_1;
+DROP TABLE distinct_group_2;
--
-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
-- very own regression file.
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index a1c90eb9057..c40bf6c16eb 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -75,6 +75,7 @@ select name, setting from pg_settings where name like 'enable%';
enable_bitmapscan | on
enable_gathermerge | on
enable_hashagg | on
+ enable_hashagg_spill | on
enable_hashjoin | on
enable_indexonlyscan | on
enable_indexscan | on
@@ -89,7 +90,7 @@ select name, setting from pg_settings where name like 'enable%';
enable_seqscan | on
enable_sort | on
enable_tidscan | on
-(17 rows)
+(18 rows)
-- Test that the pg_timezone_names and pg_timezone_abbrevs views are
-- more-or-less working. We can't test their contents in any great detail
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 3e593f2d615..a4d476c4bb3 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -1032,3 +1032,119 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
explain (costs off)
select 1 from tenk1
where (hundred, thousand) in (select twothousand, twothousand from onek);
+
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+
+set work_mem='64kB';
+
+-- Produce results with sorting.
+
+set enable_hashagg = false;
+
+set jit_above_cost = 0;
+
+explain (costs off)
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+
+create table agg_group_1 as
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+
+create table agg_group_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from generate_series(0, 1999) g
+ where g < r.a
+ group by g/2) as s;
+
+set jit_above_cost to default;
+
+create table agg_group_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+
+create table agg_group_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+
+-- Produce results with hash aggregation
+
+set enable_hashagg = true;
+set enable_sort = false;
+
+set jit_above_cost = 0;
+
+explain (costs off)
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+
+create table agg_hash_1 as
+select g%100000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 199999) g
+ group by g%100000;
+
+create table agg_hash_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from generate_series(0, 1999) g
+ where g < r.a
+ group by g/2) as s;
+
+set jit_above_cost to default;
+
+create table agg_hash_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+
+create table agg_hash_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from generate_series(0, 1999) g
+ group by g/2;
+
+set enable_sort = true;
+set work_mem to default;
+
+-- Compare group aggregation results to hash aggregation results
+
+(select * from agg_hash_1 except select * from agg_group_1)
+ union all
+(select * from agg_group_1 except select * from agg_hash_1);
+
+(select * from agg_hash_2 except select * from agg_group_2)
+ union all
+(select * from agg_group_2 except select * from agg_hash_2);
+
+(select * from agg_hash_3 except select * from agg_group_3)
+ union all
+(select * from agg_group_3 except select * from agg_hash_3);
+
+(select * from agg_hash_4 except select * from agg_group_4)
+ union all
+(select * from agg_group_4 except select * from agg_hash_4);
+
+drop table agg_group_1;
+drop table agg_group_2;
+drop table agg_group_3;
+drop table agg_group_4;
+drop table agg_hash_1;
+drop table agg_hash_2;
+drop table agg_hash_3;
+drop table agg_hash_4;
diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql
index 95ac3fb52f6..bf8bce6ed31 100644
--- a/src/test/regress/sql/groupingsets.sql
+++ b/src/test/regress/sql/groupingsets.sql
@@ -441,4 +441,103 @@ select v||'a', case when grouping(v||'a') = 1 then 1 else 0 end, count(*)
from unnest(array[1,1], array['a','b']) u(i,v)
group by rollup(i, v||'a') order by 1,3;
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+
+SET work_mem='64kB';
+
+-- Produce results with sorting.
+
+set enable_hashagg = false;
+
+set jit_above_cost = 0;
+
+explain (costs off)
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+
+create table gs_group_1 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+
+set jit_above_cost to default;
+
+create table gs_group_2 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g/20 as g1000, g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by cube (g1000,g100,g10);
+
+create table gs_group_3 as
+select g100, g10, array_agg(g) as a, count(*) as c, max(g::text) as m from
+ (select g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by grouping sets (g100,g10);
+
+-- Produce results with hash aggregation.
+
+set enable_hashagg = true;
+set enable_sort = false;
+set work_mem='64kB';
+
+set jit_above_cost = 0;
+
+explain (costs off)
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+
+create table gs_hash_1 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g%1000 as g1000, g%100 as g100, g%10 as g10, g
+ from generate_series(0,199999) g) s
+group by cube (g1000,g100,g10);
+
+set jit_above_cost to default;
+
+create table gs_hash_2 as
+select g1000, g100, g10, sum(g::numeric), count(*), max(g::text) from
+ (select g/20 as g1000, g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by cube (g1000,g100,g10);
+
+create table gs_hash_3 as
+select g100, g10, array_agg(g) as a, count(*) as c, max(g::text) as m from
+ (select g/200 as g100, g/2000 as g10, g
+ from generate_series(0,19999) g) s
+group by grouping sets (g100,g10);
+
+set enable_sort = true;
+set work_mem to default;
+
+-- Compare results
+
+(select * from gs_hash_1 except select * from gs_group_1)
+ union all
+(select * from gs_group_1 except select * from gs_hash_1);
+
+(select * from gs_hash_2 except select * from gs_group_2)
+ union all
+(select * from gs_group_2 except select * from gs_hash_2);
+
+(select g100,g10,unnest(a),c,m from gs_hash_3 except
+ select g100,g10,unnest(a),c,m from gs_group_3)
+ union all
+(select g100,g10,unnest(a),c,m from gs_group_3 except
+ select g100,g10,unnest(a),c,m from gs_hash_3);
+
+drop table gs_group_1;
+drop table gs_group_2;
+drop table gs_group_3;
+drop table gs_hash_1;
+drop table gs_hash_2;
+drop table gs_hash_3;
+
-- end
diff --git a/src/test/regress/sql/select_distinct.sql b/src/test/regress/sql/select_distinct.sql
index a605e86449e..33102744ebf 100644
--- a/src/test/regress/sql/select_distinct.sql
+++ b/src/test/regress/sql/select_distinct.sql
@@ -45,6 +45,68 @@ SELECT count(*) FROM
SELECT count(*) FROM
(SELECT DISTINCT two, four, two FROM tenk1) ss;
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+
+SET work_mem='64kB';
+
+-- Produce results with sorting.
+
+SET enable_hashagg=FALSE;
+
+SET jit_above_cost=0;
+
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+CREATE TABLE distinct_group_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+SET jit_above_cost TO DEFAULT;
+
+CREATE TABLE distinct_group_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+
+SET enable_hashagg=TRUE;
+
+-- Produce results with hash aggregation.
+
+SET enable_sort=FALSE;
+
+SET jit_above_cost=0;
+
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+CREATE TABLE distinct_hash_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+SET jit_above_cost TO DEFAULT;
+
+CREATE TABLE distinct_hash_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+
+SET enable_sort=TRUE;
+
+SET work_mem TO DEFAULT;
+
+-- Compare results
+
+(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
+ UNION ALL
+(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
+
+(SELECT * FROM distinct_hash_2 EXCEPT SELECT * FROM distinct_group_2)
+ UNION ALL
+(SELECT * FROM distinct_group_2 EXCEPT SELECT * FROM distinct_hash_2);
+
+DROP TABLE distinct_hash_1;
+DROP TABLE distinct_hash_2;
+DROP TABLE distinct_group_1;
+DROP TABLE distinct_group_2;
+
--
-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
-- very own regression file.