Attached is a patch for a write past the end of allocated memory which we
found in testing. It's an obscure case, but it can happen if the same
column is used in different grouping keys, as in the example below, which
uses tables from the regression test suite (build with --enable-cassert to
turn on the memory checking warnings). The patch is against master.

The hashed aggregate state has an array for the column indices that is
sized using the number of non-aggregated columns in the set built from the
agg's targetlist, quals and input grouping columns. Because that set
eliminates duplicate columns, the array can end up under-allocated when the
same column appears more than once in the grouping key, as below. Sizing it
based on the number of grouping columns plus the number of targetlist/qual
columns not already among the grouping columns avoids this.
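
For concreteness, here is a minimal standalone sketch (plain C, not the
PostgreSQL code itself; the attribute numbers and helper names are invented
for illustration) of the sizing arithmetic for a group key that repeats the
same column, as in the example below:

/*
 * Standalone illustration only -- not PostgreSQL code.  With a group key
 * like (twothousand, twothousand), the deduplicated column set has one
 * member, so the old sizing allocates one slot even though the fill loop
 * writes one entry per grouping column.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_ATTR 16

/* count distinct attribute numbers, as a bitmapset effectively does */
static int
count_distinct(const int *attrs, int n)
{
    bool seen[MAX_ATTR] = {false};
    int  distinct = 0;

    for (int i = 0; i < n; i++)
    {
        if (!seen[attrs[i]])
        {
            seen[attrs[i]] = true;
            distinct++;
        }
    }
    return distinct;
}

int
main(void)
{
    int grpColIdx[] = {4, 4};   /* same column used twice in the group key */
    int numCols = 2;
    int numOtherCols = 0;       /* tlist/qual columns outside the group key */

    /* old sizing: number of distinct members of the combined column set */
    int oldSize = count_distinct(grpColIdx, numCols) + numOtherCols;

    /* the fill loop writes one slot per grouping column, plus the rest */
    int entriesWritten = numCols + numOtherCols;

    /* patched sizing matches what the fill loop actually writes */
    int newSize = numCols + numOtherCols;

    printf("old: %d slot(s), written: %d, new: %d slot(s)\n",
           oldSize, entriesWritten, newSize);   /* old: 1, written: 2, new: 2 */
    return 0;
}

With the duplicated grouping key, the deduplicated set has one member, so
the old sizing allocates one slot while the fill loop writes two entries;
the patched sizing allocates two.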

Regards,
Colm McHugh (Salesforce)

explain (costs off) select 1 from tenk where (hundred, thousand) in (select twothousand, twothousand from onek);

psql: WARNING:  problem in alloc set ExecutorState: detected write past chunk end in block 0x7f8b8901fa00, chunk 0x7f8b89020cd0
psql: WARNING:  problem in alloc set ExecutorState: detected write past chunk end in block 0x7f8b8901fa00, chunk 0x7f8b89020cd0

                         QUERY PLAN
-------------------------------------------------------------
 Hash Join
   Hash Cond: (tenk.hundred = onek.twothousand)
   ->  Seq Scan on tenk
         Filter: (hundred = thousand)
   ->  Hash
         ->  HashAggregate
               Group Key: onek.twothousand, onek.twothousand
               ->  Seq Scan on onek
(8 rows)
From 39e2a404909e34de82a17be9110dc9f42a38823c Mon Sep 17 00:00:00 2001
From: Colm McHugh <cmchugh@salesforce.com>
Date: Fri, 17 May 2019 15:32:52 -0700
Subject: [PATCH] Fix write after end of array in hashed Agg initialization.

The array for the column indices in the per-hashtable data structure
may be under-allocated in some corner cases, resulting in a write past
the end of this array. Sizing the array using the number of grouping
columns plus the number of other columns referenced in the qual and
targetlist (if any) avoids the corner cases where this can happen.
---
 src/backend/executor/nodeAgg.c           | 28 +++++++++++++++++-----------
 src/test/regress/expected/aggregates.out | 27 +++++++++++++++++++++++++++
 src/test/regress/sql/aggregates.sql      |  4 ++++
 3 files changed, 48 insertions(+), 11 deletions(-)

diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index d01fc4f..079bffd 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1331,14 +1331,23 @@ find_hash_columns(AggState *aggstate)
 	for (j = 0; j < numHashes; ++j)
 	{
 		AggStatePerHash perhash = &aggstate->perhash[j];
-		Bitmapset  *colnos = bms_copy(base_colnos);
+		Bitmapset  *grouping_colnos = NULL;
+		Bitmapset  *non_grouping_colnos = NULL;
 		AttrNumber *grpColIdx = perhash->aggnode->grpColIdx;
 		List	   *hashTlist = NIL;
 		TupleDesc	hashDesc;
 		int			i;
+		int			hashGrpColIdxSize = 0;
 
 		perhash->largestGrpColIdx = 0;
 
+		/* Populate grouping_colnos - this is the set of all the grouping columns */
+		for (i = 0; i < perhash->numCols; i++)
+			grouping_colnos = bms_add_member(grouping_colnos, grpColIdx[i]);
+
+		/* Determine remaining columns (if any) */
+		non_grouping_colnos = bms_difference(base_colnos, grouping_colnos);
+
 		/*
 		 * If we're doing grouping sets, then some Vars might be referenced in
 		 * tlist/qual for the benefit of other grouping sets, but not needed
@@ -1356,15 +1365,13 @@ find_hash_columns(AggState *aggstate)
 				int			attnum = lfirst_int(lc);
 
 				if (!bms_is_member(attnum, grouped_cols))
-					colnos = bms_del_member(colnos, attnum);
+					non_grouping_colnos = bms_del_member(non_grouping_colnos, attnum);
 			}
 		}
-		/* Add in all the grouping columns */
-		for (i = 0; i < perhash->numCols; i++)
-			colnos = bms_add_member(colnos, grpColIdx[i]);
-
+		/* allocate a slot for each grouping column and remaining column */
+		hashGrpColIdxSize = perhash->numCols + bms_num_members(non_grouping_colnos);
 		perhash->hashGrpColIdxInput =
-			palloc(bms_num_members(colnos) * sizeof(AttrNumber));
+			palloc(hashGrpColIdxSize * sizeof(AttrNumber));
 		perhash->hashGrpColIdxHash =
 			palloc(perhash->numCols * sizeof(AttrNumber));
 
@@ -1380,12 +1387,10 @@ find_hash_columns(AggState *aggstate)
 			perhash->hashGrpColIdxInput[i] = grpColIdx[i];
 			perhash->hashGrpColIdxHash[i] = i + 1;
 			perhash->numhashGrpCols++;
-			/* delete already mapped columns */
-			bms_del_member(colnos, grpColIdx[i]);
 		}
 
 		/* and add the remaining columns */
-		while ((i = bms_first_member(colnos)) >= 0)
+		while ((i = bms_first_member(non_grouping_colnos)) >= 0)
 		{
 			perhash->hashGrpColIdxInput[perhash->numhashGrpCols] = i;
 			perhash->numhashGrpCols++;
@@ -1412,7 +1417,8 @@ find_hash_columns(AggState *aggstate)
 							   &TTSOpsMinimalTuple);
 
 		list_free(hashTlist);
-		bms_free(colnos);
+		bms_free(grouping_colnos);
+		bms_free(non_grouping_colnos);
 	}
 
 	bms_free(base_colnos);
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index 129c1e5..3ef7ac9 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2265,3 +2265,30 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
  ba       |    0 |     1
 (2 rows)
 
+explain (costs off) delete from tenk1 where (hundred, thousand) in (select twothousand, twothousand from onek);
+                            QUERY PLAN                             
+-------------------------------------------------------------------
+ Delete on tenk1
+   ->  Hash Join
+         Hash Cond: (tenk1.hundred = onek.twothousand)
+         ->  Seq Scan on tenk1
+               Filter: (hundred = thousand)
+         ->  Hash
+               ->  HashAggregate
+                     Group Key: onek.twothousand, onek.twothousand
+                     ->  Seq Scan on onek
+(9 rows)
+
+explain (costs off) select 1 from tenk1 where (hundred, thousand) in (select twothousand, twothousand from onek); 
+                         QUERY PLAN                          
+-------------------------------------------------------------
+ Hash Join
+   Hash Cond: (tenk1.hundred = onek.twothousand)
+   ->  Seq Scan on tenk1
+         Filter: (hundred = thousand)
+   ->  Hash
+         ->  HashAggregate
+               Group Key: onek.twothousand, onek.twothousand
+               ->  Seq Scan on onek
+(8 rows)
+
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index d4fd657..5bd77d4 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -987,3 +987,7 @@ select v||'a', case v||'a' when 'aa' then 1 else 0 end, count(*)
 select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
   from unnest(array['a','b']) u(v)
  group by v||'a' order by 1;
+
+explain (costs off) delete from tenk1 where (hundred, thousand) in (select twothousand, twothousand from onek);
+
+explain (costs off) select 1 from tenk1 where (hundred, thousand) in (select twothousand, twothousand from onek);
-- 
2.7.4
