This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this 
push:
     new 330736e0fae Fix error in create_partial_grouping_paths
330736e0fae is described below

commit 330736e0fae687e703f76c6506f1b2ac60c861c4
Author: Jinbao Chen <[email protected]>
AuthorDate: Sat Dec 20 23:24:43 2025 +0800

    Fix error in create_partial_grouping_paths: move the add_partial_path()
    call for the partial aggregate (create_agg_path with
    AGGSPLIT_INITIAL_SERIAL) out of the sorted-path block so it is added
    after sorted-path handling, restoring Finalize/Partial Aggregate plans
    in parallel queries (see updated select_parallel.out expected output).
---
 src/backend/optimizer/plan/planner.c          |  31 +++---
 src/test/regress/expected/select_parallel.out | 132 ++++++++++++++------------
 2 files changed, 86 insertions(+), 77 deletions(-)

diff --git a/src/backend/optimizer/plan/planner.c 
b/src/backend/optimizer/plan/planner.c
index d9184c897f0..527a4136e69 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8883,21 +8883,6 @@ create_partial_grouping_paths(PlannerInfo *root,
                                                                                
                         root->group_pathkeys,
                                                                                
                         -1.0);
 
-                               //if (parse->hasAggs)
-                               {
-                                       add_partial_path(partially_grouped_rel, 
(Path *)
-                                                                        
create_agg_path(root,
-                                                                               
                         partially_grouped_rel,
-                                                                               
                         path,
-                                                                               
                         partially_grouped_rel->reltarget,
-                                                                               
                         parse->groupClause ? AGG_SORTED : AGG_PLAIN,
-                                                                               
                         AGGSPLIT_INITIAL_SERIAL,
-                                                                               
                         false,
-                                                                               
                         root->processed_groupClause,
-                                                                               
                         NIL,
-                                                                               
                         agg_partial_costs,
-                                                                               
                         dNumPartialPartialGroups));
-                               }
                                                /* Group nodes are not used in 
GPDB */
 #if 0
                                else
@@ -8911,6 +8896,22 @@ create_partial_grouping_paths(PlannerInfo *root,
 #endif
                        }
 
+                       //if (parse->hasAggs)
+                       {
+                               add_partial_path(partially_grouped_rel, (Path *)
+                                               create_agg_path(root,
+                                                                               
partially_grouped_rel,
+                                                                               
path,
+                                                                               
partially_grouped_rel->reltarget,
+                                                                               
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+                                                                               
AGGSPLIT_INITIAL_SERIAL,
+                                                                               
false,
+                                                                               
root->processed_groupClause,
+                                                                               
NIL,
+                                                                               
agg_partial_costs,
+                                                                               
dNumPartialPartialGroups));
+                       }
+                       
                        /*
                         * Now we may consider incremental sort on this path, 
but only
                         * when the path is not already sorted and when 
incremental sort
diff --git a/src/test/regress/expected/select_parallel.out 
b/src/test/regress/expected/select_parallel.out
index e2117df4bd3..697ed067121 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -15,19 +15,20 @@ set max_parallel_workers_per_gather=4;
 -- Parallel Append with partial-subplans
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                   QUERY PLAN                   
-------------------------------------------------
- Aggregate
+                     QUERY PLAN                      
+-----------------------------------------------------
+ Finalize Aggregate
    ->  Gather Motion 9:1  (slice1; segments: 9)
-         ->  Parallel Append
-               ->  Seq Scan on d_star a_star_4
-               ->  Seq Scan on f_star a_star_6
-               ->  Seq Scan on e_star a_star_5
-               ->  Seq Scan on b_star a_star_2
-               ->  Seq Scan on c_star a_star_3
-               ->  Seq Scan on a_star a_star_1
+         ->  Partial Aggregate
+               ->  Parallel Append
+                     ->  Seq Scan on d_star a_star_4
+                     ->  Seq Scan on f_star a_star_6
+                     ->  Seq Scan on e_star a_star_5
+                     ->  Seq Scan on b_star a_star_2
+                     ->  Seq Scan on c_star a_star_3
+                     ->  Seq Scan on a_star a_star_1
  Optimizer: Postgres query optimizer
-(10 rows)
+(11 rows)
 
 select round(avg(aa)), sum(aa) from a_star a1;
  round | sum 
@@ -40,19 +41,20 @@ alter table c_star set (parallel_workers = 0);
 alter table d_star set (parallel_workers = 0);
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                   QUERY PLAN                   
-------------------------------------------------
- Aggregate
+                     QUERY PLAN                      
+-----------------------------------------------------
+ Finalize Aggregate
    ->  Gather Motion 9:1  (slice1; segments: 9)
-         ->  Parallel Append
-               ->  Seq Scan on d_star a_star_4
-               ->  Seq Scan on f_star a_star_6
-               ->  Seq Scan on e_star a_star_5
-               ->  Seq Scan on b_star a_star_2
-               ->  Seq Scan on c_star a_star_3
-               ->  Seq Scan on a_star a_star_1
+         ->  Partial Aggregate
+               ->  Parallel Append
+                     ->  Seq Scan on d_star a_star_4
+                     ->  Seq Scan on f_star a_star_6
+                     ->  Seq Scan on e_star a_star_5
+                     ->  Seq Scan on b_star a_star_2
+                     ->  Seq Scan on c_star a_star_3
+                     ->  Seq Scan on a_star a_star_1
  Optimizer: Postgres query optimizer
-(10 rows)
+(11 rows)
 
 select round(avg(aa)), sum(aa) from a_star a2;
  round | sum 
@@ -67,19 +69,20 @@ alter table e_star set (parallel_workers = 0);
 alter table f_star set (parallel_workers = 0);
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                   QUERY PLAN                   
-------------------------------------------------
- Aggregate
+                     QUERY PLAN                      
+-----------------------------------------------------
+ Finalize Aggregate
    ->  Gather Motion 9:1  (slice1; segments: 9)
-         ->  Parallel Append
-               ->  Seq Scan on d_star a_star_4
-               ->  Seq Scan on f_star a_star_6
-               ->  Seq Scan on e_star a_star_5
-               ->  Seq Scan on b_star a_star_2
-               ->  Seq Scan on c_star a_star_3
-               ->  Seq Scan on a_star a_star_1
+         ->  Partial Aggregate
+               ->  Parallel Append
+                     ->  Seq Scan on d_star a_star_4
+                     ->  Seq Scan on f_star a_star_6
+                     ->  Seq Scan on e_star a_star_5
+                     ->  Seq Scan on b_star a_star_2
+                     ->  Seq Scan on c_star a_star_3
+                     ->  Seq Scan on a_star a_star_1
  Optimizer: Postgres query optimizer
-(10 rows)
+(11 rows)
 
 select round(avg(aa)), sum(aa) from a_star a3;
  round | sum 
@@ -167,14 +170,15 @@ drop table part_pa_test;
 set parallel_leader_participation = off;
 explain (costs off)
   select count(*) from tenk1 where stringu1 = 'GRAAAA';
-                    QUERY PLAN                     
----------------------------------------------------
- Aggregate
+                       QUERY PLAN                        
+---------------------------------------------------------
+ Finalize Aggregate
    ->  Gather Motion 12:1  (slice1; segments: 12)
-         ->  Parallel Seq Scan on tenk1
-               Filter: (stringu1 = 'GRAAAA'::name)
+         ->  Partial Aggregate
+               ->  Parallel Seq Scan on tenk1
+                     Filter: (stringu1 = 'GRAAAA'::name)
  Optimizer: Postgres query optimizer
-(5 rows)
+(6 rows)
 
 select count(*) from tenk1 where stringu1 = 'GRAAAA';
  count 
@@ -187,14 +191,15 @@ select count(*) from tenk1 where stringu1 = 'GRAAAA';
 set max_parallel_workers = 0;
 explain (costs off)
   select count(*) from tenk1 where stringu1 = 'GRAAAA';
-                    QUERY PLAN                     
----------------------------------------------------
- Aggregate
+                       QUERY PLAN                        
+---------------------------------------------------------
+ Finalize Aggregate
    ->  Gather Motion 12:1  (slice1; segments: 12)
-         ->  Parallel Seq Scan on tenk1
-               Filter: (stringu1 = 'GRAAAA'::name)
+         ->  Partial Aggregate
+               ->  Parallel Seq Scan on tenk1
+                     Filter: (stringu1 = 'GRAAAA'::name)
  Optimizer: Postgres query optimizer
-(5 rows)
+(6 rows)
 
 select count(*) from tenk1 where stringu1 = 'GRAAAA';
  count 
@@ -1212,24 +1217,27 @@ EXPLAIN (VERBOSE, COSTS OFF)
 SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey
   FROM tenk1 t1 JOIN tenk1 t2 ON TRUE
   ORDER BY pathkey;
-                                             QUERY PLAN                        
                      
------------------------------------------------------------------------------------------------------
- Sort
+                                                                               
                           QUERY PLAN                                           
                                                                
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Gather Motion 6:1  (slice1; segments: 6)
    Output: (((unnest('{}'::integer[])) + 1))
-   Sort Key: (((unnest('{}'::integer[])) + 1))
-   ->  Result
-         Output: ((unnest('{}'::integer[])) + 1)
-         ->  ProjectSet
-               Output: unnest('{}'::integer[])
-               ->  Nested Loop
-                     ->  Gather
-                           Workers Planned: 4
-                           ->  Parallel Index Only Scan using tenk1_hundred on 
public.tenk1 t1
-                     ->  Materialize
-                           ->  Gather
-                                 Workers Planned: 4
-                                 ->  Parallel Index Only Scan using 
tenk1_hundred on public.tenk1 t2
-(15 rows)
+   Merge Key: (((unnest('{}'::integer[])) + 1))
+   ->  Sort
+         Output: (((unnest('{}'::integer[])) + 1))
+         Sort Key: (((unnest('{}'::integer[])) + 1))
+         ->  Result
+               Output: ((unnest('{}'::integer[])) + 1)
+               ->  ProjectSet
+                     Output: unnest('{}'::integer[])
+                     ->  Nested Loop
+                           ->  Parallel Seq Scan on public.tenk1 t1
+                                 Output: t1.unique1, t1.unique2, t1.two, 
t1.four, t1.ten, t1.twenty, t1.hundred, t1.thousand, t1.twothousand, 
t1.fivethous, t1.tenthous, t1.odd, t1.even, t1.stringu1, t1.stringu2, t1.string4
+                           ->  Materialize
+                                 ->  Broadcast Motion 3:6  (slice2; segments: 
3)
+                                       ->  Seq Scan on public.tenk1 t2
+ Settings: enable_parallel = 'on', optimizer = 'off', parallel_setup_cost = 
'0', parallel_tuple_cost = '0', min_parallel_table_scan_size = '0'
+ Optimizer: Postgres query optimizer
+(18 rows)
 
 -- test passing expanded-value representations to workers
 CREATE FUNCTION make_some_array(int,int) returns int[] as


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to