This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new de82491efb2 Fix errors for oid_dispatch and user
de82491efb2 is described below

commit de82491efb250aa5980dca4d145d8d8d5b669824
Author: Jinbao Chen <[email protected]>
AuthorDate: Mon Nov 17 23:53:07 2025 +0800

    Fix errors for oid_dispatch and user
---
 src/backend/catalog/heap.c              |  16 ++--
 src/backend/catalog/oid_dispatch.c      |  17 ++++
 src/backend/commands/user.c             |   6 +-
 src/backend/optimizer/plan/subselect.c  |   3 +-
 src/include/catalog/oid_dispatch.h      |   2 +
 src/test/regress/expected/gin.out       |  26 +++---
 src/test/regress/expected/gist.out      |  44 +--------
 src/test/regress/expected/namespace.out |  41 ++++++++-
 src/test/regress/expected/spgist.out    |  18 ++--
 src/test/regress/expected/subselect.out | 154 ++++++++++++++++++++++----------
 src/test/regress/serial_schedule        |  24 ++---
 src/test/regress/sql/gist.sql           |   5 --
 src/test/regress/sql/privileges.sql     |  22 -----
 src/test/regress/sql/spgist.sql         |   3 -
 14 files changed, 211 insertions(+), 170 deletions(-)

diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index fd222b096ea..0fac70d05fb 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -2328,8 +2328,8 @@ heap_drop_with_catalog(Oid relid)
         */
        if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
        {
-               Relation        ftrel;
-               HeapTuple       fttuple;
+               Relation        rel;
+               HeapTuple       tuple;
                ScanKeyData     ftkey;
                SysScanDesc     ftscan;
 
@@ -2351,16 +2351,16 @@ heap_drop_with_catalog(Oid relid)
                systable_endscan(ftscan);
                table_close(rel, RowExclusiveLock);
 
-               ftrel = table_open(ForeignTableRelationId, RowExclusiveLock);
+               rel = table_open(ForeignTableRelationId, RowExclusiveLock);
 
-               fttuple = SearchSysCache1(FOREIGNTABLEREL, ObjectIdGetDatum(relid));
-               if (!HeapTupleIsValid(fttuple))
+               tuple = SearchSysCache1(FOREIGNTABLEREL, ObjectIdGetDatum(relid));
+               if (!HeapTupleIsValid(tuple))
                        elog(ERROR, "cache lookup failed for foreign table %u", relid);
 
-               CatalogTupleDelete(ftrel, &fttuple->t_self);
+               CatalogTupleDelete(rel, &tuple->t_self);
 
-               ReleaseSysCache(fttuple);
-               table_close(ftrel, RowExclusiveLock);
+               ReleaseSysCache(tuple);
+               table_close(rel, RowExclusiveLock);
        }
 
        /*
diff --git a/src/backend/catalog/oid_dispatch.c b/src/backend/catalog/oid_dispatch.c
index 6f39a07857e..6e958551d31 100644
--- a/src/backend/catalog/oid_dispatch.c
+++ b/src/backend/catalog/oid_dispatch.c
@@ -87,6 +87,7 @@
 #include "catalog/pg_amproc.h"
 #include "catalog/pg_attrdef.h"
 #include "catalog/pg_authid.h"
+#include "catalog/pg_auth_members.h"
 #include "catalog/pg_cast.h"
 #include "catalog/pg_collation.h"
 #include "catalog/pg_constraint.h"
@@ -575,6 +576,22 @@ GetNewOidForAuthId(Relation relation, Oid indexId, AttrNumber oidcolumn,
        return GetNewOrPreassignedOid(relation, indexId, oidcolumn, &key);
 }
 
+Oid
+GetNewOidForAuthMem(Relation relation, Oid indexId, AttrNumber oidcolumn,
+                                  char *rolname)
+{
+       OidAssignment key;
+
+       Assert(RelationGetRelid(relation) == AuthMemRelationId);
+       Assert(indexId == AuthMemOidIndexId);
+       Assert(oidcolumn == Anum_pg_auth_members_oid);
+
+       memset(&key, 0, sizeof(OidAssignment));
+       key.type = T_OidAssignment;
+       key.objname = rolname;
+       return GetNewOrPreassignedOid(relation, indexId, oidcolumn, &key);
+}
+
 Oid
 GetNewOidForCast(Relation relation, Oid indexId, AttrNumber oidcolumn,
                                 Oid castsource, Oid casttarget)
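
For context on the oid_dispatch.c addition: GetNewOidForAuthMem() follows the same pattern as GetNewOidForAuthId(). It fills in an OidAssignment key (here keyed by a role name) and hands it to GetNewOrPreassignedOid(), so that, as with the other helpers in this file, the OID for the new pg_auth_members row can be taken from a preassigned value when one was dispatched, or freshly generated otherwise. A minimal caller sketch, assuming pg_authmem_rel is pg_auth_members opened with RowExclusiveLock and rolename is the role name used as the dispatch key (mirroring the user.c hunk below):

    Oid         memoid;

    /* OID for the new pg_auth_members row, keyed by rolename */
    memoid = GetNewOidForAuthMem(pg_authmem_rel, AuthMemOidIndexId,
                                 Anum_pg_auth_members_oid,
                                 (char *) rolename);
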
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index a7531357afa..04ec5d5f380 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -3038,8 +3038,10 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
                        }
 
                        /* get an OID for the new row and insert it */
-                       objectId = GetNewOidWithIndex(pg_authmem_rel, AuthMemOidIndexId,
-                                                                                 Anum_pg_auth_members_oid);
+//                     objectId = GetNewOidWithIndex(pg_authmem_rel, AuthMemOidIndexId,
+//                                                                               Anum_pg_auth_members_oid);
+                       objectId = GetNewOidForAuthMem(pg_authmem_rel, AuthMemOidIndexId,
+                                                                                  Anum_pg_auth_members_oid, (char*) rolename);
                        new_record[Anum_pg_auth_members_oid - 1] = objectId;
                        tuple = heap_form_tuple(pg_authmem_dsc,
                                                                        new_record, new_record_nulls);
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index dc2bc965dcc..c7fbe634fae 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2766,7 +2766,8 @@ finalize_plan(PlannerInfo *root, Plan *plan,
                        }
 
                        upperset = bms_difference(extset, parentset);
-                       while ((paramid = bms_next_member(upperset, -1)) >= 0)
+                       paramid = -1;
+                       while ((paramid = bms_next_member(upperset, paramid)) >= 0)
                                initsubplan->extParam = lappend_int(initsubplan->extParam, paramid);
                }
        }
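
A note on the subselect.c fix: bms_next_member() returns the smallest set member strictly greater than its second argument, or a negative value when no members remain. The previous code passed a constant -1 on every call, so it kept returning the first member of upperset and never terminated when the set was non-empty. Seeding paramid with -1 once and feeding the previous result back in restores the usual Bitmapset iteration idiom, sketched here with placeholder names (set, process):

    int     x = -1;

    /* visits each member exactly once, in ascending order */
    while ((x = bms_next_member(set, x)) >= 0)
        process(x);
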
diff --git a/src/include/catalog/oid_dispatch.h b/src/include/catalog/oid_dispatch.h
index fbb7a14f59e..4a7f713783e 100644
--- a/src/include/catalog/oid_dispatch.h
+++ b/src/include/catalog/oid_dispatch.h
@@ -37,6 +37,8 @@ extern Oid GetNewOidForAttrDefault(Relation relation, Oid indexId, AttrNumber oi
                                                                    Oid adrelid, int16 adnum);
 extern Oid GetNewOidForAuthId(Relation relation, Oid indexId, AttrNumber oidcolumn,
                                                           char *rolname);
+extern Oid GetNewOidForAuthMem(Relation relation, Oid indexId, AttrNumber oidcolumn,
+                                                          char *rolname);
 extern Oid GetNewOidForCast(Relation relation, Oid indexId, AttrNumber oidcolumn,
                                                         Oid castsource, Oid casttarget);
 extern Oid GetNewOidForCollation(Relation relation, Oid indexId, AttrNumber oidcolumn,
diff --git a/src/test/regress/expected/gin.out b/src/test/regress/expected/gin.out
index d628c1d88c7..e767f7705c2 100644
--- a/src/test/regress/expected/gin.out
+++ b/src/test/regress/expected/gin.out
@@ -48,10 +48,12 @@ select count(*) from gin_test_tbl where i @> array[1, 999];
  Finalize Aggregate
    ->  Gather Motion 3:1  (slice1; segments: 3)
          ->  Partial Aggregate
-               ->  Seq Scan on gin_test_tbl
-                     Filter: (i @> '{1,999}'::integer[])
+               ->  Bitmap Heap Scan on gin_test_tbl
+                     Recheck Cond: (i @> '{1,999}'::integer[])
+                     ->  Bitmap Index Scan on gin_test_idx
+                           Index Cond: (i @> '{1,999}'::integer[])
  Optimizer: Postgres query optimizer
-(6 rows)
+(8 rows)
 
 select count(*) from gin_test_tbl where i @> array[1, 999];
  count 
@@ -199,15 +201,15 @@ from
                     query                   | return by index | removed by recheck | match 
 -------------------------------------------+-----------------+--------------------+-------
   i @> '{}'                                | 4               | 0                  | t
-  j @> '{}'                                | 6               | 0                  | t
-  i @> '{}' and j @> '{}'                  | 4               | 0                  | t
-  i @> '{1}'                               | 5               | 0                  | t
-  i @> '{1}' and j @> '{}'                 | 3               | 0                  | t
-  i @> '{1}' and i @> '{}' and j @> '{}'   | 3               | 0                  | t
-  j @> '{10}'                              | 4               | 0                  | t
-  j @> '{10}' and i @> '{}'                | 3               | 0                  | t
-  j @> '{10}' and j @> '{}' and i @> '{}'  | 3               | 0                  | t
-  i @> '{1}' and j @> '{10}'               | 2               | 0                  | t
+  j @> '{}'                                | 5               | 0                  | f
+  i @> '{}' and j @> '{}'                  | 3               | 0                  | t
+  i @> '{1}'                               | 2               | 0                  | t
+  i @> '{1}' and j @> '{}'                 | 2               | 0                  | t
+  i @> '{1}' and i @> '{}' and j @> '{}'   | 2               | 0                  | t
+  j @> '{10}'                              | 3               | 0                  | f
+  j @> '{10}' and i @> '{}'                | 2               | 0                  | f
+  j @> '{10}' and j @> '{}' and i @> '{}'  | 2               | 0                  | t
+  i @> '{1}' and j @> '{10}'               | 1               | 0                  | t
 (10 rows)
 
 reset enable_seqscan;
diff --git a/src/test/regress/expected/gist.out b/src/test/regress/expected/gist.out
index b0ec1c4f7e2..24308040106 100644
--- a/src/test/regress/expected/gist.out
+++ b/src/test/regress/expected/gist.out
@@ -364,7 +364,6 @@ create index gist_tbl_multi_index on gist_tbl using gist (circle(p,1), p);
 explain (verbose, costs off)
 select circle(p,1) from gist_tbl
 where p <@ box(point(5, 5), point(5.3, 5.3));
-<<<<<<< HEAD
                                         QUERY PLAN                                         
 ------------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
@@ -375,14 +374,6 @@ where p <@ box(point(5, 5), point(5.3, 5.3));
 Settings: enable_bitmapscan = 'off', enable_indexonlyscan = 'on', enable_seqscan = 'off'
  Optimizer: Postgres query optimizer
 (7 rows)
-=======
-                          QUERY PLAN                           
----------------------------------------------------------------
- Index Only Scan using gist_tbl_multi_index on public.gist_tbl
-   Output: circle(p, '1'::double precision)
-   Index Cond: (gist_tbl.p <@ '(5.3,5.3),(5,5)'::box)
-(3 rows)
->>>>>>> REL_16_9
 
 select circle(p,1) from gist_tbl
 where p <@ box(point(5, 5), point(5.3, 5.3));
@@ -401,7 +392,6 @@ where p <@ box(point(5, 5), point(5.3, 5.3));
 -- are done correctly.
 explain (verbose, costs off)
 select p from gist_tbl where circle(p,1) @> circle(point(0,0),0.95);
-<<<<<<< HEAD
                                          QUERY PLAN                                          
 ---------------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
@@ -412,14 +402,6 @@ select p from gist_tbl where circle(p,1) @> circle(point(0,0),0.95);
 Settings: enable_bitmapscan = 'off', enable_indexonlyscan = 'on', enable_seqscan = 'off'
  Optimizer: Postgres query optimizer
 (7 rows)
-=======
-                                      QUERY PLAN                                       
----------------------------------------------------------------------------------------
- Index Only Scan using gist_tbl_multi_index on public.gist_tbl
-   Output: p
-   Index Cond: ((circle(gist_tbl.p, '1'::double precision)) @> '<(0,0),0.95>'::circle)
-(3 rows)
->>>>>>> REL_16_9
 
 select p from gist_tbl where circle(p,1) @> circle(point(0,0),0.95);
    p   
@@ -430,7 +412,6 @@ select p from gist_tbl where circle(p,1) @> circle(point(0,0),0.95);
 -- Also check that use_physical_tlist doesn't trigger in such cases.
 explain (verbose, costs off)
 select count(*) from gist_tbl;
-<<<<<<< HEAD
                                         QUERY PLAN                                         
 ------------------------------------------------------------------------------------------
  Finalize Aggregate
@@ -443,14 +424,6 @@ select count(*) from gist_tbl;
 Settings: enable_bitmapscan = 'off', enable_indexonlyscan = 'on', enable_seqscan = 'off'
  Optimizer: Postgres query optimizer
 (9 rows)
-=======
-                             QUERY PLAN                              
----------------------------------------------------------------------
- Aggregate
-   Output: count(*)
-   ->  Index Only Scan using gist_tbl_multi_index on public.gist_tbl
-(3 rows)
->>>>>>> REL_16_9
 
 select count(*) from gist_tbl;
  count 
@@ -460,7 +433,6 @@ select count(*) from gist_tbl;
 
 -- This case isn't supported, but it should at least EXPLAIN correctly.
 explain (verbose, costs off)
-<<<<<<< HEAD
 select p from gist_tbl order by circle(p,1) <-> point(0,0), p <-> point(0,0) limit 15;
                                                               QUERY PLAN
 --------------------------------------------------------------------------------------------------------------------------------------
@@ -479,24 +451,10 @@ select p from gist_tbl order by circle(p,1) <-> point(0,0), p <-> point(0,0) lim
 (12 rows)
 
 select p from gist_tbl order by circle(p,1) <-> point(0,0), p <-> point(0,0) limit 15;
-ERROR:  lossy distance functions are not supported in index-only scans  (seg0 slice1 172.17.0.2:7002 pid=85777)
-=======
-select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1;
-                                     QUERY PLAN                                     
-------------------------------------------------------------------------------------
- Limit
-   Output: p, ((circle(p, '1'::double precision) <-> '(0,0)'::point))
-   ->  Index Only Scan using gist_tbl_multi_index on public.gist_tbl
-         Output: p, (circle(p, '1'::double precision) <-> '(0,0)'::point)
-   Order By: ((circle(gist_tbl.p, '1'::double precision)) <-> '(0,0)'::point)
-(5 rows)
-
-select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1;
-ERROR:  lossy distance functions are not supported in index-only scans
+ERROR:  lossy distance functions are not supported in index-only scans  (seg0 slice1 127.0.1.1:7002 pid=1227707)
 -- Force an index build using buffering.
 create index gist_tbl_box_index_forcing_buffering on gist_tbl using gist (p)
   with (buffering=on, fillfactor=50);
->>>>>>> REL_16_9
 -- Clean up
 reset enable_seqscan;
 reset enable_bitmapscan;
diff --git a/src/test/regress/expected/namespace.out b/src/test/regress/expected/namespace.out
index f86e6cb32ec..42b6c474157 100644
--- a/src/test/regress/expected/namespace.out
+++ b/src/test/regress/expected/namespace.out
@@ -16,11 +16,44 @@ CREATE SCHEMA test_ns_schema_1
        CREATE TABLE abc (
               a serial,
               b int UNIQUE
-<<<<<<< HEAD
        ) DISTRIBUTED BY (b);
-=======
-       );
->>>>>>> REL_16_9
+-- verify that the correct search_path restored on abort
+SET search_path to public;
+BEGIN;
+SET search_path to public, test_ns_schema_1;
+CREATE SCHEMA test_ns_schema_2
+       CREATE VIEW abc_view AS SELECT c FROM abc;
+ERROR:  column "c" does not exist
+LINE 2:        CREATE VIEW abc_view AS SELECT c FROM abc;
+                                              ^
+COMMIT;
+SHOW search_path;
+ search_path 
+-------------
+ public
+(1 row)
+
+-- verify that the correct search_path preserved
+-- after creating the schema and on commit
+BEGIN;
+SET search_path to public, test_ns_schema_1;
+CREATE SCHEMA test_ns_schema_2
+       CREATE VIEW abc_view AS SELECT a FROM abc;
+SHOW search_path;
+       search_path        
+--------------------------
+ public, test_ns_schema_1
+(1 row)
+
+COMMIT;
+SHOW search_path;
+       search_path        
+--------------------------
+ public, test_ns_schema_1
+(1 row)
+
+DROP SCHEMA test_ns_schema_2 CASCADE;
+NOTICE:  drop cascades to view test_ns_schema_2.abc_view
 -- verify that the correct search_path restored on abort
 SET search_path to public;
 BEGIN;
diff --git a/src/test/regress/expected/spgist.out b/src/test/regress/expected/spgist.out
index 9dba910fef7..7af016d8f6d 100644
--- a/src/test/regress/expected/spgist.out
+++ b/src/test/regress/expected/spgist.out
@@ -73,7 +73,6 @@ create index spgist_domain_idx on spgist_domain_tbl using spgist(f1);
 insert into spgist_domain_tbl values('fee'), ('fi'), ('fo'), ('fum');
 explain (costs off)
 select * from spgist_domain_tbl where f1 = 'fo';
-<<<<<<< HEAD
                      QUERY PLAN                      
 -----------------------------------------------------
  Gather Motion 1:1  (slice1; segments: 1)
@@ -83,15 +82,6 @@ select * from spgist_domain_tbl where f1 = 'fo';
                Index Cond: ((f1)::text = 'fo'::text)
  Optimizer: Postgres query optimizer
 (6 rows)
-=======
-                  QUERY PLAN                   
------------------------------------------------
- Bitmap Heap Scan on spgist_domain_tbl
-   Recheck Cond: ((f1)::text = 'fo'::text)
-   ->  Bitmap Index Scan on spgist_domain_idx
-         Index Cond: ((f1)::text = 'fo'::text)
-(4 rows)
->>>>>>> REL_16_9
 
 select * from spgist_domain_tbl where f1 = 'fo';
  f1 
@@ -99,14 +89,16 @@ select * from spgist_domain_tbl where f1 = 'fo';
  fo
 (1 row)
 
-<<<<<<< HEAD
-=======
 -- test an unlogged table, mostly to get coverage of spgistbuildempty
 create unlogged table spgist_unlogged_tbl(id serial, b box);
+ERROR:  unlogged sequences are not supported
 create index spgist_unlogged_idx on spgist_unlogged_tbl using spgist (b);
+ERROR:  relation "spgist_unlogged_tbl" does not exist
 insert into spgist_unlogged_tbl(b)
 select box(point(i,j))
   from generate_series(1,100,5) i,
        generate_series(1,10,5) j;
+ERROR:  relation "spgist_unlogged_tbl" does not exist
+LINE 1: insert into spgist_unlogged_tbl(b)
+                    ^
 -- leave this table around, to help in testing dump/restore
->>>>>>> REL_16_9
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index 75648050d52..a36f1c63574 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -1402,6 +1402,41 @@ select * from int4_tbl where
  f1 
 ----
   0
+(1 row)
+
+--
+-- Check for incorrect optimization when IN subquery contains a SRF
+--
+explain (verbose, costs off)
+select * from int4_tbl o where (f1, f1) in
+  (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1);
+                               QUERY PLAN                                
+-------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)
+   Output: o.f1
+   ->  Hash Semi Join
+         Output: o.f1
+         Hash Cond: (o.f1 = "ANY_subquery".f1)
+         ->  Seq Scan on public.int4_tbl o
+               Output: o.f1
+         ->  Hash
+               Output: "ANY_subquery".f1, "ANY_subquery".g
+               ->  Subquery Scan on "ANY_subquery"
+                     Output: "ANY_subquery".f1, "ANY_subquery".g
+                     Filter: ("ANY_subquery".f1 = "ANY_subquery".g)
+                     ->  Result
+                           Output: i.f1, ((generate_series(1, 50)) / 10)
+                           ->  ProjectSet
+                                 Output: generate_series(1, 50), i.f1
+                                 ->  Seq Scan on public.int4_tbl i
+                                       Output: i.f1
+ Optimizer: Postgres query optimizer
+(19 rows)
+
+select * from int4_tbl o where (f1, f1) in
+  (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1);
+ f1 
+----
   0
 (1 row)
 
@@ -1704,14 +1739,17 @@ select relname::information_schema.sql_identifier as tname, * from
   (select * from pg_class c) ss1) ss2
   right join pg_attribute a on a.attrelid = ss2.oid
 where tname = 'tenk1' and attnum = 1;
-                                QUERY PLAN                                
---------------------------------------------------------------------------
- Nested Loop
-   ->  Index Scan using pg_class_relname_nsp_index on pg_class c
-         Index Cond: (relname = 'tenk1'::name)
-   ->  Index Scan using pg_attribute_relid_attnum_index on pg_attribute a
-         Index Cond: ((attrelid = c.oid) AND (attnum = 1))
-(5 rows)
+                              QUERY PLAN                               
+-----------------------------------------------------------------------
+ Hash Join
+   Hash Cond: (a.attrelid = c.oid)
+   ->  Seq Scan on pg_attribute a
+         Filter: (attnum = 1)
+   ->  Hash
+         ->  Index Scan using pg_class_relname_nsp_index on pg_class c
+               Index Cond: (relname = 'tenk1'::name)
+ Optimizer: Postgres query optimizer
+(8 rows)
 
 select tname, attname from (
 select relname::information_schema.sql_identifier as tname, * from
@@ -1731,24 +1769,37 @@ select t1.ten, sum(x) from
   ) ss on t1.unique1 = ss.fivethous
 group by t1.ten
 order by t1.ten;
-                                                                                                    QUERY PLAN                                                                                                     
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Sort
+                                     QUERY PLAN                                     
+------------------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)
    Output: t1.ten, (sum((t1.ten + t2.ten)))
-   Sort Key: t1.ten
-   ->  HashAggregate
-         Output: t1.ten, sum((t1.ten + t2.ten))
-         Group Key: t1.ten
-         ->  Hash Right Join
-               Output: t1.ten, t2.ten
-               Hash Cond: (t2.fivethous = t1.unique1)
-               ->  Seq Scan on public.tenk1 t2
-                     Output: t2.unique1, t2.unique2, t2.two, t2.four, t2.ten, t2.twenty, t2.hundred, t2.thousand, t2.twothousand, t2.fivethous, t2.tenthous, t2.odd, t2.even, t2.stringu1, t2.stringu2, t2.string4
-               ->  Hash
-                     Output: t1.ten, t1.unique1
-                     ->  Seq Scan on public.tenk1 t1
-                           Output: t1.ten, t1.unique1
-(15 rows)
+   Merge Key: t1.ten
+   ->  Sort
+         Output: t1.ten, (sum((t1.ten + t2.ten)))
+         Sort Key: t1.ten
+         ->  Finalize HashAggregate
+               Output: t1.ten, sum((t1.ten + t2.ten))
+               Group Key: t1.ten
+               ->  Redistribute Motion 3:3  (slice2; segments: 3)
+                     Output: t1.ten, (PARTIAL sum((t1.ten + t2.ten)))
+                     Hash Key: t1.ten
+                     ->  Partial HashAggregate
+                           Output: t1.ten, PARTIAL sum((t1.ten + t2.ten))
+                           Group Key: t1.ten
+                           ->  Hash Right Join
+                                 Output: t1.ten, t2.ten
+                                 Hash Cond: (t2.fivethous = t1.unique1)
+                                 ->  Redistribute Motion 3:3  (slice3; segments: 3)
+                                       Output: t2.ten, t2.fivethous
+                                       Hash Key: t2.fivethous
+                                       ->  Seq Scan on public.tenk1 t2
+                                             Output: t2.ten, t2.fivethous
+                                 ->  Hash
+                                       Output: t1.ten, t1.unique1
+                                       ->  Seq Scan on public.tenk1 t1
+                                             Output: t1.ten, t1.unique1
+ Optimizer: Postgres query optimizer
+(28 rows)
 
 select t1.ten, sum(x) from
   tenk1 t1 left join lateral (
@@ -1777,28 +1828,41 @@ select t1.q1, x from
    lateral (select t2.q1+t3.q1 as x, * from int8_tbl t3) t3 on t2.q2 = t3.q2)
   on t1.q2 = t2.q2
 order by 1, 2;
-                       QUERY PLAN                       
---------------------------------------------------------
- Sort
+                                  QUERY PLAN                                  
+------------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)
    Output: t1.q1, ((t2.q1 + t3.q1))
-   Sort Key: t1.q1, ((t2.q1 + t3.q1))
-   ->  Hash Left Join
-         Output: t1.q1, (t2.q1 + t3.q1)
-         Hash Cond: (t2.q2 = t3.q2)
-         ->  Hash Left Join
-               Output: t1.q1, t2.q1, t2.q2
-               Hash Cond: (t1.q2 = t2.q2)
-               ->  Seq Scan on public.int8_tbl t1
-                     Output: t1.q1, t1.q2
-               ->  Hash
-                     Output: t2.q1, t2.q2
-                     ->  Seq Scan on public.int8_tbl t2
+   Merge Key: t1.q1, ((t2.q1 + t3.q1))
+   ->  Sort
+         Output: t1.q1, ((t2.q1 + t3.q1))
+         Sort Key: t1.q1, ((t2.q1 + t3.q1))
+         ->  Hash Right Join
+               Output: t1.q1, (t2.q1 + t3.q1)
+               Hash Cond: (t2.q2 = t1.q2)
+               ->  Hash Left Join
+                     Output: t2.q1, t2.q2, t3.q1
+                     Hash Cond: (t2.q2 = t3.q2)
+                     ->  Redistribute Motion 3:3  (slice2; segments: 3)
                            Output: t2.q1, t2.q2
-         ->  Hash
-               Output: t3.q1, t3.q2
-               ->  Seq Scan on public.int8_tbl t3
-                     Output: t3.q1, t3.q2
-(19 rows)
+                           Hash Key: t2.q2
+                           ->  Seq Scan on public.int8_tbl t2
+                                 Output: t2.q1, t2.q2
+                     ->  Hash
+                           Output: t3.q1, t3.q2
+                           ->  Redistribute Motion 3:3  (slice3; segments: 3)
+                                 Output: t3.q1, t3.q2
+                                 Hash Key: t3.q2
+                                 ->  Seq Scan on public.int8_tbl t3
+                                       Output: t3.q1, t3.q2
+               ->  Hash
+                     Output: t1.q1, t1.q2
+                     ->  Redistribute Motion 3:3  (slice4; segments: 3)
+                           Output: t1.q1, t1.q2
+                           Hash Key: t1.q2
+                           ->  Seq Scan on public.int8_tbl t1
+                                 Output: t1.q1, t1.q2
+ Optimizer: Postgres query optimizer
+(32 rows)
 
 select t1.q1, x from
   int8_tbl t1 left join
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 48bd03dc8a3..b04e6f85f35 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -101,19 +101,19 @@ test: arrays
 test: btree_index
 test: hash_index
 test: update
-# test: delete
-# test: namespace
+test: delete
+test: namespace
 # ignore: prepared_xacts
-# test: brin
-# test: gin
-# test: gist
-# test: spgist
-# test: privileges
-# test: init_privs
-# test: security_label
-# test: collate
-# test: matview
-# test: lock
+test: brin
+test: gin
+test: gist
+test: spgist
+test: privileges
+test: init_privs
+test: security_label
+test: collate
+test: matview
+test: lock
 # test: replica_identity
 # test: rowsecurity
 # test: object_address
diff --git a/src/test/regress/sql/gist.sql b/src/test/regress/sql/gist.sql
index ea03b95df93..83ba1a9a0f3 100644
--- a/src/test/regress/sql/gist.sql
+++ b/src/test/regress/sql/gist.sql
@@ -174,18 +174,13 @@ select count(*) from gist_tbl;
 
 -- This case isn't supported, but it should at least EXPLAIN correctly.
 explain (verbose, costs off)
-<<<<<<< HEAD
 select p from gist_tbl order by circle(p,1) <-> point(0,0), p <-> point(0,0) limit 15;
 select p from gist_tbl order by circle(p,1) <-> point(0,0), p <-> point(0,0) limit 15;
-=======
-select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1;
-select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1;
 
 -- Force an index build using buffering.
 create index gist_tbl_box_index_forcing_buffering on gist_tbl using gist (p)
   with (buffering=on, fillfactor=50);
 
->>>>>>> REL_16_9
 -- Clean up
 reset enable_seqscan;
 reset enable_bitmapscan;
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 3d39cada7bf..442fbd0a7f4 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
@@ -47,9 +47,6 @@ CREATE USER regress_priv_user5;
 CREATE USER regress_priv_user5;        -- duplicate
 CREATE USER regress_priv_user6;
 CREATE USER regress_priv_user7;
-<<<<<<< HEAD
-CREATE ROLE regress_priv_role;
-=======
 CREATE USER regress_priv_user8;
 CREATE USER regress_priv_user9;
 CREATE USER regress_priv_user10;
@@ -107,7 +104,6 @@ CREATE USER regress_priv_user2;
 CREATE USER regress_priv_user3;
 CREATE USER regress_priv_user4;
 CREATE USER regress_priv_user5;
->>>>>>> REL_16_9
 
 GRANT pg_read_all_data TO regress_priv_user6;
 GRANT pg_write_all_data TO regress_priv_user7;
@@ -209,16 +205,10 @@ ALTER FUNCTION leak(integer,integer) OWNER TO regress_priv_user1;
 
 -- test owner privileges
 
-<<<<<<< HEAD
-GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY CURRENT_ROLE;
-REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY foo; -- error
-REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY regress_priv_user2; -- error
-=======
 GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY regress_priv_role; -- error, doesn't have ADMIN OPTION
 GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY CURRENT_ROLE;
 REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY foo; -- error
 REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY regress_priv_user2; -- warning, noop
->>>>>>> REL_16_9
 REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY CURRENT_USER;
 REVOKE regress_priv_role FROM regress_priv_user1 GRANTED BY CURRENT_ROLE;
 DROP ROLE regress_priv_role;
@@ -1218,20 +1208,12 @@ CREATE INDEX sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0)))
        WHERE sro_ifun(a + 10) > sro_ifun(10);
 DROP INDEX sro_idx;
 -- Do the same concurrently
-<<<<<<< HEAD
 CREATE INDEX sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0)))
-=======
-CREATE INDEX CONCURRENTLY sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0)))
->>>>>>> REL_16_9
        WHERE sro_ifun(a + 10) > sro_ifun(10);
 -- REINDEX
 REINDEX TABLE sro_tab;
 REINDEX INDEX sro_idx;
-<<<<<<< HEAD
 REINDEX TABLE sro_tab;
-=======
-REINDEX TABLE CONCURRENTLY sro_tab;
->>>>>>> REL_16_9
 DROP INDEX sro_idx;
 -- CLUSTER
 CREATE INDEX sro_cluster_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0)));
@@ -1251,11 +1233,7 @@ INSERT INTO sro_ptab VALUES (1), (2), (3);
 CREATE INDEX sro_pidx ON sro_ptab ((sro_ifun(a) + sro_ifun(0)))
        WHERE sro_ifun(a + 10) > sro_ifun(10);
 REINDEX TABLE sro_ptab;
-<<<<<<< HEAD
 REINDEX INDEX sro_pidx;
-=======
-REINDEX INDEX CONCURRENTLY sro_pidx;
->>>>>>> REL_16_9
 
 SET SESSION AUTHORIZATION regress_sro_user;
 CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS
diff --git a/src/test/regress/sql/spgist.sql b/src/test/regress/sql/spgist.sql
index c55c6639b32..d066eac2a4f 100644
--- a/src/test/regress/sql/spgist.sql
+++ b/src/test/regress/sql/spgist.sql
@@ -81,8 +81,6 @@ insert into spgist_domain_tbl values('fee'), ('fi'), ('fo'), ('fum');
 explain (costs off)
 select * from spgist_domain_tbl where f1 = 'fo';
 select * from spgist_domain_tbl where f1 = 'fo';
-<<<<<<< HEAD
-=======
 
 -- test an unlogged table, mostly to get coverage of spgistbuildempty
 create unlogged table spgist_unlogged_tbl(id serial, b box);
@@ -92,4 +90,3 @@ select box(point(i,j))
   from generate_series(1,100,5) i,
        generate_series(1,10,5) j;
 -- leave this table around, to help in testing dump/restore
->>>>>>> REL_16_9

